Commit 0ffa798d947f5f5e40690cc9d38e678080a34f87
Merge branches 'perf/powerpc' and 'perf/bench' into perf/core
Merge reason: Both 'perf bench' and the pending PowerPC changes
are now ready for the next merge window.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 37 changed files (inline diff):
- arch/powerpc/Kconfig.debug
- arch/powerpc/configs/pseries_defconfig
- arch/powerpc/include/asm/emulated_ops.h
- arch/powerpc/include/asm/hvcall.h
- arch/powerpc/include/asm/reg.h
- arch/powerpc/include/asm/trace.h
- arch/powerpc/kernel/align.c
- arch/powerpc/kernel/entry_64.S
- arch/powerpc/kernel/exceptions-64s.S
- arch/powerpc/kernel/irq.c
- arch/powerpc/kernel/perf_event.c
- arch/powerpc/kernel/power5+-pmu.c
- arch/powerpc/kernel/power5-pmu.c
- arch/powerpc/kernel/power6-pmu.c
- arch/powerpc/kernel/power7-pmu.c
- arch/powerpc/kernel/ppc970-pmu.c
- arch/powerpc/kernel/setup-common.c
- arch/powerpc/kernel/time.c
- arch/powerpc/kernel/traps.c
- arch/powerpc/lib/copypage_64.S
- arch/powerpc/platforms/pseries/hvCall.S
- arch/powerpc/platforms/pseries/hvCall_inst.c
- arch/powerpc/platforms/pseries/lpar.c
- include/linux/perf_counter.h
- include/linux/perf_event.h
- kernel/perf_event.c
- tools/perf/Documentation/perf-bench.txt
- tools/perf/Makefile
- tools/perf/bench/bench.h
- tools/perf/bench/sched-messaging.c
- tools/perf/bench/sched-pipe.c
- tools/perf/builtin-bench.c
- tools/perf/builtin.h
- tools/perf/command-list.txt
- tools/perf/design.txt
- tools/perf/perf.c
- tools/perf/util/parse-events.c
arch/powerpc/Kconfig.debug
menu "Kernel hacking"

source "lib/Kconfig.debug"

config PPC_DISABLE_WERROR
    bool "Don't build arch/powerpc code with -Werror"
    default n
    help
      This option tells the compiler NOT to build the code under
      arch/powerpc with the -Werror flag (which means warnings
      are treated as errors).

      Only enable this if you are hitting a build failure in the
      arch/powerpc code caused by a warning, and you don't feel
      inclined to fix it.

config PPC_WERROR
    bool
    depends on !PPC_DISABLE_WERROR
    default y

config PRINT_STACK_DEPTH
    int "Stack depth to print" if DEBUG_KERNEL
    default 64
    help
      This option allows you to set the stack depth that the kernel
      prints in stack traces. This can be useful if your display is
      too small and stack traces cause important information to
      scroll off the screen.

config DEBUG_STACKOVERFLOW
    bool "Check for stack overflows"
    depends on DEBUG_KERNEL
    help
      This option will cause messages to be printed if free stack space
      drops below a certain limit.

config DEBUG_STACK_USAGE
    bool "Stack utilization instrumentation"
    depends on DEBUG_KERNEL
    help
      Enables the display of the minimum amount of free stack which each
      task has ever had available in the sysrq-T and sysrq-P debug output.

      This option will slow down process creation somewhat.

config HCALL_STATS
    bool "Hypervisor call instrumentation"
-   depends on PPC_PSERIES && DEBUG_FS
+   depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS
    help
      Adds code to keep track of the number of hypervisor calls made and
      the amount of time spent in hypervisor calls. Wall time spent in
      each call is always calculated, and if available CPU cycles spent
      are also calculated. A directory named hcall_inst is added at the
      root of the debugfs filesystem. Within the hcall_inst directory
      are files that contain CPU specific call statistics.

      This option will add a small amount of overhead to all hypervisor
      calls.

config PPC_EMULATED_STATS
    bool "Emulated instructions tracking"
    depends on DEBUG_FS
    help
      Adds code to keep track of the number of instructions that are
      emulated by the in-kernel emulator. Counters for the various classes
      of emulated instructions are available under
      powerpc/emulated_instructions/ in the root of the debugfs file
      system. Optionally (controlled by
      powerpc/emulated_instructions/do_warn in debugfs), rate-limited
      warnings can be printed to the console when instructions are
      emulated.

config CODE_PATCHING_SELFTEST
    bool "Run self-tests of the code-patching code."
    depends on DEBUG_KERNEL
    default n

config FTR_FIXUP_SELFTEST
    bool "Run self-tests of the feature-fixup code."
    depends on DEBUG_KERNEL
    default n

config MSI_BITMAP_SELFTEST
    bool "Run self-tests of the MSI bitmap code."
    depends on DEBUG_KERNEL
    default n

config XMON
    bool "Include xmon kernel debugger"
    depends on DEBUG_KERNEL
    help
      Include in-kernel hooks for the xmon kernel monitor/debugger.
      Unless you are intending to debug the kernel, say N here.
      Make sure to enable also CONFIG_BOOTX_TEXT on Macs. Otherwise
      nothing will appear on the screen (xmon writes directly to the
      framebuffer memory).
      The cmdline option 'xmon' or 'xmon=early' will drop into xmon
      very early during boot. 'xmon=on' will just enable the xmon
      debugger hooks. 'xmon=off' will disable the debugger hooks
      if CONFIG_XMON_DEFAULT is set.
      xmon will print a backtrace on the very first invocation.
      'xmon=nobt' will disable this autobacktrace.

config XMON_DEFAULT
    bool "Enable xmon by default"
    depends on XMON
    help
      xmon is normally disabled unless booted with 'xmon=on'.
      Use 'xmon=off' to disable xmon init during runtime.

config XMON_DISASSEMBLY
    bool "Include disassembly support in xmon"
    depends on XMON
    default y
    help
      Include support for disassembling in xmon. You probably want
      to say Y here, unless you're building for a memory-constrained
      system.

config DEBUGGER
    bool
    depends on KGDB || XMON
    default y

config IRQSTACKS
    bool "Use separate kernel stacks when processing interrupts"
    help
      If you say Y here the kernel will use separate kernel stacks
      for handling hard and soft interrupts. This can help avoid
      overflowing the process kernel stacks.

config VIRQ_DEBUG
    bool "Expose hardware/virtual IRQ mapping via debugfs"
    depends on DEBUG_FS
    help
      This option will show the mapping relationship between hardware irq
      numbers and virtual irq numbers. The mapping is exposed via debugfs
      in the file powerpc/virq_mapping.

      If you don't know what this means you don't need it.

config BDI_SWITCH
    bool "Include BDI-2000 user context switcher"
    depends on DEBUG_KERNEL && PPC32
    help
      Include in-kernel support for the Abatron BDI2000 debugger.
      Unless you are intending to debug the kernel with one of these
      machines, say N here.

config BOOTX_TEXT
    bool "Support for early boot text console (BootX or OpenFirmware only)"
    depends on PPC_OF && PPC_BOOK3S
    help
      Say Y here to see progress messages from the boot firmware in text
      mode. Requires either BootX or Open Firmware.

config PPC_EARLY_DEBUG
    bool "Early debugging (dangerous)"
    # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water
    # mark, which doesn't work with current 440 KVM.
    depends on !KVM
    help
      Say Y to enable some early debugging facilities that may be available
      for your processor/board combination. Those facilities are hacks
      intended to debug problems early during boot, this should not be
      enabled in a production kernel.
      Note that enabling this will also cause the kernel default log level
      to be pushed to max automatically very early during boot

choice
    prompt "Early debugging console"
    depends on PPC_EARLY_DEBUG
    help
      Use the selected console for early debugging. Careful, if you
      enable debugging for the wrong type of machine your kernel
      _will not boot_.

config PPC_EARLY_DEBUG_LPAR
    bool "LPAR HV Console"
    depends on PPC_PSERIES
    help
      Select this to enable early debugging for a machine with a HVC
      console on vterm 0.

config PPC_EARLY_DEBUG_G5
    bool "Apple G5"
    depends on PPC_PMAC64
    help
      Select this to enable early debugging for Apple G5 machines.

config PPC_EARLY_DEBUG_RTAS_PANEL
    bool "RTAS Panel"
    depends on PPC_RTAS
    help
      Select this to enable early debugging via the RTAS panel.

config PPC_EARLY_DEBUG_RTAS_CONSOLE
    bool "RTAS Console"
    depends on PPC_RTAS
    select UDBG_RTAS_CONSOLE
    help
      Select this to enable early debugging via the RTAS console.

config PPC_EARLY_DEBUG_MAPLE
    bool "Maple real mode"
    depends on PPC_MAPLE
    help
      Select this to enable early debugging for Maple.

config PPC_EARLY_DEBUG_ISERIES
    bool "iSeries HV Console"
    depends on PPC_ISERIES
    help
      Select this to enable early debugging for legacy iSeries. You need
      to hit "Ctrl-x Ctrl-x" to see the messages on the console.

config PPC_EARLY_DEBUG_PAS_REALMODE
    bool "PA Semi real mode"
    depends on PPC_PASEMI
    help
      Select this to enable early debugging for PA Semi.
      Output will be on UART0.

config PPC_EARLY_DEBUG_BEAT
    bool "Beat HV Console"
    depends on PPC_CELLEB
    select PPC_UDBG_BEAT
    help
      Select this to enable early debugging for Celleb with Beat.

config PPC_EARLY_DEBUG_44x
    bool "Early serial debugging for IBM/AMCC 44x CPUs"
    depends on 44x
    help
      Select this to enable early debugging for IBM 44x chips via the
      inbuilt serial port. If you enable this, ensure you set
      PPC_EARLY_DEBUG_44x_PHYSLOW below to suit your target board.

config PPC_EARLY_DEBUG_40x
    bool "Early serial debugging for IBM/AMCC 40x CPUs"
    depends on 40x
    help
      Select this to enable early debugging for IBM 40x chips via the
      inbuilt serial port. This works on chips with a 16550 compatible
      UART. Xilinx chips with uartlite cannot use this option.

config PPC_EARLY_DEBUG_CPM
    bool "Early serial debugging for Freescale CPM-based serial ports"
    depends on SERIAL_CPM
    select PIN_TLB if PPC_8xx
    help
      Select this to enable early debugging for Freescale chips
      using a CPM-based serial port. This assumes that the bootwrapper
      has run, and set up the CPM in a particular way.

endchoice

config PPC_EARLY_DEBUG_44x_PHYSLOW
    hex "Low 32 bits of early debug UART physical address"
    depends on PPC_EARLY_DEBUG_44x
    default "0x40000200"
    help
      You probably want 0x40000200 for ebony boards and
      0x40000300 for taishan

config PPC_EARLY_DEBUG_44x_PHYSHIGH
    hex "EPRN of early debug UART physical address"
    depends on PPC_EARLY_DEBUG_44x
    default "0x1"

config PPC_EARLY_DEBUG_40x_PHYSADDR
    hex "Early debug UART physical address"
    depends on PPC_EARLY_DEBUG_40x
    default "0xef600300"

config PPC_EARLY_DEBUG_CPM_ADDR
    hex "CPM UART early debug transmit descriptor address"
    depends on PPC_EARLY_DEBUG_CPM
    default "0xfa202008" if PPC_EP88XC
    default "0xf0001ff8" if CPM2
    default "0xff002008" if CPM1
    help
      This specifies the address of the transmit descriptor
      used for early debug output. Because it is needed before
      platform probing is done, all platforms selected must
      share the same address.

endmenu
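Note on the HCALL_STATS and PPC_EMULATED_STATS options above: both expose their counters through debugfs, as their help texts describe. A minimal userspace sketch of reading them follows, assuming debugfs is mounted at /sys/kernel/debug and that the hcall_inst directory contains per-CPU files as the help text says; the exact file names inside these directories are an assumption for illustration, not something this commit guarantees.

#!/usr/bin/env python3
"""Dump the PowerPC debugfs statistics described by the Kconfig
help texts above. Paths and file layout are assumptions based on
the help text (hcall_inst/ with CPU-specific files, and
powerpc/emulated_instructions/ with one counter file per class)."""
import os
import sys

DEBUGFS = "/sys/kernel/debug"  # assumed debugfs mount point


def dump_dir(path, title):
    """Print the contents of every regular file under `path`."""
    if not os.path.isdir(path):
        print(f"{title}: {path} not present (option disabled?)")
        return
    for name in sorted(os.listdir(path)):
        fpath = os.path.join(path, name)
        if not os.path.isfile(fpath):
            continue
        print(f"--- {title}/{name} ---")
        try:
            with open(fpath) as f:
                sys.stdout.write(f.read())
        except OSError as e:
            print(f"(unreadable: {e})")


if __name__ == "__main__":
    # CONFIG_HCALL_STATS: per-CPU hypervisor call statistics.
    dump_dir(os.path.join(DEBUGFS, "hcall_inst"), "hcall_inst")
    # CONFIG_PPC_EMULATED_STATS: counters per emulated-instruction class.
    dump_dir(os.path.join(DEBUGFS, "powerpc", "emulated_instructions"),
             "emulated_instructions")

Run as root (debugfs is normally root-only); the script degrades gracefully when either option is compiled out.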
arch/powerpc/configs/pseries_defconfig
| 1 | # | 1 | # |
| 2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
| 3 | # Linux kernel version: 2.6.28-rc3 | 3 | # Linux kernel version: 2.6.28-rc3 |
| 4 | # Tue Nov 11 19:37:06 2008 | 4 | # Tue Nov 11 19:37:06 2008 |
| 5 | # | 5 | # |
| 6 | CONFIG_PPC64=y | 6 | CONFIG_PPC64=y |
| 7 | 7 | ||
| 8 | # | 8 | # |
| 9 | # Processor support | 9 | # Processor support |
| 10 | # | 10 | # |
| 11 | # CONFIG_POWER4_ONLY is not set | 11 | # CONFIG_POWER4_ONLY is not set |
| 12 | CONFIG_POWER3=y | 12 | CONFIG_POWER3=y |
| 13 | CONFIG_POWER4=y | 13 | CONFIG_POWER4=y |
| 14 | # CONFIG_TUNE_CELL is not set | 14 | # CONFIG_TUNE_CELL is not set |
| 15 | CONFIG_PPC_FPU=y | 15 | CONFIG_PPC_FPU=y |
| 16 | CONFIG_ALTIVEC=y | 16 | CONFIG_ALTIVEC=y |
| 17 | CONFIG_VSX=y | 17 | CONFIG_VSX=y |
| 18 | CONFIG_PPC_STD_MMU=y | 18 | CONFIG_PPC_STD_MMU=y |
| 19 | CONFIG_PPC_MM_SLICES=y | 19 | CONFIG_PPC_MM_SLICES=y |
| 20 | CONFIG_VIRT_CPU_ACCOUNTING=y | 20 | CONFIG_VIRT_CPU_ACCOUNTING=y |
| 21 | CONFIG_SMP=y | 21 | CONFIG_SMP=y |
| 22 | CONFIG_NR_CPUS=128 | 22 | CONFIG_NR_CPUS=128 |
| 23 | CONFIG_64BIT=y | 23 | CONFIG_64BIT=y |
| 24 | CONFIG_WORD_SIZE=64 | 24 | CONFIG_WORD_SIZE=64 |
| 25 | CONFIG_ARCH_PHYS_ADDR_T_64BIT=y | 25 | CONFIG_ARCH_PHYS_ADDR_T_64BIT=y |
| 26 | CONFIG_MMU=y | 26 | CONFIG_MMU=y |
| 27 | CONFIG_GENERIC_CMOS_UPDATE=y | 27 | CONFIG_GENERIC_CMOS_UPDATE=y |
| 28 | CONFIG_GENERIC_TIME=y | 28 | CONFIG_GENERIC_TIME=y |
| 29 | CONFIG_GENERIC_TIME_VSYSCALL=y | 29 | CONFIG_GENERIC_TIME_VSYSCALL=y |
| 30 | CONFIG_GENERIC_CLOCKEVENTS=y | 30 | CONFIG_GENERIC_CLOCKEVENTS=y |
| 31 | CONFIG_GENERIC_HARDIRQS=y | 31 | CONFIG_GENERIC_HARDIRQS=y |
| 32 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | 32 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y |
| 33 | CONFIG_IRQ_PER_CPU=y | 33 | CONFIG_IRQ_PER_CPU=y |
| 34 | CONFIG_STACKTRACE_SUPPORT=y | 34 | CONFIG_STACKTRACE_SUPPORT=y |
| 35 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | 35 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y |
| 36 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | 36 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y |
| 37 | CONFIG_LOCKDEP_SUPPORT=y | 37 | CONFIG_LOCKDEP_SUPPORT=y |
| 38 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 38 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
| 39 | CONFIG_ARCH_HAS_ILOG2_U32=y | 39 | CONFIG_ARCH_HAS_ILOG2_U32=y |
| 40 | CONFIG_ARCH_HAS_ILOG2_U64=y | 40 | CONFIG_ARCH_HAS_ILOG2_U64=y |
| 41 | CONFIG_GENERIC_HWEIGHT=y | 41 | CONFIG_GENERIC_HWEIGHT=y |
| 42 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 42 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
| 43 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 43 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
| 44 | CONFIG_ARCH_NO_VIRT_TO_BUS=y | 44 | CONFIG_ARCH_NO_VIRT_TO_BUS=y |
| 45 | CONFIG_PPC=y | 45 | CONFIG_PPC=y |
| 46 | CONFIG_EARLY_PRINTK=y | 46 | CONFIG_EARLY_PRINTK=y |
| 47 | CONFIG_COMPAT=y | 47 | CONFIG_COMPAT=y |
| 48 | CONFIG_SYSVIPC_COMPAT=y | 48 | CONFIG_SYSVIPC_COMPAT=y |
| 49 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | 49 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y |
| 50 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | 50 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y |
| 51 | CONFIG_PPC_OF=y | 51 | CONFIG_PPC_OF=y |
| 52 | CONFIG_OF=y | 52 | CONFIG_OF=y |
| 53 | CONFIG_PPC_UDBG_16550=y | 53 | CONFIG_PPC_UDBG_16550=y |
| 54 | # CONFIG_GENERIC_TBSYNC is not set | 54 | # CONFIG_GENERIC_TBSYNC is not set |
| 55 | CONFIG_AUDIT_ARCH=y | 55 | CONFIG_AUDIT_ARCH=y |
| 56 | CONFIG_GENERIC_BUG=y | 56 | CONFIG_GENERIC_BUG=y |
| 57 | # CONFIG_DEFAULT_UIMAGE is not set | 57 | # CONFIG_DEFAULT_UIMAGE is not set |
| 58 | # CONFIG_PPC_DCR_NATIVE is not set | 58 | # CONFIG_PPC_DCR_NATIVE is not set |
| 59 | # CONFIG_PPC_DCR_MMIO is not set | 59 | # CONFIG_PPC_DCR_MMIO is not set |
| 60 | # CONFIG_PPC_OF_PLATFORM_PCI is not set | 60 | # CONFIG_PPC_OF_PLATFORM_PCI is not set |
| 61 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 61 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
| 62 | 62 | ||
| 63 | # | 63 | # |
| 64 | # General setup | 64 | # General setup |
| 65 | # | 65 | # |
| 66 | CONFIG_EXPERIMENTAL=y | 66 | CONFIG_EXPERIMENTAL=y |
| 67 | CONFIG_LOCK_KERNEL=y | 67 | CONFIG_LOCK_KERNEL=y |
| 68 | CONFIG_INIT_ENV_ARG_LIMIT=32 | 68 | CONFIG_INIT_ENV_ARG_LIMIT=32 |
| 69 | CONFIG_LOCALVERSION="" | 69 | CONFIG_LOCALVERSION="" |
| 70 | CONFIG_LOCALVERSION_AUTO=y | 70 | CONFIG_LOCALVERSION_AUTO=y |
| 71 | CONFIG_SWAP=y | 71 | CONFIG_SWAP=y |
| 72 | CONFIG_SYSVIPC=y | 72 | CONFIG_SYSVIPC=y |
| 73 | CONFIG_SYSVIPC_SYSCTL=y | 73 | CONFIG_SYSVIPC_SYSCTL=y |
| 74 | CONFIG_POSIX_MQUEUE=y | 74 | CONFIG_POSIX_MQUEUE=y |
| 75 | # CONFIG_BSD_PROCESS_ACCT is not set | 75 | # CONFIG_BSD_PROCESS_ACCT is not set |
| 76 | CONFIG_TASKSTATS=y | 76 | CONFIG_TASKSTATS=y |
| 77 | CONFIG_TASK_DELAY_ACCT=y | 77 | CONFIG_TASK_DELAY_ACCT=y |
| 78 | CONFIG_TASK_XACCT=y | 78 | CONFIG_TASK_XACCT=y |
| 79 | CONFIG_TASK_IO_ACCOUNTING=y | 79 | CONFIG_TASK_IO_ACCOUNTING=y |
| 80 | CONFIG_AUDIT=y | 80 | CONFIG_AUDIT=y |
| 81 | CONFIG_AUDITSYSCALL=y | 81 | CONFIG_AUDITSYSCALL=y |
| 82 | CONFIG_AUDIT_TREE=y | 82 | CONFIG_AUDIT_TREE=y |
| 83 | CONFIG_IKCONFIG=y | 83 | CONFIG_IKCONFIG=y |
| 84 | CONFIG_IKCONFIG_PROC=y | 84 | CONFIG_IKCONFIG_PROC=y |
| 85 | CONFIG_LOG_BUF_SHIFT=17 | 85 | CONFIG_LOG_BUF_SHIFT=17 |
| 86 | CONFIG_CGROUPS=y | 86 | CONFIG_CGROUPS=y |
| 87 | # CONFIG_CGROUP_DEBUG is not set | 87 | # CONFIG_CGROUP_DEBUG is not set |
| 88 | CONFIG_CGROUP_NS=y | 88 | CONFIG_CGROUP_NS=y |
| 89 | CONFIG_CGROUP_FREEZER=y | 89 | CONFIG_CGROUP_FREEZER=y |
| 90 | CONFIG_CGROUP_DEVICE=y | 90 | CONFIG_CGROUP_DEVICE=y |
| 91 | CONFIG_CPUSETS=y | 91 | CONFIG_CPUSETS=y |
| 92 | # CONFIG_GROUP_SCHED is not set | 92 | # CONFIG_GROUP_SCHED is not set |
| 93 | CONFIG_CGROUP_CPUACCT=y | 93 | CONFIG_CGROUP_CPUACCT=y |
| 94 | # CONFIG_RESOURCE_COUNTERS is not set | 94 | # CONFIG_RESOURCE_COUNTERS is not set |
| 95 | CONFIG_SYSFS_DEPRECATED=y | 95 | CONFIG_SYSFS_DEPRECATED=y |
| 96 | CONFIG_SYSFS_DEPRECATED_V2=y | 96 | CONFIG_SYSFS_DEPRECATED_V2=y |
| 97 | CONFIG_PROC_PID_CPUSET=y | 97 | CONFIG_PROC_PID_CPUSET=y |
| 98 | CONFIG_RELAY=y | 98 | CONFIG_RELAY=y |
| 99 | CONFIG_NAMESPACES=y | 99 | CONFIG_NAMESPACES=y |
| 100 | # CONFIG_UTS_NS is not set | 100 | # CONFIG_UTS_NS is not set |
| 101 | # CONFIG_IPC_NS is not set | 101 | # CONFIG_IPC_NS is not set |
| 102 | # CONFIG_USER_NS is not set | 102 | # CONFIG_USER_NS is not set |
| 103 | # CONFIG_PID_NS is not set | 103 | # CONFIG_PID_NS is not set |
| 104 | CONFIG_BLK_DEV_INITRD=y | 104 | CONFIG_BLK_DEV_INITRD=y |
| 105 | CONFIG_INITRAMFS_SOURCE="" | 105 | CONFIG_INITRAMFS_SOURCE="" |
| 106 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 106 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y |
| 107 | CONFIG_SYSCTL=y | 107 | CONFIG_SYSCTL=y |
| 108 | # CONFIG_EMBEDDED is not set | 108 | # CONFIG_EMBEDDED is not set |
| 109 | CONFIG_SYSCTL_SYSCALL=y | 109 | CONFIG_SYSCTL_SYSCALL=y |
| 110 | CONFIG_KALLSYMS=y | 110 | CONFIG_KALLSYMS=y |
| 111 | CONFIG_KALLSYMS_ALL=y | 111 | CONFIG_KALLSYMS_ALL=y |
| 112 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | 112 | # CONFIG_KALLSYMS_EXTRA_PASS is not set |
| 113 | CONFIG_HOTPLUG=y | 113 | CONFIG_HOTPLUG=y |
| 114 | CONFIG_PRINTK=y | 114 | CONFIG_PRINTK=y |
| 115 | CONFIG_BUG=y | 115 | CONFIG_BUG=y |
| 116 | CONFIG_ELF_CORE=y | 116 | CONFIG_ELF_CORE=y |
| 117 | CONFIG_PCSPKR_PLATFORM=y | 117 | CONFIG_PCSPKR_PLATFORM=y |
| 118 | # CONFIG_COMPAT_BRK is not set | 118 | # CONFIG_COMPAT_BRK is not set |
| 119 | CONFIG_BASE_FULL=y | 119 | CONFIG_BASE_FULL=y |
| 120 | CONFIG_FUTEX=y | 120 | CONFIG_FUTEX=y |
| 121 | CONFIG_ANON_INODES=y | 121 | CONFIG_ANON_INODES=y |
| 122 | CONFIG_EPOLL=y | 122 | CONFIG_EPOLL=y |
| 123 | CONFIG_SIGNALFD=y | 123 | CONFIG_SIGNALFD=y |
| 124 | CONFIG_TIMERFD=y | 124 | CONFIG_TIMERFD=y |
| 125 | CONFIG_EVENTFD=y | 125 | CONFIG_EVENTFD=y |
| 126 | CONFIG_SHMEM=y | 126 | CONFIG_SHMEM=y |
| 127 | CONFIG_AIO=y | 127 | CONFIG_AIO=y |
| 128 | CONFIG_VM_EVENT_COUNTERS=y | 128 | CONFIG_VM_EVENT_COUNTERS=y |
| 129 | CONFIG_PCI_QUIRKS=y | 129 | CONFIG_PCI_QUIRKS=y |
| 130 | CONFIG_SLUB_DEBUG=y | 130 | CONFIG_SLUB_DEBUG=y |
| 131 | # CONFIG_SLAB is not set | 131 | # CONFIG_SLAB is not set |
| 132 | CONFIG_SLUB=y | 132 | CONFIG_SLUB=y |
| 133 | # CONFIG_SLOB is not set | 133 | # CONFIG_SLOB is not set |
| 134 | CONFIG_PROFILING=y | 134 | CONFIG_PROFILING=y |
| 135 | CONFIG_TRACEPOINTS=y | 135 | CONFIG_TRACEPOINTS=y |
| 136 | CONFIG_MARKERS=y | 136 | CONFIG_MARKERS=y |
| 137 | CONFIG_OPROFILE=y | 137 | CONFIG_OPROFILE=y |
| 138 | CONFIG_HAVE_OPROFILE=y | 138 | CONFIG_HAVE_OPROFILE=y |
| 139 | CONFIG_KPROBES=y | 139 | CONFIG_KPROBES=y |
| 140 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | 140 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y |
| 141 | CONFIG_KRETPROBES=y | 141 | CONFIG_KRETPROBES=y |
| 142 | CONFIG_HAVE_IOREMAP_PROT=y | 142 | CONFIG_HAVE_IOREMAP_PROT=y |
| 143 | CONFIG_HAVE_KPROBES=y | 143 | CONFIG_HAVE_KPROBES=y |
| 144 | CONFIG_HAVE_KRETPROBES=y | 144 | CONFIG_HAVE_KRETPROBES=y |
| 145 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 145 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
| 146 | CONFIG_HAVE_DMA_ATTRS=y | 146 | CONFIG_HAVE_DMA_ATTRS=y |
| 147 | CONFIG_USE_GENERIC_SMP_HELPERS=y | 147 | CONFIG_USE_GENERIC_SMP_HELPERS=y |
| 148 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | 148 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set |
| 149 | CONFIG_SLABINFO=y | 149 | CONFIG_SLABINFO=y |
| 150 | CONFIG_RT_MUTEXES=y | 150 | CONFIG_RT_MUTEXES=y |
| 151 | # CONFIG_TINY_SHMEM is not set | 151 | # CONFIG_TINY_SHMEM is not set |
| 152 | CONFIG_BASE_SMALL=0 | 152 | CONFIG_BASE_SMALL=0 |
| 153 | CONFIG_MODULES=y | 153 | CONFIG_MODULES=y |
| 154 | # CONFIG_MODULE_FORCE_LOAD is not set | 154 | # CONFIG_MODULE_FORCE_LOAD is not set |
| 155 | CONFIG_MODULE_UNLOAD=y | 155 | CONFIG_MODULE_UNLOAD=y |
| 156 | # CONFIG_MODULE_FORCE_UNLOAD is not set | 156 | # CONFIG_MODULE_FORCE_UNLOAD is not set |
| 157 | CONFIG_MODVERSIONS=y | 157 | CONFIG_MODVERSIONS=y |
| 158 | CONFIG_MODULE_SRCVERSION_ALL=y | 158 | CONFIG_MODULE_SRCVERSION_ALL=y |
| 159 | CONFIG_KMOD=y | 159 | CONFIG_KMOD=y |
| 160 | CONFIG_STOP_MACHINE=y | 160 | CONFIG_STOP_MACHINE=y |
| 161 | CONFIG_BLOCK=y | 161 | CONFIG_BLOCK=y |
| 162 | # CONFIG_BLK_DEV_IO_TRACE is not set | 162 | # CONFIG_BLK_DEV_IO_TRACE is not set |
| 163 | CONFIG_BLK_DEV_BSG=y | 163 | CONFIG_BLK_DEV_BSG=y |
| 164 | # CONFIG_BLK_DEV_INTEGRITY is not set | 164 | # CONFIG_BLK_DEV_INTEGRITY is not set |
| 165 | CONFIG_BLOCK_COMPAT=y | 165 | CONFIG_BLOCK_COMPAT=y |
| 166 | 166 | ||
| 167 | # | 167 | # |
| 168 | # IO Schedulers | 168 | # IO Schedulers |
| 169 | # | 169 | # |
| 170 | CONFIG_IOSCHED_NOOP=y | 170 | CONFIG_IOSCHED_NOOP=y |
| 171 | CONFIG_IOSCHED_AS=y | 171 | CONFIG_IOSCHED_AS=y |
| 172 | CONFIG_IOSCHED_DEADLINE=y | 172 | CONFIG_IOSCHED_DEADLINE=y |
| 173 | CONFIG_IOSCHED_CFQ=y | 173 | CONFIG_IOSCHED_CFQ=y |
| 174 | CONFIG_DEFAULT_AS=y | 174 | CONFIG_DEFAULT_AS=y |
| 175 | # CONFIG_DEFAULT_DEADLINE is not set | 175 | # CONFIG_DEFAULT_DEADLINE is not set |
| 176 | # CONFIG_DEFAULT_CFQ is not set | 176 | # CONFIG_DEFAULT_CFQ is not set |
| 177 | # CONFIG_DEFAULT_NOOP is not set | 177 | # CONFIG_DEFAULT_NOOP is not set |
| 178 | CONFIG_DEFAULT_IOSCHED="anticipatory" | 178 | CONFIG_DEFAULT_IOSCHED="anticipatory" |
| 179 | CONFIG_CLASSIC_RCU=y | 179 | CONFIG_CLASSIC_RCU=y |
| 180 | CONFIG_FREEZER=y | 180 | CONFIG_FREEZER=y |
| 181 | CONFIG_PPC_MSI_BITMAP=y | 181 | CONFIG_PPC_MSI_BITMAP=y |
| 182 | 182 | ||
| 183 | # | 183 | # |
| 184 | # Platform support | 184 | # Platform support |
| 185 | # | 185 | # |
| 186 | CONFIG_PPC_MULTIPLATFORM=y | 186 | CONFIG_PPC_MULTIPLATFORM=y |
| 187 | CONFIG_PPC_PSERIES=y | 187 | CONFIG_PPC_PSERIES=y |
| 188 | CONFIG_PPC_SPLPAR=y | 188 | CONFIG_PPC_SPLPAR=y |
| 189 | CONFIG_EEH=y | 189 | CONFIG_EEH=y |
| 190 | CONFIG_SCANLOG=m | 190 | CONFIG_SCANLOG=m |
| 191 | CONFIG_LPARCFG=y | 191 | CONFIG_LPARCFG=y |
| 192 | CONFIG_PPC_SMLPAR=y | 192 | CONFIG_PPC_SMLPAR=y |
| 193 | CONFIG_CMM=y | 193 | CONFIG_CMM=y |
| 194 | # CONFIG_PPC_ISERIES is not set | 194 | # CONFIG_PPC_ISERIES is not set |
| 195 | # CONFIG_PPC_PMAC is not set | 195 | # CONFIG_PPC_PMAC is not set |
| 196 | # CONFIG_PPC_MAPLE is not set | 196 | # CONFIG_PPC_MAPLE is not set |
| 197 | # CONFIG_PPC_PASEMI is not set | 197 | # CONFIG_PPC_PASEMI is not set |
| 198 | # CONFIG_PPC_PS3 is not set | 198 | # CONFIG_PPC_PS3 is not set |
| 199 | # CONFIG_PPC_CELL is not set | 199 | # CONFIG_PPC_CELL is not set |
| 200 | # CONFIG_PPC_CELL_NATIVE is not set | 200 | # CONFIG_PPC_CELL_NATIVE is not set |
| 201 | # CONFIG_PPC_IBM_CELL_BLADE is not set | 201 | # CONFIG_PPC_IBM_CELL_BLADE is not set |
| 202 | # CONFIG_PPC_CELLEB is not set | 202 | # CONFIG_PPC_CELLEB is not set |
| 203 | # CONFIG_PQ2ADS is not set | 203 | # CONFIG_PQ2ADS is not set |
| 204 | CONFIG_PPC_NATIVE=y | 204 | CONFIG_PPC_NATIVE=y |
| 205 | # CONFIG_UDBG_RTAS_CONSOLE is not set | 205 | # CONFIG_UDBG_RTAS_CONSOLE is not set |
| 206 | CONFIG_XICS=y | 206 | CONFIG_XICS=y |
| 207 | # CONFIG_IPIC is not set | 207 | # CONFIG_IPIC is not set |
| 208 | CONFIG_MPIC=y | 208 | CONFIG_MPIC=y |
| 209 | # CONFIG_MPIC_WEIRD is not set | 209 | # CONFIG_MPIC_WEIRD is not set |
| 210 | CONFIG_PPC_I8259=y | 210 | CONFIG_PPC_I8259=y |
| 211 | # CONFIG_U3_DART is not set | 211 | # CONFIG_U3_DART is not set |
| 212 | CONFIG_PPC_RTAS=y | 212 | CONFIG_PPC_RTAS=y |
| 213 | CONFIG_RTAS_ERROR_LOGGING=y | 213 | CONFIG_RTAS_ERROR_LOGGING=y |
| 214 | CONFIG_RTAS_PROC=y | 214 | CONFIG_RTAS_PROC=y |
| 215 | CONFIG_RTAS_FLASH=m | 215 | CONFIG_RTAS_FLASH=m |
| 216 | # CONFIG_MMIO_NVRAM is not set | 216 | # CONFIG_MMIO_NVRAM is not set |
| 217 | CONFIG_IBMVIO=y | 217 | CONFIG_IBMVIO=y |
| 218 | CONFIG_IBMEBUS=y | 218 | CONFIG_IBMEBUS=y |
| 219 | # CONFIG_PPC_MPC106 is not set | 219 | # CONFIG_PPC_MPC106 is not set |
| 220 | # CONFIG_PPC_970_NAP is not set | 220 | # CONFIG_PPC_970_NAP is not set |
| 221 | # CONFIG_PPC_INDIRECT_IO is not set | 221 | # CONFIG_PPC_INDIRECT_IO is not set |
| 222 | # CONFIG_GENERIC_IOMAP is not set | 222 | # CONFIG_GENERIC_IOMAP is not set |
| 223 | # CONFIG_CPU_FREQ is not set | 223 | # CONFIG_CPU_FREQ is not set |
| 224 | # CONFIG_FSL_ULI1575 is not set | 224 | # CONFIG_FSL_ULI1575 is not set |
| 225 | 225 | ||
| 226 | # | 226 | # |
| 227 | # Kernel options | 227 | # Kernel options |
| 228 | # | 228 | # |
| 229 | CONFIG_TICK_ONESHOT=y | 229 | CONFIG_TICK_ONESHOT=y |
| 230 | CONFIG_NO_HZ=y | 230 | CONFIG_NO_HZ=y |
| 231 | CONFIG_HIGH_RES_TIMERS=y | 231 | CONFIG_HIGH_RES_TIMERS=y |
| 232 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | 232 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y |
| 233 | # CONFIG_HZ_100 is not set | 233 | # CONFIG_HZ_100 is not set |
| 234 | CONFIG_HZ_250=y | 234 | CONFIG_HZ_250=y |
| 235 | # CONFIG_HZ_300 is not set | 235 | # CONFIG_HZ_300 is not set |
| 236 | # CONFIG_HZ_1000 is not set | 236 | # CONFIG_HZ_1000 is not set |
| 237 | CONFIG_HZ=250 | 237 | CONFIG_HZ=250 |
| 238 | CONFIG_SCHED_HRTICK=y | 238 | CONFIG_SCHED_HRTICK=y |
| 239 | CONFIG_PREEMPT_NONE=y | 239 | CONFIG_PREEMPT_NONE=y |
| 240 | # CONFIG_PREEMPT_VOLUNTARY is not set | 240 | # CONFIG_PREEMPT_VOLUNTARY is not set |
| 241 | # CONFIG_PREEMPT is not set | 241 | # CONFIG_PREEMPT is not set |
| 242 | CONFIG_BINFMT_ELF=y | 242 | CONFIG_BINFMT_ELF=y |
| 243 | CONFIG_COMPAT_BINFMT_ELF=y | 243 | CONFIG_COMPAT_BINFMT_ELF=y |
| 244 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 244 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
| 245 | # CONFIG_HAVE_AOUT is not set | 245 | # CONFIG_HAVE_AOUT is not set |
| 246 | CONFIG_BINFMT_MISC=m | 246 | CONFIG_BINFMT_MISC=m |
| 247 | CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y | 247 | CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y |
| 248 | CONFIG_IOMMU_VMERGE=y | 248 | CONFIG_IOMMU_VMERGE=y |
| 249 | CONFIG_IOMMU_HELPER=y | 249 | CONFIG_IOMMU_HELPER=y |
| 250 | CONFIG_HOTPLUG_CPU=y | 250 | CONFIG_HOTPLUG_CPU=y |
| 251 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | 251 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y |
| 252 | CONFIG_ARCH_HAS_WALK_MEMORY=y | 252 | CONFIG_ARCH_HAS_WALK_MEMORY=y |
| 253 | CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y | 253 | CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y |
| 254 | CONFIG_KEXEC=y | 254 | CONFIG_KEXEC=y |
| 255 | # CONFIG_PHYP_DUMP is not set | 255 | # CONFIG_PHYP_DUMP is not set |
| 256 | CONFIG_IRQ_ALL_CPUS=y | 256 | CONFIG_IRQ_ALL_CPUS=y |
| 257 | CONFIG_NUMA=y | 257 | CONFIG_NUMA=y |
| 258 | CONFIG_NODES_SHIFT=4 | 258 | CONFIG_NODES_SHIFT=4 |
| 259 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y | 259 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y |
| 260 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | 260 | CONFIG_ARCH_SPARSEMEM_ENABLE=y |
| 261 | CONFIG_ARCH_SPARSEMEM_DEFAULT=y | 261 | CONFIG_ARCH_SPARSEMEM_DEFAULT=y |
| 262 | CONFIG_ARCH_POPULATES_NODE_MAP=y | 262 | CONFIG_ARCH_POPULATES_NODE_MAP=y |
| 263 | CONFIG_SELECT_MEMORY_MODEL=y | 263 | CONFIG_SELECT_MEMORY_MODEL=y |
| 264 | # CONFIG_FLATMEM_MANUAL is not set | 264 | # CONFIG_FLATMEM_MANUAL is not set |
| 265 | # CONFIG_DISCONTIGMEM_MANUAL is not set | 265 | # CONFIG_DISCONTIGMEM_MANUAL is not set |
| 266 | CONFIG_SPARSEMEM_MANUAL=y | 266 | CONFIG_SPARSEMEM_MANUAL=y |
| 267 | CONFIG_SPARSEMEM=y | 267 | CONFIG_SPARSEMEM=y |
| 268 | CONFIG_NEED_MULTIPLE_NODES=y | 268 | CONFIG_NEED_MULTIPLE_NODES=y |
| 269 | CONFIG_HAVE_MEMORY_PRESENT=y | 269 | CONFIG_HAVE_MEMORY_PRESENT=y |
| 270 | CONFIG_SPARSEMEM_EXTREME=y | 270 | CONFIG_SPARSEMEM_EXTREME=y |
| 271 | CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y | 271 | CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y |
| 272 | CONFIG_SPARSEMEM_VMEMMAP=y | 272 | CONFIG_SPARSEMEM_VMEMMAP=y |
| 273 | # CONFIG_MEMORY_HOTPLUG is not set | 273 | # CONFIG_MEMORY_HOTPLUG is not set |
| 274 | CONFIG_PAGEFLAGS_EXTENDED=y | 274 | CONFIG_PAGEFLAGS_EXTENDED=y |
| 275 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 275 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
| 276 | CONFIG_MIGRATION=y | 276 | CONFIG_MIGRATION=y |
| 277 | CONFIG_RESOURCES_64BIT=y | 277 | CONFIG_RESOURCES_64BIT=y |
| 278 | CONFIG_PHYS_ADDR_T_64BIT=y | 278 | CONFIG_PHYS_ADDR_T_64BIT=y |
| 279 | CONFIG_ZONE_DMA_FLAG=1 | 279 | CONFIG_ZONE_DMA_FLAG=1 |
| 280 | CONFIG_BOUNCE=y | 280 | CONFIG_BOUNCE=y |
| 281 | CONFIG_UNEVICTABLE_LRU=y | 281 | CONFIG_UNEVICTABLE_LRU=y |
| 282 | CONFIG_NODES_SPAN_OTHER_NODES=y | 282 | CONFIG_NODES_SPAN_OTHER_NODES=y |
| 283 | # CONFIG_PPC_HAS_HASH_64K is not set | 283 | # CONFIG_PPC_HAS_HASH_64K is not set |
| 284 | # CONFIG_PPC_64K_PAGES is not set | 284 | # CONFIG_PPC_64K_PAGES is not set |
| 285 | CONFIG_FORCE_MAX_ZONEORDER=13 | 285 | CONFIG_FORCE_MAX_ZONEORDER=13 |
| 286 | CONFIG_SCHED_SMT=y | 286 | CONFIG_SCHED_SMT=y |
| 287 | CONFIG_PROC_DEVICETREE=y | 287 | CONFIG_PROC_DEVICETREE=y |
| 288 | # CONFIG_CMDLINE_BOOL is not set | 288 | # CONFIG_CMDLINE_BOOL is not set |
| 289 | CONFIG_EXTRA_TARGETS="" | 289 | CONFIG_EXTRA_TARGETS="" |
| 290 | # CONFIG_PM is not set | 290 | # CONFIG_PM is not set |
| 291 | CONFIG_SECCOMP=y | 291 | CONFIG_SECCOMP=y |
| 292 | CONFIG_ISA_DMA_API=y | 292 | CONFIG_ISA_DMA_API=y |
| 293 | 293 | ||
| 294 | # | 294 | # |
| 295 | # Bus options | 295 | # Bus options |
| 296 | # | 296 | # |
| 297 | CONFIG_ZONE_DMA=y | 297 | CONFIG_ZONE_DMA=y |
| 298 | CONFIG_GENERIC_ISA_DMA=y | 298 | CONFIG_GENERIC_ISA_DMA=y |
| 299 | # CONFIG_PPC_INDIRECT_PCI is not set | 299 | # CONFIG_PPC_INDIRECT_PCI is not set |
| 300 | CONFIG_PCI=y | 300 | CONFIG_PCI=y |
| 301 | CONFIG_PCI_DOMAINS=y | 301 | CONFIG_PCI_DOMAINS=y |
| 302 | CONFIG_PCI_SYSCALL=y | 302 | CONFIG_PCI_SYSCALL=y |
| 303 | # CONFIG_PCIEPORTBUS is not set | 303 | # CONFIG_PCIEPORTBUS is not set |
| 304 | CONFIG_ARCH_SUPPORTS_MSI=y | 304 | CONFIG_ARCH_SUPPORTS_MSI=y |
| 305 | CONFIG_PCI_MSI=y | 305 | CONFIG_PCI_MSI=y |
| 306 | # CONFIG_PCI_LEGACY is not set | 306 | # CONFIG_PCI_LEGACY is not set |
| 307 | # CONFIG_PCI_DEBUG is not set | 307 | # CONFIG_PCI_DEBUG is not set |
| 308 | # CONFIG_PCCARD is not set | 308 | # CONFIG_PCCARD is not set |
| 309 | CONFIG_HOTPLUG_PCI=m | 309 | CONFIG_HOTPLUG_PCI=m |
| 310 | # CONFIG_HOTPLUG_PCI_FAKE is not set | 310 | # CONFIG_HOTPLUG_PCI_FAKE is not set |
| 311 | # CONFIG_HOTPLUG_PCI_CPCI is not set | 311 | # CONFIG_HOTPLUG_PCI_CPCI is not set |
| 312 | # CONFIG_HOTPLUG_PCI_SHPC is not set | 312 | # CONFIG_HOTPLUG_PCI_SHPC is not set |
| 313 | CONFIG_HOTPLUG_PCI_RPA=m | 313 | CONFIG_HOTPLUG_PCI_RPA=m |
| 314 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m | 314 | CONFIG_HOTPLUG_PCI_RPA_DLPAR=m |
| 315 | # CONFIG_HAS_RAPIDIO is not set | 315 | # CONFIG_HAS_RAPIDIO is not set |
| 316 | # CONFIG_RELOCATABLE is not set | 316 | # CONFIG_RELOCATABLE is not set |
| 317 | CONFIG_PAGE_OFFSET=0xc000000000000000 | 317 | CONFIG_PAGE_OFFSET=0xc000000000000000 |
| 318 | CONFIG_KERNEL_START=0xc000000000000000 | 318 | CONFIG_KERNEL_START=0xc000000000000000 |
| 319 | CONFIG_PHYSICAL_START=0x00000000 | 319 | CONFIG_PHYSICAL_START=0x00000000 |
| 320 | CONFIG_NET=y | 320 | CONFIG_NET=y |
| 321 | 321 | ||
| 322 | # | 322 | # |
| 323 | # Networking options | 323 | # Networking options |
| 324 | # | 324 | # |
| 325 | CONFIG_PACKET=y | 325 | CONFIG_PACKET=y |
| 326 | # CONFIG_PACKET_MMAP is not set | 326 | # CONFIG_PACKET_MMAP is not set |
| 327 | CONFIG_UNIX=y | 327 | CONFIG_UNIX=y |
| 328 | CONFIG_XFRM=y | 328 | CONFIG_XFRM=y |
| 329 | CONFIG_XFRM_USER=m | 329 | CONFIG_XFRM_USER=m |
| 330 | # CONFIG_XFRM_SUB_POLICY is not set | 330 | # CONFIG_XFRM_SUB_POLICY is not set |
| 331 | # CONFIG_XFRM_MIGRATE is not set | 331 | # CONFIG_XFRM_MIGRATE is not set |
| 332 | # CONFIG_XFRM_STATISTICS is not set | 332 | # CONFIG_XFRM_STATISTICS is not set |
| 333 | CONFIG_XFRM_IPCOMP=m | 333 | CONFIG_XFRM_IPCOMP=m |
| 334 | CONFIG_NET_KEY=m | 334 | CONFIG_NET_KEY=m |
| 335 | # CONFIG_NET_KEY_MIGRATE is not set | 335 | # CONFIG_NET_KEY_MIGRATE is not set |
| 336 | CONFIG_INET=y | 336 | CONFIG_INET=y |
| 337 | CONFIG_IP_MULTICAST=y | 337 | CONFIG_IP_MULTICAST=y |
| 338 | # CONFIG_IP_ADVANCED_ROUTER is not set | 338 | # CONFIG_IP_ADVANCED_ROUTER is not set |
| 339 | CONFIG_IP_FIB_HASH=y | 339 | CONFIG_IP_FIB_HASH=y |
| 340 | # CONFIG_IP_PNP is not set | 340 | # CONFIG_IP_PNP is not set |
| 341 | CONFIG_NET_IPIP=y | 341 | CONFIG_NET_IPIP=y |
| 342 | # CONFIG_NET_IPGRE is not set | 342 | # CONFIG_NET_IPGRE is not set |
| 343 | # CONFIG_IP_MROUTE is not set | 343 | # CONFIG_IP_MROUTE is not set |
| 344 | # CONFIG_ARPD is not set | 344 | # CONFIG_ARPD is not set |
| 345 | CONFIG_SYN_COOKIES=y | 345 | CONFIG_SYN_COOKIES=y |
| 346 | CONFIG_INET_AH=m | 346 | CONFIG_INET_AH=m |
| 347 | CONFIG_INET_ESP=m | 347 | CONFIG_INET_ESP=m |
| 348 | CONFIG_INET_IPCOMP=m | 348 | CONFIG_INET_IPCOMP=m |
| 349 | CONFIG_INET_XFRM_TUNNEL=m | 349 | CONFIG_INET_XFRM_TUNNEL=m |
| 350 | CONFIG_INET_TUNNEL=y | 350 | CONFIG_INET_TUNNEL=y |
| 351 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | 351 | CONFIG_INET_XFRM_MODE_TRANSPORT=y |
| 352 | CONFIG_INET_XFRM_MODE_TUNNEL=y | 352 | CONFIG_INET_XFRM_MODE_TUNNEL=y |
| 353 | CONFIG_INET_XFRM_MODE_BEET=y | 353 | CONFIG_INET_XFRM_MODE_BEET=y |
| 354 | CONFIG_INET_LRO=y | 354 | CONFIG_INET_LRO=y |
| 355 | CONFIG_INET_DIAG=y | 355 | CONFIG_INET_DIAG=y |
| 356 | CONFIG_INET_TCP_DIAG=y | 356 | CONFIG_INET_TCP_DIAG=y |
| 357 | # CONFIG_TCP_CONG_ADVANCED is not set | 357 | # CONFIG_TCP_CONG_ADVANCED is not set |
| 358 | CONFIG_TCP_CONG_CUBIC=y | 358 | CONFIG_TCP_CONG_CUBIC=y |
| 359 | CONFIG_DEFAULT_TCP_CONG="cubic" | 359 | CONFIG_DEFAULT_TCP_CONG="cubic" |
| 360 | # CONFIG_TCP_MD5SIG is not set | 360 | # CONFIG_TCP_MD5SIG is not set |
| 361 | # CONFIG_IPV6 is not set | 361 | # CONFIG_IPV6 is not set |
| 362 | # CONFIG_NETWORK_SECMARK is not set | 362 | # CONFIG_NETWORK_SECMARK is not set |
| 363 | CONFIG_NETFILTER=y | 363 | CONFIG_NETFILTER=y |
| 364 | # CONFIG_NETFILTER_DEBUG is not set | 364 | # CONFIG_NETFILTER_DEBUG is not set |
| 365 | CONFIG_NETFILTER_ADVANCED=y | 365 | CONFIG_NETFILTER_ADVANCED=y |
| 366 | 366 | ||
| 367 | # | 367 | # |
| 368 | # Core Netfilter Configuration | 368 | # Core Netfilter Configuration |
| 369 | # | 369 | # |
| 370 | CONFIG_NETFILTER_NETLINK=m | 370 | CONFIG_NETFILTER_NETLINK=m |
| 371 | CONFIG_NETFILTER_NETLINK_QUEUE=m | 371 | CONFIG_NETFILTER_NETLINK_QUEUE=m |
| 372 | CONFIG_NETFILTER_NETLINK_LOG=m | 372 | CONFIG_NETFILTER_NETLINK_LOG=m |
| 373 | CONFIG_NF_CONNTRACK=m | 373 | CONFIG_NF_CONNTRACK=m |
| 374 | CONFIG_NF_CT_ACCT=y | 374 | CONFIG_NF_CT_ACCT=y |
| 375 | CONFIG_NF_CONNTRACK_MARK=y | 375 | CONFIG_NF_CONNTRACK_MARK=y |
| 376 | CONFIG_NF_CONNTRACK_EVENTS=y | 376 | CONFIG_NF_CONNTRACK_EVENTS=y |
| 377 | # CONFIG_NF_CT_PROTO_DCCP is not set | 377 | # CONFIG_NF_CT_PROTO_DCCP is not set |
| 378 | # CONFIG_NF_CT_PROTO_SCTP is not set | 378 | # CONFIG_NF_CT_PROTO_SCTP is not set |
| 379 | CONFIG_NF_CT_PROTO_UDPLITE=m | 379 | CONFIG_NF_CT_PROTO_UDPLITE=m |
| 380 | # CONFIG_NF_CONNTRACK_AMANDA is not set | 380 | # CONFIG_NF_CONNTRACK_AMANDA is not set |
| 381 | CONFIG_NF_CONNTRACK_FTP=m | 381 | CONFIG_NF_CONNTRACK_FTP=m |
| 382 | # CONFIG_NF_CONNTRACK_H323 is not set | 382 | # CONFIG_NF_CONNTRACK_H323 is not set |
| 383 | CONFIG_NF_CONNTRACK_IRC=m | 383 | CONFIG_NF_CONNTRACK_IRC=m |
| 384 | # CONFIG_NF_CONNTRACK_NETBIOS_NS is not set | 384 | # CONFIG_NF_CONNTRACK_NETBIOS_NS is not set |
| 385 | # CONFIG_NF_CONNTRACK_PPTP is not set | 385 | # CONFIG_NF_CONNTRACK_PPTP is not set |
| 386 | # CONFIG_NF_CONNTRACK_SANE is not set | 386 | # CONFIG_NF_CONNTRACK_SANE is not set |
| 387 | # CONFIG_NF_CONNTRACK_SIP is not set | 387 | # CONFIG_NF_CONNTRACK_SIP is not set |
| 388 | CONFIG_NF_CONNTRACK_TFTP=m | 388 | CONFIG_NF_CONNTRACK_TFTP=m |
| 389 | CONFIG_NF_CT_NETLINK=m | 389 | CONFIG_NF_CT_NETLINK=m |
| 390 | CONFIG_NETFILTER_XTABLES=m | 390 | CONFIG_NETFILTER_XTABLES=m |
| 391 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 391 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
| 392 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=m | 392 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=m |
| 393 | CONFIG_NETFILTER_XT_TARGET_MARK=m | 393 | CONFIG_NETFILTER_XT_TARGET_MARK=m |
| 394 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m | 394 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m |
| 395 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 395 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
| 396 | CONFIG_NETFILTER_XT_TARGET_RATEEST=m | 396 | CONFIG_NETFILTER_XT_TARGET_RATEEST=m |
| 397 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 397 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
| 398 | CONFIG_NETFILTER_XT_MATCH_COMMENT=m | 398 | CONFIG_NETFILTER_XT_MATCH_COMMENT=m |
| 399 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m | 399 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m |
| 400 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m | 400 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m |
| 401 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=m | 401 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=m |
| 402 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m | 402 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m |
| 403 | CONFIG_NETFILTER_XT_MATCH_DCCP=m | 403 | CONFIG_NETFILTER_XT_MATCH_DCCP=m |
| 404 | CONFIG_NETFILTER_XT_MATCH_DSCP=m | 404 | CONFIG_NETFILTER_XT_MATCH_DSCP=m |
| 405 | CONFIG_NETFILTER_XT_MATCH_ESP=m | 405 | CONFIG_NETFILTER_XT_MATCH_ESP=m |
| 406 | CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m | 406 | CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m |
| 407 | CONFIG_NETFILTER_XT_MATCH_HELPER=m | 407 | CONFIG_NETFILTER_XT_MATCH_HELPER=m |
| 408 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=m | 408 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=m |
| 409 | CONFIG_NETFILTER_XT_MATCH_LENGTH=m | 409 | CONFIG_NETFILTER_XT_MATCH_LENGTH=m |
| 410 | CONFIG_NETFILTER_XT_MATCH_LIMIT=m | 410 | CONFIG_NETFILTER_XT_MATCH_LIMIT=m |
| 411 | CONFIG_NETFILTER_XT_MATCH_MAC=m | 411 | CONFIG_NETFILTER_XT_MATCH_MAC=m |
| 412 | CONFIG_NETFILTER_XT_MATCH_MARK=m | 412 | CONFIG_NETFILTER_XT_MATCH_MARK=m |
| 413 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m | 413 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m |
| 414 | CONFIG_NETFILTER_XT_MATCH_OWNER=m | 414 | CONFIG_NETFILTER_XT_MATCH_OWNER=m |
| 415 | CONFIG_NETFILTER_XT_MATCH_POLICY=m | 415 | CONFIG_NETFILTER_XT_MATCH_POLICY=m |
| 416 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m | 416 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m |
| 417 | CONFIG_NETFILTER_XT_MATCH_QUOTA=m | 417 | CONFIG_NETFILTER_XT_MATCH_QUOTA=m |
| 418 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 418 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
| 419 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 419 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
| 420 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 420 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
| 421 | # CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT is not set | 421 | # CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT is not set |
| 422 | CONFIG_NETFILTER_XT_MATCH_SCTP=m | 422 | CONFIG_NETFILTER_XT_MATCH_SCTP=m |
| 423 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 423 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
| 424 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 424 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
| 425 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 425 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
| 426 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=m | 426 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=m |
| 427 | CONFIG_NETFILTER_XT_MATCH_TIME=m | 427 | CONFIG_NETFILTER_XT_MATCH_TIME=m |
| 428 | CONFIG_NETFILTER_XT_MATCH_U32=m | 428 | CONFIG_NETFILTER_XT_MATCH_U32=m |
| 429 | # CONFIG_IP_VS is not set | 429 | # CONFIG_IP_VS is not set |
| 430 | 430 | ||
| 431 | # | 431 | # |
| 432 | # IP: Netfilter Configuration | 432 | # IP: Netfilter Configuration |
| 433 | # | 433 | # |
| 434 | CONFIG_NF_DEFRAG_IPV4=m | 434 | CONFIG_NF_DEFRAG_IPV4=m |
| 435 | CONFIG_NF_CONNTRACK_IPV4=m | 435 | CONFIG_NF_CONNTRACK_IPV4=m |
| 436 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y | 436 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y |
| 437 | CONFIG_IP_NF_QUEUE=m | 437 | CONFIG_IP_NF_QUEUE=m |
| 438 | CONFIG_IP_NF_IPTABLES=m | 438 | CONFIG_IP_NF_IPTABLES=m |
| 439 | CONFIG_IP_NF_MATCH_ADDRTYPE=m | 439 | CONFIG_IP_NF_MATCH_ADDRTYPE=m |
| 440 | CONFIG_IP_NF_MATCH_AH=m | 440 | CONFIG_IP_NF_MATCH_AH=m |
| 441 | CONFIG_IP_NF_MATCH_ECN=m | 441 | CONFIG_IP_NF_MATCH_ECN=m |
| 442 | CONFIG_IP_NF_MATCH_TTL=m | 442 | CONFIG_IP_NF_MATCH_TTL=m |
| 443 | CONFIG_IP_NF_FILTER=m | 443 | CONFIG_IP_NF_FILTER=m |
| 444 | CONFIG_IP_NF_TARGET_REJECT=m | 444 | CONFIG_IP_NF_TARGET_REJECT=m |
| 445 | CONFIG_IP_NF_TARGET_LOG=m | 445 | CONFIG_IP_NF_TARGET_LOG=m |
| 446 | CONFIG_IP_NF_TARGET_ULOG=m | 446 | CONFIG_IP_NF_TARGET_ULOG=m |
| 447 | CONFIG_NF_NAT=m | 447 | CONFIG_NF_NAT=m |
| 448 | CONFIG_NF_NAT_NEEDED=y | 448 | CONFIG_NF_NAT_NEEDED=y |
| 449 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 449 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
| 450 | CONFIG_IP_NF_TARGET_NETMAP=m | 450 | CONFIG_IP_NF_TARGET_NETMAP=m |
| 451 | CONFIG_IP_NF_TARGET_REDIRECT=m | 451 | CONFIG_IP_NF_TARGET_REDIRECT=m |
| 452 | CONFIG_NF_NAT_SNMP_BASIC=m | 452 | CONFIG_NF_NAT_SNMP_BASIC=m |
| 453 | CONFIG_NF_NAT_PROTO_UDPLITE=m | 453 | CONFIG_NF_NAT_PROTO_UDPLITE=m |
| 454 | CONFIG_NF_NAT_FTP=m | 454 | CONFIG_NF_NAT_FTP=m |
| 455 | CONFIG_NF_NAT_IRC=m | 455 | CONFIG_NF_NAT_IRC=m |
| 456 | CONFIG_NF_NAT_TFTP=m | 456 | CONFIG_NF_NAT_TFTP=m |
| 457 | # CONFIG_NF_NAT_AMANDA is not set | 457 | # CONFIG_NF_NAT_AMANDA is not set |
| 458 | # CONFIG_NF_NAT_PPTP is not set | 458 | # CONFIG_NF_NAT_PPTP is not set |
| 459 | # CONFIG_NF_NAT_H323 is not set | 459 | # CONFIG_NF_NAT_H323 is not set |
| 460 | # CONFIG_NF_NAT_SIP is not set | 460 | # CONFIG_NF_NAT_SIP is not set |
| 461 | # CONFIG_IP_NF_MANGLE is not set | 461 | # CONFIG_IP_NF_MANGLE is not set |
| 462 | # CONFIG_IP_NF_RAW is not set | 462 | # CONFIG_IP_NF_RAW is not set |
| 463 | # CONFIG_IP_NF_ARPTABLES is not set | 463 | # CONFIG_IP_NF_ARPTABLES is not set |
| 464 | # CONFIG_IP_DCCP is not set | 464 | # CONFIG_IP_DCCP is not set |
| 465 | # CONFIG_IP_SCTP is not set | 465 | # CONFIG_IP_SCTP is not set |
| 466 | # CONFIG_TIPC is not set | 466 | # CONFIG_TIPC is not set |
| 467 | # CONFIG_ATM is not set | 467 | # CONFIG_ATM is not set |
| 468 | # CONFIG_BRIDGE is not set | 468 | # CONFIG_BRIDGE is not set |
| 469 | # CONFIG_NET_DSA is not set | 469 | # CONFIG_NET_DSA is not set |
| 470 | # CONFIG_VLAN_8021Q is not set | 470 | # CONFIG_VLAN_8021Q is not set |
| 471 | # CONFIG_DECNET is not set | 471 | # CONFIG_DECNET is not set |
| 472 | CONFIG_LLC=y | 472 | CONFIG_LLC=y |
| 473 | # CONFIG_LLC2 is not set | 473 | # CONFIG_LLC2 is not set |
| 474 | # CONFIG_IPX is not set | 474 | # CONFIG_IPX is not set |
| 475 | # CONFIG_ATALK is not set | 475 | # CONFIG_ATALK is not set |
| 476 | # CONFIG_X25 is not set | 476 | # CONFIG_X25 is not set |
| 477 | # CONFIG_LAPB is not set | 477 | # CONFIG_LAPB is not set |
| 478 | # CONFIG_ECONET is not set | 478 | # CONFIG_ECONET is not set |
| 479 | # CONFIG_WAN_ROUTER is not set | 479 | # CONFIG_WAN_ROUTER is not set |
| 480 | # CONFIG_NET_SCHED is not set | 480 | # CONFIG_NET_SCHED is not set |
| 481 | CONFIG_NET_CLS_ROUTE=y | 481 | CONFIG_NET_CLS_ROUTE=y |
| 482 | 482 | ||
| 483 | # | 483 | # |
| 484 | # Network testing | 484 | # Network testing |
| 485 | # | 485 | # |
| 486 | # CONFIG_NET_PKTGEN is not set | 486 | # CONFIG_NET_PKTGEN is not set |
| 487 | # CONFIG_NET_TCPPROBE is not set | 487 | # CONFIG_NET_TCPPROBE is not set |
| 488 | # CONFIG_HAMRADIO is not set | 488 | # CONFIG_HAMRADIO is not set |
| 489 | # CONFIG_CAN is not set | 489 | # CONFIG_CAN is not set |
| 490 | # CONFIG_IRDA is not set | 490 | # CONFIG_IRDA is not set |
| 491 | # CONFIG_BT is not set | 491 | # CONFIG_BT is not set |
| 492 | # CONFIG_AF_RXRPC is not set | 492 | # CONFIG_AF_RXRPC is not set |
| 493 | # CONFIG_PHONET is not set | 493 | # CONFIG_PHONET is not set |
| 494 | # CONFIG_WIRELESS is not set | 494 | # CONFIG_WIRELESS is not set |
| 495 | # CONFIG_RFKILL is not set | 495 | # CONFIG_RFKILL is not set |
| 496 | # CONFIG_NET_9P is not set | 496 | # CONFIG_NET_9P is not set |
| 497 | 497 | ||
| 498 | # | 498 | # |
| 499 | # Device Drivers | 499 | # Device Drivers |
| 500 | # | 500 | # |
| 501 | 501 | ||
| 502 | # | 502 | # |
| 503 | # Generic Driver Options | 503 | # Generic Driver Options |
| 504 | # | 504 | # |
| 505 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 505 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
| 506 | CONFIG_STANDALONE=y | 506 | CONFIG_STANDALONE=y |
| 507 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 507 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
| 508 | CONFIG_FW_LOADER=y | 508 | CONFIG_FW_LOADER=y |
| 509 | CONFIG_FIRMWARE_IN_KERNEL=y | 509 | CONFIG_FIRMWARE_IN_KERNEL=y |
| 510 | CONFIG_EXTRA_FIRMWARE="" | 510 | CONFIG_EXTRA_FIRMWARE="" |
| 511 | # CONFIG_DEBUG_DRIVER is not set | 511 | # CONFIG_DEBUG_DRIVER is not set |
| 512 | # CONFIG_DEBUG_DEVRES is not set | 512 | # CONFIG_DEBUG_DEVRES is not set |
| 513 | # CONFIG_SYS_HYPERVISOR is not set | 513 | # CONFIG_SYS_HYPERVISOR is not set |
| 514 | # CONFIG_CONNECTOR is not set | 514 | # CONFIG_CONNECTOR is not set |
| 515 | # CONFIG_MTD is not set | 515 | # CONFIG_MTD is not set |
| 516 | CONFIG_OF_DEVICE=y | 516 | CONFIG_OF_DEVICE=y |
| 517 | CONFIG_OF_I2C=y | 517 | CONFIG_OF_I2C=y |
| 518 | CONFIG_PARPORT=m | 518 | CONFIG_PARPORT=m |
| 519 | CONFIG_PARPORT_PC=m | 519 | CONFIG_PARPORT_PC=m |
| 520 | # CONFIG_PARPORT_SERIAL is not set | 520 | # CONFIG_PARPORT_SERIAL is not set |
| 521 | # CONFIG_PARPORT_PC_FIFO is not set | 521 | # CONFIG_PARPORT_PC_FIFO is not set |
| 522 | # CONFIG_PARPORT_PC_SUPERIO is not set | 522 | # CONFIG_PARPORT_PC_SUPERIO is not set |
| 523 | # CONFIG_PARPORT_GSC is not set | 523 | # CONFIG_PARPORT_GSC is not set |
| 524 | # CONFIG_PARPORT_AX88796 is not set | 524 | # CONFIG_PARPORT_AX88796 is not set |
| 525 | # CONFIG_PARPORT_1284 is not set | 525 | # CONFIG_PARPORT_1284 is not set |
| 526 | CONFIG_BLK_DEV=y | 526 | CONFIG_BLK_DEV=y |
| 527 | CONFIG_BLK_DEV_FD=m | 527 | CONFIG_BLK_DEV_FD=m |
| 528 | # CONFIG_PARIDE is not set | 528 | # CONFIG_PARIDE is not set |
| 529 | # CONFIG_BLK_CPQ_CISS_DA is not set | 529 | # CONFIG_BLK_CPQ_CISS_DA is not set |
| 530 | # CONFIG_BLK_DEV_DAC960 is not set | 530 | # CONFIG_BLK_DEV_DAC960 is not set |
| 531 | # CONFIG_BLK_DEV_UMEM is not set | 531 | # CONFIG_BLK_DEV_UMEM is not set |
| 532 | # CONFIG_BLK_DEV_COW_COMMON is not set | 532 | # CONFIG_BLK_DEV_COW_COMMON is not set |
| 533 | CONFIG_BLK_DEV_LOOP=y | 533 | CONFIG_BLK_DEV_LOOP=y |
| 534 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | 534 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set |
| 535 | CONFIG_BLK_DEV_NBD=m | 535 | CONFIG_BLK_DEV_NBD=m |
| 536 | # CONFIG_BLK_DEV_SX8 is not set | 536 | # CONFIG_BLK_DEV_SX8 is not set |
| 537 | # CONFIG_BLK_DEV_UB is not set | 537 | # CONFIG_BLK_DEV_UB is not set |
| 538 | CONFIG_BLK_DEV_RAM=y | 538 | CONFIG_BLK_DEV_RAM=y |
| 539 | CONFIG_BLK_DEV_RAM_COUNT=16 | 539 | CONFIG_BLK_DEV_RAM_COUNT=16 |
| 540 | CONFIG_BLK_DEV_RAM_SIZE=65536 | 540 | CONFIG_BLK_DEV_RAM_SIZE=65536 |
| 541 | # CONFIG_BLK_DEV_XIP is not set | 541 | # CONFIG_BLK_DEV_XIP is not set |
| 542 | # CONFIG_CDROM_PKTCDVD is not set | 542 | # CONFIG_CDROM_PKTCDVD is not set |
| 543 | # CONFIG_ATA_OVER_ETH is not set | 543 | # CONFIG_ATA_OVER_ETH is not set |
| 544 | # CONFIG_BLK_DEV_HD is not set | 544 | # CONFIG_BLK_DEV_HD is not set |
| 545 | CONFIG_MISC_DEVICES=y | 545 | CONFIG_MISC_DEVICES=y |
| 546 | # CONFIG_PHANTOM is not set | 546 | # CONFIG_PHANTOM is not set |
| 547 | # CONFIG_EEPROM_93CX6 is not set | 547 | # CONFIG_EEPROM_93CX6 is not set |
| 548 | # CONFIG_SGI_IOC4 is not set | 548 | # CONFIG_SGI_IOC4 is not set |
| 549 | # CONFIG_TIFM_CORE is not set | 549 | # CONFIG_TIFM_CORE is not set |
| 550 | # CONFIG_ENCLOSURE_SERVICES is not set | 550 | # CONFIG_ENCLOSURE_SERVICES is not set |
| 551 | # CONFIG_HP_ILO is not set | 551 | # CONFIG_HP_ILO is not set |
| 552 | CONFIG_HAVE_IDE=y | 552 | CONFIG_HAVE_IDE=y |
| 553 | CONFIG_IDE=y | 553 | CONFIG_IDE=y |
| 554 | 554 | ||
| 555 | # | 555 | # |
| 556 | # Please see Documentation/ide/ide.txt for help/info on IDE drives | 556 | # Please see Documentation/ide/ide.txt for help/info on IDE drives |
| 557 | # | 557 | # |
| 558 | CONFIG_IDE_TIMINGS=y | 558 | CONFIG_IDE_TIMINGS=y |
| 559 | # CONFIG_BLK_DEV_IDE_SATA is not set | 559 | # CONFIG_BLK_DEV_IDE_SATA is not set |
| 560 | CONFIG_IDE_GD=y | 560 | CONFIG_IDE_GD=y |
| 561 | CONFIG_IDE_GD_ATA=y | 561 | CONFIG_IDE_GD_ATA=y |
| 562 | # CONFIG_IDE_GD_ATAPI is not set | 562 | # CONFIG_IDE_GD_ATAPI is not set |
| 563 | CONFIG_BLK_DEV_IDECD=y | 563 | CONFIG_BLK_DEV_IDECD=y |
| 564 | CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y | 564 | CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y |
| 565 | # CONFIG_BLK_DEV_IDETAPE is not set | 565 | # CONFIG_BLK_DEV_IDETAPE is not set |
| 566 | # CONFIG_BLK_DEV_IDESCSI is not set | 566 | # CONFIG_BLK_DEV_IDESCSI is not set |
| 567 | # CONFIG_IDE_TASK_IOCTL is not set | 567 | # CONFIG_IDE_TASK_IOCTL is not set |
| 568 | CONFIG_IDE_PROC_FS=y | 568 | CONFIG_IDE_PROC_FS=y |
| 569 | 569 | ||
| 570 | # | 570 | # |
| 571 | # IDE chipset support/bugfixes | 571 | # IDE chipset support/bugfixes |
| 572 | # | 572 | # |
| 573 | # CONFIG_BLK_DEV_PLATFORM is not set | 573 | # CONFIG_BLK_DEV_PLATFORM is not set |
| 574 | CONFIG_BLK_DEV_IDEDMA_SFF=y | 574 | CONFIG_BLK_DEV_IDEDMA_SFF=y |
| 575 | 575 | ||
| 576 | # | 576 | # |
| 577 | # PCI IDE chipsets support | 577 | # PCI IDE chipsets support |
| 578 | # | 578 | # |
| 579 | CONFIG_BLK_DEV_IDEPCI=y | 579 | CONFIG_BLK_DEV_IDEPCI=y |
| 580 | CONFIG_IDEPCI_PCIBUS_ORDER=y | 580 | CONFIG_IDEPCI_PCIBUS_ORDER=y |
| 581 | # CONFIG_BLK_DEV_OFFBOARD is not set | 581 | # CONFIG_BLK_DEV_OFFBOARD is not set |
| 582 | CONFIG_BLK_DEV_GENERIC=y | 582 | CONFIG_BLK_DEV_GENERIC=y |
| 583 | # CONFIG_BLK_DEV_OPTI621 is not set | 583 | # CONFIG_BLK_DEV_OPTI621 is not set |
| 584 | CONFIG_BLK_DEV_IDEDMA_PCI=y | 584 | CONFIG_BLK_DEV_IDEDMA_PCI=y |
| 585 | # CONFIG_BLK_DEV_AEC62XX is not set | 585 | # CONFIG_BLK_DEV_AEC62XX is not set |
| 586 | # CONFIG_BLK_DEV_ALI15X3 is not set | 586 | # CONFIG_BLK_DEV_ALI15X3 is not set |
| 587 | CONFIG_BLK_DEV_AMD74XX=y | 587 | CONFIG_BLK_DEV_AMD74XX=y |
| 588 | # CONFIG_BLK_DEV_CMD64X is not set | 588 | # CONFIG_BLK_DEV_CMD64X is not set |
| 589 | # CONFIG_BLK_DEV_TRIFLEX is not set | 589 | # CONFIG_BLK_DEV_TRIFLEX is not set |
| 590 | # CONFIG_BLK_DEV_CS5520 is not set | 590 | # CONFIG_BLK_DEV_CS5520 is not set |
| 591 | # CONFIG_BLK_DEV_CS5530 is not set | 591 | # CONFIG_BLK_DEV_CS5530 is not set |
| 592 | # CONFIG_BLK_DEV_HPT366 is not set | 592 | # CONFIG_BLK_DEV_HPT366 is not set |
| 593 | # CONFIG_BLK_DEV_JMICRON is not set | 593 | # CONFIG_BLK_DEV_JMICRON is not set |
| 594 | # CONFIG_BLK_DEV_SC1200 is not set | 594 | # CONFIG_BLK_DEV_SC1200 is not set |
| 595 | # CONFIG_BLK_DEV_PIIX is not set | 595 | # CONFIG_BLK_DEV_PIIX is not set |
| 596 | # CONFIG_BLK_DEV_IT8213 is not set | 596 | # CONFIG_BLK_DEV_IT8213 is not set |
| 597 | # CONFIG_BLK_DEV_IT821X is not set | 597 | # CONFIG_BLK_DEV_IT821X is not set |
| 598 | # CONFIG_BLK_DEV_NS87415 is not set | 598 | # CONFIG_BLK_DEV_NS87415 is not set |
| 599 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | 599 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set |
| 600 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set | 600 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set |
| 601 | # CONFIG_BLK_DEV_SVWKS is not set | 601 | # CONFIG_BLK_DEV_SVWKS is not set |
| 602 | # CONFIG_BLK_DEV_SIIMAGE is not set | 602 | # CONFIG_BLK_DEV_SIIMAGE is not set |
| 603 | # CONFIG_BLK_DEV_SL82C105 is not set | 603 | # CONFIG_BLK_DEV_SL82C105 is not set |
| 604 | # CONFIG_BLK_DEV_SLC90E66 is not set | 604 | # CONFIG_BLK_DEV_SLC90E66 is not set |
| 605 | # CONFIG_BLK_DEV_TRM290 is not set | 605 | # CONFIG_BLK_DEV_TRM290 is not set |
| 606 | # CONFIG_BLK_DEV_VIA82CXXX is not set | 606 | # CONFIG_BLK_DEV_VIA82CXXX is not set |
| 607 | # CONFIG_BLK_DEV_TC86C001 is not set | 607 | # CONFIG_BLK_DEV_TC86C001 is not set |
| 608 | CONFIG_BLK_DEV_IDEDMA=y | 608 | CONFIG_BLK_DEV_IDEDMA=y |
| 609 | 609 | ||
| 610 | # | 610 | # |
| 611 | # SCSI device support | 611 | # SCSI device support |
| 612 | # | 612 | # |
| 613 | # CONFIG_RAID_ATTRS is not set | 613 | # CONFIG_RAID_ATTRS is not set |
| 614 | CONFIG_SCSI=y | 614 | CONFIG_SCSI=y |
| 615 | CONFIG_SCSI_DMA=y | 615 | CONFIG_SCSI_DMA=y |
| 616 | # CONFIG_SCSI_TGT is not set | 616 | # CONFIG_SCSI_TGT is not set |
| 617 | CONFIG_SCSI_NETLINK=y | 617 | CONFIG_SCSI_NETLINK=y |
| 618 | CONFIG_SCSI_PROC_FS=y | 618 | CONFIG_SCSI_PROC_FS=y |
| 619 | 619 | ||
| 620 | # | 620 | # |
| 621 | # SCSI support type (disk, tape, CD-ROM) | 621 | # SCSI support type (disk, tape, CD-ROM) |
| 622 | # | 622 | # |
| 623 | CONFIG_BLK_DEV_SD=y | 623 | CONFIG_BLK_DEV_SD=y |
| 624 | CONFIG_CHR_DEV_ST=y | 624 | CONFIG_CHR_DEV_ST=y |
| 625 | # CONFIG_CHR_DEV_OSST is not set | 625 | # CONFIG_CHR_DEV_OSST is not set |
| 626 | CONFIG_BLK_DEV_SR=y | 626 | CONFIG_BLK_DEV_SR=y |
| 627 | CONFIG_BLK_DEV_SR_VENDOR=y | 627 | CONFIG_BLK_DEV_SR_VENDOR=y |
| 628 | CONFIG_CHR_DEV_SG=y | 628 | CONFIG_CHR_DEV_SG=y |
| 629 | # CONFIG_CHR_DEV_SCH is not set | 629 | # CONFIG_CHR_DEV_SCH is not set |
| 630 | 630 | ||
| 631 | # | 631 | # |
| 632 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | 632 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs |
| 633 | # | 633 | # |
| 634 | CONFIG_SCSI_MULTI_LUN=y | 634 | CONFIG_SCSI_MULTI_LUN=y |
| 635 | CONFIG_SCSI_CONSTANTS=y | 635 | CONFIG_SCSI_CONSTANTS=y |
| 636 | # CONFIG_SCSI_LOGGING is not set | 636 | # CONFIG_SCSI_LOGGING is not set |
| 637 | # CONFIG_SCSI_SCAN_ASYNC is not set | 637 | # CONFIG_SCSI_SCAN_ASYNC is not set |
| 638 | CONFIG_SCSI_WAIT_SCAN=m | 638 | CONFIG_SCSI_WAIT_SCAN=m |
| 639 | 639 | ||
| 640 | # | 640 | # |
| 641 | # SCSI Transports | 641 | # SCSI Transports |
| 642 | # | 642 | # |
| 643 | CONFIG_SCSI_SPI_ATTRS=y | 643 | CONFIG_SCSI_SPI_ATTRS=y |
| 644 | CONFIG_SCSI_FC_ATTRS=y | 644 | CONFIG_SCSI_FC_ATTRS=y |
| 645 | CONFIG_SCSI_ISCSI_ATTRS=m | 645 | CONFIG_SCSI_ISCSI_ATTRS=m |
| 646 | CONFIG_SCSI_SAS_ATTRS=m | 646 | CONFIG_SCSI_SAS_ATTRS=m |
| 647 | # CONFIG_SCSI_SAS_LIBSAS is not set | 647 | # CONFIG_SCSI_SAS_LIBSAS is not set |
| 648 | CONFIG_SCSI_SRP_ATTRS=y | 648 | CONFIG_SCSI_SRP_ATTRS=y |
| 649 | CONFIG_SCSI_LOWLEVEL=y | 649 | CONFIG_SCSI_LOWLEVEL=y |
| 650 | # CONFIG_ISCSI_TCP is not set | 650 | # CONFIG_ISCSI_TCP is not set |
| 651 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | 651 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set |
| 652 | # CONFIG_SCSI_3W_9XXX is not set | 652 | # CONFIG_SCSI_3W_9XXX is not set |
| 653 | # CONFIG_SCSI_ACARD is not set | 653 | # CONFIG_SCSI_ACARD is not set |
| 654 | # CONFIG_SCSI_AACRAID is not set | 654 | # CONFIG_SCSI_AACRAID is not set |
| 655 | # CONFIG_SCSI_AIC7XXX is not set | 655 | # CONFIG_SCSI_AIC7XXX is not set |
| 656 | # CONFIG_SCSI_AIC7XXX_OLD is not set | 656 | # CONFIG_SCSI_AIC7XXX_OLD is not set |
| 657 | # CONFIG_SCSI_AIC79XX is not set | 657 | # CONFIG_SCSI_AIC79XX is not set |
| 658 | # CONFIG_SCSI_AIC94XX is not set | 658 | # CONFIG_SCSI_AIC94XX is not set |
| 659 | # CONFIG_SCSI_ARCMSR is not set | 659 | # CONFIG_SCSI_ARCMSR is not set |
| 660 | # CONFIG_MEGARAID_NEWGEN is not set | 660 | # CONFIG_MEGARAID_NEWGEN is not set |
| 661 | # CONFIG_MEGARAID_LEGACY is not set | 661 | # CONFIG_MEGARAID_LEGACY is not set |
| 662 | # CONFIG_MEGARAID_SAS is not set | 662 | # CONFIG_MEGARAID_SAS is not set |
| 663 | # CONFIG_SCSI_HPTIOP is not set | 663 | # CONFIG_SCSI_HPTIOP is not set |
| 664 | # CONFIG_SCSI_DMX3191D is not set | 664 | # CONFIG_SCSI_DMX3191D is not set |
| 665 | # CONFIG_SCSI_EATA is not set | 665 | # CONFIG_SCSI_EATA is not set |
| 666 | # CONFIG_SCSI_FUTURE_DOMAIN is not set | 666 | # CONFIG_SCSI_FUTURE_DOMAIN is not set |
| 667 | # CONFIG_SCSI_GDTH is not set | 667 | # CONFIG_SCSI_GDTH is not set |
| 668 | # CONFIG_SCSI_IPS is not set | 668 | # CONFIG_SCSI_IPS is not set |
| 669 | CONFIG_SCSI_IBMVSCSI=y | 669 | CONFIG_SCSI_IBMVSCSI=y |
| 670 | CONFIG_SCSI_IBMVFC=m | 670 | CONFIG_SCSI_IBMVFC=m |
| 671 | CONFIG_SCSI_IBMVFC_TRACE=y | 671 | CONFIG_SCSI_IBMVFC_TRACE=y |
| 672 | # CONFIG_SCSI_INITIO is not set | 672 | # CONFIG_SCSI_INITIO is not set |
| 673 | # CONFIG_SCSI_INIA100 is not set | 673 | # CONFIG_SCSI_INIA100 is not set |
| 674 | # CONFIG_SCSI_PPA is not set | 674 | # CONFIG_SCSI_PPA is not set |
| 675 | # CONFIG_SCSI_IMM is not set | 675 | # CONFIG_SCSI_IMM is not set |
| 676 | # CONFIG_SCSI_MVSAS is not set | 676 | # CONFIG_SCSI_MVSAS is not set |
| 677 | # CONFIG_SCSI_STEX is not set | 677 | # CONFIG_SCSI_STEX is not set |
| 678 | CONFIG_SCSI_SYM53C8XX_2=y | 678 | CONFIG_SCSI_SYM53C8XX_2=y |
| 679 | CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 | 679 | CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 |
| 680 | CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 | 680 | CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 |
| 681 | CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 | 681 | CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 |
| 682 | CONFIG_SCSI_SYM53C8XX_MMIO=y | 682 | CONFIG_SCSI_SYM53C8XX_MMIO=y |
| 683 | CONFIG_SCSI_IPR=y | 683 | CONFIG_SCSI_IPR=y |
| 684 | CONFIG_SCSI_IPR_TRACE=y | 684 | CONFIG_SCSI_IPR_TRACE=y |
| 685 | CONFIG_SCSI_IPR_DUMP=y | 685 | CONFIG_SCSI_IPR_DUMP=y |
| 686 | # CONFIG_SCSI_QLOGIC_1280 is not set | 686 | # CONFIG_SCSI_QLOGIC_1280 is not set |
| 687 | CONFIG_SCSI_QLA_FC=m | 687 | CONFIG_SCSI_QLA_FC=m |
| 688 | # CONFIG_SCSI_QLA_ISCSI is not set | 688 | # CONFIG_SCSI_QLA_ISCSI is not set |
| 689 | CONFIG_SCSI_LPFC=m | 689 | CONFIG_SCSI_LPFC=m |
| 690 | # CONFIG_SCSI_DC395x is not set | 690 | # CONFIG_SCSI_DC395x is not set |
| 691 | # CONFIG_SCSI_DC390T is not set | 691 | # CONFIG_SCSI_DC390T is not set |
| 692 | # CONFIG_SCSI_DEBUG is not set | 692 | # CONFIG_SCSI_DEBUG is not set |
| 693 | # CONFIG_SCSI_SRP is not set | 693 | # CONFIG_SCSI_SRP is not set |
| 694 | # CONFIG_SCSI_DH is not set | 694 | # CONFIG_SCSI_DH is not set |
| 695 | CONFIG_ATA=y | 695 | CONFIG_ATA=y |
| 696 | # CONFIG_ATA_NONSTANDARD is not set | 696 | # CONFIG_ATA_NONSTANDARD is not set |
| 697 | CONFIG_SATA_PMP=y | 697 | CONFIG_SATA_PMP=y |
| 698 | # CONFIG_SATA_AHCI is not set | 698 | # CONFIG_SATA_AHCI is not set |
| 699 | # CONFIG_SATA_SIL24 is not set | 699 | # CONFIG_SATA_SIL24 is not set |
| 700 | # CONFIG_ATA_SFF is not set | 700 | # CONFIG_ATA_SFF is not set |
| 701 | CONFIG_MD=y | 701 | CONFIG_MD=y |
| 702 | CONFIG_BLK_DEV_MD=y | 702 | CONFIG_BLK_DEV_MD=y |
| 703 | CONFIG_MD_AUTODETECT=y | 703 | CONFIG_MD_AUTODETECT=y |
| 704 | CONFIG_MD_LINEAR=y | 704 | CONFIG_MD_LINEAR=y |
| 705 | CONFIG_MD_RAID0=y | 705 | CONFIG_MD_RAID0=y |
| 706 | CONFIG_MD_RAID1=y | 706 | CONFIG_MD_RAID1=y |
| 707 | CONFIG_MD_RAID10=m | 707 | CONFIG_MD_RAID10=m |
| 708 | # CONFIG_MD_RAID456 is not set | 708 | # CONFIG_MD_RAID456 is not set |
| 709 | CONFIG_MD_MULTIPATH=m | 709 | CONFIG_MD_MULTIPATH=m |
| 710 | CONFIG_MD_FAULTY=m | 710 | CONFIG_MD_FAULTY=m |
| 711 | CONFIG_BLK_DEV_DM=y | 711 | CONFIG_BLK_DEV_DM=y |
| 712 | # CONFIG_DM_DEBUG is not set | 712 | # CONFIG_DM_DEBUG is not set |
| 713 | CONFIG_DM_CRYPT=m | 713 | CONFIG_DM_CRYPT=m |
| 714 | CONFIG_DM_SNAPSHOT=m | 714 | CONFIG_DM_SNAPSHOT=m |
| 715 | CONFIG_DM_MIRROR=m | 715 | CONFIG_DM_MIRROR=m |
| 716 | CONFIG_DM_ZERO=m | 716 | CONFIG_DM_ZERO=m |
| 717 | CONFIG_DM_MULTIPATH=m | 717 | CONFIG_DM_MULTIPATH=m |
| 718 | # CONFIG_DM_DELAY is not set | 718 | # CONFIG_DM_DELAY is not set |
| 719 | # CONFIG_DM_UEVENT is not set | 719 | # CONFIG_DM_UEVENT is not set |
| 720 | # CONFIG_FUSION is not set | 720 | # CONFIG_FUSION is not set |
| 721 | 721 | ||
| 722 | # | 722 | # |
| 723 | # IEEE 1394 (FireWire) support | 723 | # IEEE 1394 (FireWire) support |
| 724 | # | 724 | # |
| 725 | 725 | ||
| 726 | # | 726 | # |
| 727 | # Enable only one of the two stacks, unless you know what you are doing | 727 | # Enable only one of the two stacks, unless you know what you are doing |
| 728 | # | 728 | # |
| 729 | # CONFIG_FIREWIRE is not set | 729 | # CONFIG_FIREWIRE is not set |
| 730 | # CONFIG_IEEE1394 is not set | 730 | # CONFIG_IEEE1394 is not set |
| 731 | # CONFIG_I2O is not set | 731 | # CONFIG_I2O is not set |
| 732 | # CONFIG_MACINTOSH_DRIVERS is not set | 732 | # CONFIG_MACINTOSH_DRIVERS is not set |
| 733 | CONFIG_NETDEVICES=y | 733 | CONFIG_NETDEVICES=y |
| 734 | CONFIG_DUMMY=m | 734 | CONFIG_DUMMY=m |
| 735 | CONFIG_BONDING=m | 735 | CONFIG_BONDING=m |
| 736 | # CONFIG_MACVLAN is not set | 736 | # CONFIG_MACVLAN is not set |
| 737 | # CONFIG_EQUALIZER is not set | 737 | # CONFIG_EQUALIZER is not set |
| 738 | CONFIG_TUN=m | 738 | CONFIG_TUN=m |
| 739 | # CONFIG_VETH is not set | 739 | # CONFIG_VETH is not set |
| 740 | # CONFIG_ARCNET is not set | 740 | # CONFIG_ARCNET is not set |
| 741 | CONFIG_PHYLIB=y | 741 | CONFIG_PHYLIB=y |
| 742 | 742 | ||
| 743 | # | 743 | # |
| 744 | # MII PHY device drivers | 744 | # MII PHY device drivers |
| 745 | # | 745 | # |
| 746 | # CONFIG_MARVELL_PHY is not set | 746 | # CONFIG_MARVELL_PHY is not set |
| 747 | # CONFIG_DAVICOM_PHY is not set | 747 | # CONFIG_DAVICOM_PHY is not set |
| 748 | # CONFIG_QSEMI_PHY is not set | 748 | # CONFIG_QSEMI_PHY is not set |
| 749 | # CONFIG_LXT_PHY is not set | 749 | # CONFIG_LXT_PHY is not set |
| 750 | # CONFIG_CICADA_PHY is not set | 750 | # CONFIG_CICADA_PHY is not set |
| 751 | # CONFIG_VITESSE_PHY is not set | 751 | # CONFIG_VITESSE_PHY is not set |
| 752 | # CONFIG_SMSC_PHY is not set | 752 | # CONFIG_SMSC_PHY is not set |
| 753 | # CONFIG_BROADCOM_PHY is not set | 753 | # CONFIG_BROADCOM_PHY is not set |
| 754 | # CONFIG_ICPLUS_PHY is not set | 754 | # CONFIG_ICPLUS_PHY is not set |
| 755 | # CONFIG_REALTEK_PHY is not set | 755 | # CONFIG_REALTEK_PHY is not set |
| 756 | # CONFIG_FIXED_PHY is not set | 756 | # CONFIG_FIXED_PHY is not set |
| 757 | # CONFIG_MDIO_BITBANG is not set | 757 | # CONFIG_MDIO_BITBANG is not set |
| 758 | CONFIG_NET_ETHERNET=y | 758 | CONFIG_NET_ETHERNET=y |
| 759 | CONFIG_MII=y | 759 | CONFIG_MII=y |
| 760 | # CONFIG_HAPPYMEAL is not set | 760 | # CONFIG_HAPPYMEAL is not set |
| 761 | # CONFIG_SUNGEM is not set | 761 | # CONFIG_SUNGEM is not set |
| 762 | # CONFIG_CASSINI is not set | 762 | # CONFIG_CASSINI is not set |
| 763 | CONFIG_NET_VENDOR_3COM=y | 763 | CONFIG_NET_VENDOR_3COM=y |
| 764 | CONFIG_VORTEX=y | 764 | CONFIG_VORTEX=y |
| 765 | # CONFIG_TYPHOON is not set | 765 | # CONFIG_TYPHOON is not set |
| 766 | # CONFIG_NET_TULIP is not set | 766 | # CONFIG_NET_TULIP is not set |
| 767 | # CONFIG_HP100 is not set | 767 | # CONFIG_HP100 is not set |
| 768 | CONFIG_IBMVETH=y | 768 | CONFIG_IBMVETH=y |
| 769 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | 769 | # CONFIG_IBM_NEW_EMAC_ZMII is not set |
| 770 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | 770 | # CONFIG_IBM_NEW_EMAC_RGMII is not set |
| 771 | # CONFIG_IBM_NEW_EMAC_TAH is not set | 771 | # CONFIG_IBM_NEW_EMAC_TAH is not set |
| 772 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | 772 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set |
| 773 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | 773 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set |
| 774 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | 774 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set |
| 775 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | 775 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set |
| 776 | CONFIG_NET_PCI=y | 776 | CONFIG_NET_PCI=y |
| 777 | CONFIG_PCNET32=y | 777 | CONFIG_PCNET32=y |
| 778 | # CONFIG_AMD8111_ETH is not set | 778 | # CONFIG_AMD8111_ETH is not set |
| 779 | # CONFIG_ADAPTEC_STARFIRE is not set | 779 | # CONFIG_ADAPTEC_STARFIRE is not set |
| 780 | # CONFIG_B44 is not set | 780 | # CONFIG_B44 is not set |
| 781 | # CONFIG_FORCEDETH is not set | 781 | # CONFIG_FORCEDETH is not set |
| 782 | # CONFIG_EEPRO100 is not set | 782 | # CONFIG_EEPRO100 is not set |
| 783 | CONFIG_E100=y | 783 | CONFIG_E100=y |
| 784 | # CONFIG_FEALNX is not set | 784 | # CONFIG_FEALNX is not set |
| 785 | # CONFIG_NATSEMI is not set | 785 | # CONFIG_NATSEMI is not set |
| 786 | # CONFIG_NE2K_PCI is not set | 786 | # CONFIG_NE2K_PCI is not set |
| 787 | # CONFIG_8139CP is not set | 787 | # CONFIG_8139CP is not set |
| 788 | # CONFIG_8139TOO is not set | 788 | # CONFIG_8139TOO is not set |
| 789 | # CONFIG_R6040 is not set | 789 | # CONFIG_R6040 is not set |
| 790 | # CONFIG_SIS900 is not set | 790 | # CONFIG_SIS900 is not set |
| 791 | # CONFIG_EPIC100 is not set | 791 | # CONFIG_EPIC100 is not set |
| 792 | # CONFIG_SUNDANCE is not set | 792 | # CONFIG_SUNDANCE is not set |
| 793 | # CONFIG_TLAN is not set | 793 | # CONFIG_TLAN is not set |
| 794 | # CONFIG_VIA_RHINE is not set | 794 | # CONFIG_VIA_RHINE is not set |
| 795 | # CONFIG_SC92031 is not set | 795 | # CONFIG_SC92031 is not set |
| 796 | # CONFIG_NET_POCKET is not set | 796 | # CONFIG_NET_POCKET is not set |
| 797 | # CONFIG_ATL2 is not set | 797 | # CONFIG_ATL2 is not set |
| 798 | CONFIG_NETDEV_1000=y | 798 | CONFIG_NETDEV_1000=y |
| 799 | CONFIG_ACENIC=y | 799 | CONFIG_ACENIC=y |
| 800 | CONFIG_ACENIC_OMIT_TIGON_I=y | 800 | CONFIG_ACENIC_OMIT_TIGON_I=y |
| 801 | # CONFIG_DL2K is not set | 801 | # CONFIG_DL2K is not set |
| 802 | CONFIG_E1000=y | 802 | CONFIG_E1000=y |
| 803 | # CONFIG_E1000E is not set | 803 | # CONFIG_E1000E is not set |
| 804 | # CONFIG_IP1000 is not set | 804 | # CONFIG_IP1000 is not set |
| 805 | # CONFIG_IGB is not set | 805 | # CONFIG_IGB is not set |
| 806 | # CONFIG_NS83820 is not set | 806 | # CONFIG_NS83820 is not set |
| 807 | # CONFIG_HAMACHI is not set | 807 | # CONFIG_HAMACHI is not set |
| 808 | # CONFIG_YELLOWFIN is not set | 808 | # CONFIG_YELLOWFIN is not set |
| 809 | # CONFIG_R8169 is not set | 809 | # CONFIG_R8169 is not set |
| 810 | # CONFIG_SIS190 is not set | 810 | # CONFIG_SIS190 is not set |
| 811 | # CONFIG_SKGE is not set | 811 | # CONFIG_SKGE is not set |
| 812 | # CONFIG_SKY2 is not set | 812 | # CONFIG_SKY2 is not set |
| 813 | # CONFIG_VIA_VELOCITY is not set | 813 | # CONFIG_VIA_VELOCITY is not set |
| 814 | CONFIG_TIGON3=y | 814 | CONFIG_TIGON3=y |
| 815 | # CONFIG_BNX2 is not set | 815 | # CONFIG_BNX2 is not set |
| 816 | # CONFIG_QLA3XXX is not set | 816 | # CONFIG_QLA3XXX is not set |
| 817 | # CONFIG_ATL1 is not set | 817 | # CONFIG_ATL1 is not set |
| 818 | # CONFIG_ATL1E is not set | 818 | # CONFIG_ATL1E is not set |
| 819 | # CONFIG_JME is not set | 819 | # CONFIG_JME is not set |
| 820 | CONFIG_NETDEV_10000=y | 820 | CONFIG_NETDEV_10000=y |
| 821 | # CONFIG_CHELSIO_T1 is not set | 821 | # CONFIG_CHELSIO_T1 is not set |
| 822 | # CONFIG_CHELSIO_T3 is not set | 822 | # CONFIG_CHELSIO_T3 is not set |
| 823 | CONFIG_EHEA=y | 823 | CONFIG_EHEA=y |
| 824 | # CONFIG_ENIC is not set | 824 | # CONFIG_ENIC is not set |
| 825 | # CONFIG_IXGBE is not set | 825 | # CONFIG_IXGBE is not set |
| 826 | CONFIG_IXGB=m | 826 | CONFIG_IXGB=m |
| 827 | CONFIG_S2IO=m | 827 | CONFIG_S2IO=m |
| 828 | # CONFIG_MYRI10GE is not set | 828 | # CONFIG_MYRI10GE is not set |
| 829 | # CONFIG_NETXEN_NIC is not set | 829 | # CONFIG_NETXEN_NIC is not set |
| 830 | # CONFIG_NIU is not set | 830 | # CONFIG_NIU is not set |
| 831 | # CONFIG_MLX4_EN is not set | 831 | # CONFIG_MLX4_EN is not set |
| 832 | # CONFIG_MLX4_CORE is not set | 832 | # CONFIG_MLX4_CORE is not set |
| 833 | # CONFIG_TEHUTI is not set | 833 | # CONFIG_TEHUTI is not set |
| 834 | # CONFIG_BNX2X is not set | 834 | # CONFIG_BNX2X is not set |
| 835 | # CONFIG_QLGE is not set | 835 | # CONFIG_QLGE is not set |
| 836 | # CONFIG_SFC is not set | 836 | # CONFIG_SFC is not set |
| 837 | CONFIG_TR=y | 837 | CONFIG_TR=y |
| 838 | CONFIG_IBMOL=y | 838 | CONFIG_IBMOL=y |
| 839 | # CONFIG_3C359 is not set | 839 | # CONFIG_3C359 is not set |
| 840 | # CONFIG_TMS380TR is not set | 840 | # CONFIG_TMS380TR is not set |
| 841 | 841 | ||
| 842 | # | 842 | # |
| 843 | # Wireless LAN | 843 | # Wireless LAN |
| 844 | # | 844 | # |
| 845 | # CONFIG_WLAN_PRE80211 is not set | 845 | # CONFIG_WLAN_PRE80211 is not set |
| 846 | # CONFIG_WLAN_80211 is not set | 846 | # CONFIG_WLAN_80211 is not set |
| 847 | # CONFIG_IWLWIFI_LEDS is not set | 847 | # CONFIG_IWLWIFI_LEDS is not set |
| 848 | 848 | ||
| 849 | # | 849 | # |
| 850 | # USB Network Adapters | 850 | # USB Network Adapters |
| 851 | # | 851 | # |
| 852 | # CONFIG_USB_CATC is not set | 852 | # CONFIG_USB_CATC is not set |
| 853 | # CONFIG_USB_KAWETH is not set | 853 | # CONFIG_USB_KAWETH is not set |
| 854 | # CONFIG_USB_PEGASUS is not set | 854 | # CONFIG_USB_PEGASUS is not set |
| 855 | # CONFIG_USB_RTL8150 is not set | 855 | # CONFIG_USB_RTL8150 is not set |
| 856 | # CONFIG_USB_USBNET is not set | 856 | # CONFIG_USB_USBNET is not set |
| 857 | # CONFIG_WAN is not set | 857 | # CONFIG_WAN is not set |
| 858 | # CONFIG_FDDI is not set | 858 | # CONFIG_FDDI is not set |
| 859 | # CONFIG_HIPPI is not set | 859 | # CONFIG_HIPPI is not set |
| 860 | # CONFIG_PLIP is not set | 860 | # CONFIG_PLIP is not set |
| 861 | CONFIG_PPP=m | 861 | CONFIG_PPP=m |
| 862 | # CONFIG_PPP_MULTILINK is not set | 862 | # CONFIG_PPP_MULTILINK is not set |
| 863 | # CONFIG_PPP_FILTER is not set | 863 | # CONFIG_PPP_FILTER is not set |
| 864 | CONFIG_PPP_ASYNC=m | 864 | CONFIG_PPP_ASYNC=m |
| 865 | CONFIG_PPP_SYNC_TTY=m | 865 | CONFIG_PPP_SYNC_TTY=m |
| 866 | CONFIG_PPP_DEFLATE=m | 866 | CONFIG_PPP_DEFLATE=m |
| 867 | CONFIG_PPP_BSDCOMP=m | 867 | CONFIG_PPP_BSDCOMP=m |
| 868 | # CONFIG_PPP_MPPE is not set | 868 | # CONFIG_PPP_MPPE is not set |
| 869 | CONFIG_PPPOE=m | 869 | CONFIG_PPPOE=m |
| 870 | # CONFIG_PPPOL2TP is not set | 870 | # CONFIG_PPPOL2TP is not set |
| 871 | # CONFIG_SLIP is not set | 871 | # CONFIG_SLIP is not set |
| 872 | CONFIG_SLHC=m | 872 | CONFIG_SLHC=m |
| 873 | # CONFIG_NET_FC is not set | 873 | # CONFIG_NET_FC is not set |
| 874 | CONFIG_NETCONSOLE=y | 874 | CONFIG_NETCONSOLE=y |
| 875 | # CONFIG_NETCONSOLE_DYNAMIC is not set | 875 | # CONFIG_NETCONSOLE_DYNAMIC is not set |
| 876 | CONFIG_NETPOLL=y | 876 | CONFIG_NETPOLL=y |
| 877 | CONFIG_NETPOLL_TRAP=y | 877 | CONFIG_NETPOLL_TRAP=y |
| 878 | CONFIG_NET_POLL_CONTROLLER=y | 878 | CONFIG_NET_POLL_CONTROLLER=y |
| 879 | # CONFIG_ISDN is not set | 879 | # CONFIG_ISDN is not set |
| 880 | # CONFIG_PHONE is not set | 880 | # CONFIG_PHONE is not set |
| 881 | 881 | ||
| 882 | # | 882 | # |
| 883 | # Input device support | 883 | # Input device support |
| 884 | # | 884 | # |
| 885 | CONFIG_INPUT=y | 885 | CONFIG_INPUT=y |
| 886 | # CONFIG_INPUT_FF_MEMLESS is not set | 886 | # CONFIG_INPUT_FF_MEMLESS is not set |
| 887 | # CONFIG_INPUT_POLLDEV is not set | 887 | # CONFIG_INPUT_POLLDEV is not set |
| 888 | 888 | ||
| 889 | # | 889 | # |
| 890 | # Userland interfaces | 890 | # Userland interfaces |
| 891 | # | 891 | # |
| 892 | CONFIG_INPUT_MOUSEDEV=y | 892 | CONFIG_INPUT_MOUSEDEV=y |
| 893 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | 893 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set |
| 894 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 | 894 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 |
| 895 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | 895 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 |
| 896 | # CONFIG_INPUT_JOYDEV is not set | 896 | # CONFIG_INPUT_JOYDEV is not set |
| 897 | # CONFIG_INPUT_EVDEV is not set | 897 | # CONFIG_INPUT_EVDEV is not set |
| 898 | # CONFIG_INPUT_EVBUG is not set | 898 | # CONFIG_INPUT_EVBUG is not set |
| 899 | 899 | ||
| 900 | # | 900 | # |
| 901 | # Input Device Drivers | 901 | # Input Device Drivers |
| 902 | # | 902 | # |
| 903 | CONFIG_INPUT_KEYBOARD=y | 903 | CONFIG_INPUT_KEYBOARD=y |
| 904 | CONFIG_KEYBOARD_ATKBD=y | 904 | CONFIG_KEYBOARD_ATKBD=y |
| 905 | # CONFIG_KEYBOARD_SUNKBD is not set | 905 | # CONFIG_KEYBOARD_SUNKBD is not set |
| 906 | # CONFIG_KEYBOARD_LKKBD is not set | 906 | # CONFIG_KEYBOARD_LKKBD is not set |
| 907 | # CONFIG_KEYBOARD_XTKBD is not set | 907 | # CONFIG_KEYBOARD_XTKBD is not set |
| 908 | # CONFIG_KEYBOARD_NEWTON is not set | 908 | # CONFIG_KEYBOARD_NEWTON is not set |
| 909 | # CONFIG_KEYBOARD_STOWAWAY is not set | 909 | # CONFIG_KEYBOARD_STOWAWAY is not set |
| 910 | CONFIG_INPUT_MOUSE=y | 910 | CONFIG_INPUT_MOUSE=y |
| 911 | CONFIG_MOUSE_PS2=y | 911 | CONFIG_MOUSE_PS2=y |
| 912 | CONFIG_MOUSE_PS2_ALPS=y | 912 | CONFIG_MOUSE_PS2_ALPS=y |
| 913 | CONFIG_MOUSE_PS2_LOGIPS2PP=y | 913 | CONFIG_MOUSE_PS2_LOGIPS2PP=y |
| 914 | CONFIG_MOUSE_PS2_SYNAPTICS=y | 914 | CONFIG_MOUSE_PS2_SYNAPTICS=y |
| 915 | CONFIG_MOUSE_PS2_LIFEBOOK=y | 915 | CONFIG_MOUSE_PS2_LIFEBOOK=y |
| 916 | CONFIG_MOUSE_PS2_TRACKPOINT=y | 916 | CONFIG_MOUSE_PS2_TRACKPOINT=y |
| 917 | # CONFIG_MOUSE_PS2_ELANTECH is not set | 917 | # CONFIG_MOUSE_PS2_ELANTECH is not set |
| 918 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set | 918 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set |
| 919 | # CONFIG_MOUSE_SERIAL is not set | 919 | # CONFIG_MOUSE_SERIAL is not set |
| 920 | # CONFIG_MOUSE_APPLETOUCH is not set | 920 | # CONFIG_MOUSE_APPLETOUCH is not set |
| 921 | # CONFIG_MOUSE_BCM5974 is not set | 921 | # CONFIG_MOUSE_BCM5974 is not set |
| 922 | # CONFIG_MOUSE_VSXXXAA is not set | 922 | # CONFIG_MOUSE_VSXXXAA is not set |
| 923 | # CONFIG_INPUT_JOYSTICK is not set | 923 | # CONFIG_INPUT_JOYSTICK is not set |
| 924 | # CONFIG_INPUT_TABLET is not set | 924 | # CONFIG_INPUT_TABLET is not set |
| 925 | # CONFIG_INPUT_TOUCHSCREEN is not set | 925 | # CONFIG_INPUT_TOUCHSCREEN is not set |
| 926 | CONFIG_INPUT_MISC=y | 926 | CONFIG_INPUT_MISC=y |
| 927 | CONFIG_INPUT_PCSPKR=m | 927 | CONFIG_INPUT_PCSPKR=m |
| 928 | # CONFIG_INPUT_ATI_REMOTE is not set | 928 | # CONFIG_INPUT_ATI_REMOTE is not set |
| 929 | # CONFIG_INPUT_ATI_REMOTE2 is not set | 929 | # CONFIG_INPUT_ATI_REMOTE2 is not set |
| 930 | # CONFIG_INPUT_KEYSPAN_REMOTE is not set | 930 | # CONFIG_INPUT_KEYSPAN_REMOTE is not set |
| 931 | # CONFIG_INPUT_POWERMATE is not set | 931 | # CONFIG_INPUT_POWERMATE is not set |
| 932 | # CONFIG_INPUT_YEALINK is not set | 932 | # CONFIG_INPUT_YEALINK is not set |
| 933 | # CONFIG_INPUT_CM109 is not set | 933 | # CONFIG_INPUT_CM109 is not set |
| 934 | # CONFIG_INPUT_UINPUT is not set | 934 | # CONFIG_INPUT_UINPUT is not set |
| 935 | 935 | ||
| 936 | # | 936 | # |
| 937 | # Hardware I/O ports | 937 | # Hardware I/O ports |
| 938 | # | 938 | # |
| 939 | CONFIG_SERIO=y | 939 | CONFIG_SERIO=y |
| 940 | CONFIG_SERIO_I8042=y | 940 | CONFIG_SERIO_I8042=y |
| 941 | # CONFIG_SERIO_SERPORT is not set | 941 | # CONFIG_SERIO_SERPORT is not set |
| 942 | # CONFIG_SERIO_PARKBD is not set | 942 | # CONFIG_SERIO_PARKBD is not set |
| 943 | # CONFIG_SERIO_PCIPS2 is not set | 943 | # CONFIG_SERIO_PCIPS2 is not set |
| 944 | CONFIG_SERIO_LIBPS2=y | 944 | CONFIG_SERIO_LIBPS2=y |
| 945 | # CONFIG_SERIO_RAW is not set | 945 | # CONFIG_SERIO_RAW is not set |
| 946 | # CONFIG_SERIO_XILINX_XPS_PS2 is not set | 946 | # CONFIG_SERIO_XILINX_XPS_PS2 is not set |
| 947 | # CONFIG_GAMEPORT is not set | 947 | # CONFIG_GAMEPORT is not set |
| 948 | 948 | ||
| 949 | # | 949 | # |
| 950 | # Character devices | 950 | # Character devices |
| 951 | # | 951 | # |
| 952 | CONFIG_VT=y | 952 | CONFIG_VT=y |
| 953 | CONFIG_CONSOLE_TRANSLATIONS=y | 953 | CONFIG_CONSOLE_TRANSLATIONS=y |
| 954 | CONFIG_VT_CONSOLE=y | 954 | CONFIG_VT_CONSOLE=y |
| 955 | CONFIG_HW_CONSOLE=y | 955 | CONFIG_HW_CONSOLE=y |
| 956 | # CONFIG_VT_HW_CONSOLE_BINDING is not set | 956 | # CONFIG_VT_HW_CONSOLE_BINDING is not set |
| 957 | CONFIG_DEVKMEM=y | 957 | CONFIG_DEVKMEM=y |
| 958 | # CONFIG_SERIAL_NONSTANDARD is not set | 958 | # CONFIG_SERIAL_NONSTANDARD is not set |
| 959 | # CONFIG_NOZOMI is not set | 959 | # CONFIG_NOZOMI is not set |
| 960 | 960 | ||
| 961 | # | 961 | # |
| 962 | # Serial drivers | 962 | # Serial drivers |
| 963 | # | 963 | # |
| 964 | CONFIG_SERIAL_8250=y | 964 | CONFIG_SERIAL_8250=y |
| 965 | CONFIG_SERIAL_8250_CONSOLE=y | 965 | CONFIG_SERIAL_8250_CONSOLE=y |
| 966 | CONFIG_SERIAL_8250_PCI=y | 966 | CONFIG_SERIAL_8250_PCI=y |
| 967 | CONFIG_SERIAL_8250_NR_UARTS=4 | 967 | CONFIG_SERIAL_8250_NR_UARTS=4 |
| 968 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | 968 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 |
| 969 | # CONFIG_SERIAL_8250_EXTENDED is not set | 969 | # CONFIG_SERIAL_8250_EXTENDED is not set |
| 970 | 970 | ||
| 971 | # | 971 | # |
| 972 | # Non-8250 serial port support | 972 | # Non-8250 serial port support |
| 973 | # | 973 | # |
| 974 | CONFIG_SERIAL_CORE=y | 974 | CONFIG_SERIAL_CORE=y |
| 975 | CONFIG_SERIAL_CORE_CONSOLE=y | 975 | CONFIG_SERIAL_CORE_CONSOLE=y |
| 976 | CONFIG_SERIAL_ICOM=m | 976 | CONFIG_SERIAL_ICOM=m |
| 977 | CONFIG_SERIAL_JSM=m | 977 | CONFIG_SERIAL_JSM=m |
| 978 | # CONFIG_SERIAL_OF_PLATFORM is not set | 978 | # CONFIG_SERIAL_OF_PLATFORM is not set |
| 979 | CONFIG_UNIX98_PTYS=y | 979 | CONFIG_UNIX98_PTYS=y |
| 980 | CONFIG_LEGACY_PTYS=y | 980 | CONFIG_LEGACY_PTYS=y |
| 981 | CONFIG_LEGACY_PTY_COUNT=256 | 981 | CONFIG_LEGACY_PTY_COUNT=256 |
| 982 | # CONFIG_PRINTER is not set | 982 | # CONFIG_PRINTER is not set |
| 983 | # CONFIG_PPDEV is not set | 983 | # CONFIG_PPDEV is not set |
| 984 | CONFIG_HVC_DRIVER=y | 984 | CONFIG_HVC_DRIVER=y |
| 985 | CONFIG_HVC_IRQ=y | 985 | CONFIG_HVC_IRQ=y |
| 986 | CONFIG_HVC_CONSOLE=y | 986 | CONFIG_HVC_CONSOLE=y |
| 987 | CONFIG_HVC_RTAS=y | 987 | CONFIG_HVC_RTAS=y |
| 988 | CONFIG_HVCS=m | 988 | CONFIG_HVCS=m |
| 989 | CONFIG_IBM_BSR=m | 989 | CONFIG_IBM_BSR=m |
| 990 | # CONFIG_IPMI_HANDLER is not set | 990 | # CONFIG_IPMI_HANDLER is not set |
| 991 | # CONFIG_HW_RANDOM is not set | 991 | # CONFIG_HW_RANDOM is not set |
| 992 | CONFIG_GEN_RTC=y | 992 | CONFIG_GEN_RTC=y |
| 993 | # CONFIG_GEN_RTC_X is not set | 993 | # CONFIG_GEN_RTC_X is not set |
| 994 | # CONFIG_R3964 is not set | 994 | # CONFIG_R3964 is not set |
| 995 | # CONFIG_APPLICOM is not set | 995 | # CONFIG_APPLICOM is not set |
| 996 | CONFIG_RAW_DRIVER=y | 996 | CONFIG_RAW_DRIVER=y |
| 997 | CONFIG_MAX_RAW_DEVS=1024 | 997 | CONFIG_MAX_RAW_DEVS=1024 |
| 998 | # CONFIG_HANGCHECK_TIMER is not set | 998 | # CONFIG_HANGCHECK_TIMER is not set |
| 999 | # CONFIG_TCG_TPM is not set | 999 | # CONFIG_TCG_TPM is not set |
| 1000 | CONFIG_DEVPORT=y | 1000 | CONFIG_DEVPORT=y |
| 1001 | CONFIG_I2C=y | 1001 | CONFIG_I2C=y |
| 1002 | CONFIG_I2C_BOARDINFO=y | 1002 | CONFIG_I2C_BOARDINFO=y |
| 1003 | # CONFIG_I2C_CHARDEV is not set | 1003 | # CONFIG_I2C_CHARDEV is not set |
| 1004 | CONFIG_I2C_HELPER_AUTO=y | 1004 | CONFIG_I2C_HELPER_AUTO=y |
| 1005 | CONFIG_I2C_ALGOBIT=y | 1005 | CONFIG_I2C_ALGOBIT=y |
| 1006 | 1006 | ||
| 1007 | # | 1007 | # |
| 1008 | # I2C Hardware Bus support | 1008 | # I2C Hardware Bus support |
| 1009 | # | 1009 | # |
| 1010 | 1010 | ||
| 1011 | # | 1011 | # |
| 1012 | # PC SMBus host controller drivers | 1012 | # PC SMBus host controller drivers |
| 1013 | # | 1013 | # |
| 1014 | # CONFIG_I2C_ALI1535 is not set | 1014 | # CONFIG_I2C_ALI1535 is not set |
| 1015 | # CONFIG_I2C_ALI1563 is not set | 1015 | # CONFIG_I2C_ALI1563 is not set |
| 1016 | # CONFIG_I2C_ALI15X3 is not set | 1016 | # CONFIG_I2C_ALI15X3 is not set |
| 1017 | # CONFIG_I2C_AMD756 is not set | 1017 | # CONFIG_I2C_AMD756 is not set |
| 1018 | # CONFIG_I2C_AMD8111 is not set | 1018 | # CONFIG_I2C_AMD8111 is not set |
| 1019 | # CONFIG_I2C_I801 is not set | 1019 | # CONFIG_I2C_I801 is not set |
| 1020 | # CONFIG_I2C_ISCH is not set | 1020 | # CONFIG_I2C_ISCH is not set |
| 1021 | # CONFIG_I2C_PIIX4 is not set | 1021 | # CONFIG_I2C_PIIX4 is not set |
| 1022 | # CONFIG_I2C_NFORCE2 is not set | 1022 | # CONFIG_I2C_NFORCE2 is not set |
| 1023 | # CONFIG_I2C_SIS5595 is not set | 1023 | # CONFIG_I2C_SIS5595 is not set |
| 1024 | # CONFIG_I2C_SIS630 is not set | 1024 | # CONFIG_I2C_SIS630 is not set |
| 1025 | # CONFIG_I2C_SIS96X is not set | 1025 | # CONFIG_I2C_SIS96X is not set |
| 1026 | # CONFIG_I2C_VIA is not set | 1026 | # CONFIG_I2C_VIA is not set |
| 1027 | # CONFIG_I2C_VIAPRO is not set | 1027 | # CONFIG_I2C_VIAPRO is not set |
| 1028 | 1028 | ||
| 1029 | # | 1029 | # |
| 1030 | # I2C system bus drivers (mostly embedded / system-on-chip) | 1030 | # I2C system bus drivers (mostly embedded / system-on-chip) |
| 1031 | # | 1031 | # |
| 1032 | # CONFIG_I2C_OCORES is not set | 1032 | # CONFIG_I2C_OCORES is not set |
| 1033 | # CONFIG_I2C_SIMTEC is not set | 1033 | # CONFIG_I2C_SIMTEC is not set |
| 1034 | 1034 | ||
| 1035 | # | 1035 | # |
| 1036 | # External I2C/SMBus adapter drivers | 1036 | # External I2C/SMBus adapter drivers |
| 1037 | # | 1037 | # |
| 1038 | # CONFIG_I2C_PARPORT is not set | 1038 | # CONFIG_I2C_PARPORT is not set |
| 1039 | # CONFIG_I2C_PARPORT_LIGHT is not set | 1039 | # CONFIG_I2C_PARPORT_LIGHT is not set |
| 1040 | # CONFIG_I2C_TAOS_EVM is not set | 1040 | # CONFIG_I2C_TAOS_EVM is not set |
| 1041 | # CONFIG_I2C_TINY_USB is not set | 1041 | # CONFIG_I2C_TINY_USB is not set |
| 1042 | 1042 | ||
| 1043 | # | 1043 | # |
| 1044 | # Graphics adapter I2C/DDC channel drivers | 1044 | # Graphics adapter I2C/DDC channel drivers |
| 1045 | # | 1045 | # |
| 1046 | # CONFIG_I2C_VOODOO3 is not set | 1046 | # CONFIG_I2C_VOODOO3 is not set |
| 1047 | 1047 | ||
| 1048 | # | 1048 | # |
| 1049 | # Other I2C/SMBus bus drivers | 1049 | # Other I2C/SMBus bus drivers |
| 1050 | # | 1050 | # |
| 1051 | # CONFIG_I2C_PCA_PLATFORM is not set | 1051 | # CONFIG_I2C_PCA_PLATFORM is not set |
| 1052 | # CONFIG_I2C_STUB is not set | 1052 | # CONFIG_I2C_STUB is not set |
| 1053 | 1053 | ||
| 1054 | # | 1054 | # |
| 1055 | # Miscellaneous I2C Chip support | 1055 | # Miscellaneous I2C Chip support |
| 1056 | # | 1056 | # |
| 1057 | # CONFIG_DS1682 is not set | 1057 | # CONFIG_DS1682 is not set |
| 1058 | # CONFIG_EEPROM_AT24 is not set | 1058 | # CONFIG_EEPROM_AT24 is not set |
| 1059 | # CONFIG_EEPROM_LEGACY is not set | 1059 | # CONFIG_EEPROM_LEGACY is not set |
| 1060 | # CONFIG_SENSORS_PCF8574 is not set | 1060 | # CONFIG_SENSORS_PCF8574 is not set |
| 1061 | # CONFIG_PCF8575 is not set | 1061 | # CONFIG_PCF8575 is not set |
| 1062 | # CONFIG_SENSORS_PCA9539 is not set | 1062 | # CONFIG_SENSORS_PCA9539 is not set |
| 1063 | # CONFIG_SENSORS_PCF8591 is not set | 1063 | # CONFIG_SENSORS_PCF8591 is not set |
| 1064 | # CONFIG_SENSORS_MAX6875 is not set | 1064 | # CONFIG_SENSORS_MAX6875 is not set |
| 1065 | # CONFIG_SENSORS_TSL2550 is not set | 1065 | # CONFIG_SENSORS_TSL2550 is not set |
| 1066 | # CONFIG_I2C_DEBUG_CORE is not set | 1066 | # CONFIG_I2C_DEBUG_CORE is not set |
| 1067 | # CONFIG_I2C_DEBUG_ALGO is not set | 1067 | # CONFIG_I2C_DEBUG_ALGO is not set |
| 1068 | # CONFIG_I2C_DEBUG_BUS is not set | 1068 | # CONFIG_I2C_DEBUG_BUS is not set |
| 1069 | # CONFIG_I2C_DEBUG_CHIP is not set | 1069 | # CONFIG_I2C_DEBUG_CHIP is not set |
| 1070 | # CONFIG_SPI is not set | 1070 | # CONFIG_SPI is not set |
| 1071 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | 1071 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y |
| 1072 | # CONFIG_GPIOLIB is not set | 1072 | # CONFIG_GPIOLIB is not set |
| 1073 | # CONFIG_W1 is not set | 1073 | # CONFIG_W1 is not set |
| 1074 | # CONFIG_POWER_SUPPLY is not set | 1074 | # CONFIG_POWER_SUPPLY is not set |
| 1075 | # CONFIG_HWMON is not set | 1075 | # CONFIG_HWMON is not set |
| 1076 | # CONFIG_THERMAL is not set | 1076 | # CONFIG_THERMAL is not set |
| 1077 | # CONFIG_THERMAL_HWMON is not set | 1077 | # CONFIG_THERMAL_HWMON is not set |
| 1078 | # CONFIG_WATCHDOG is not set | 1078 | # CONFIG_WATCHDOG is not set |
| 1079 | 1079 | ||
| 1080 | # | 1080 | # |
| 1081 | # Sonics Silicon Backplane | 1081 | # Sonics Silicon Backplane |
| 1082 | # | 1082 | # |
| 1083 | CONFIG_SSB_POSSIBLE=y | 1083 | CONFIG_SSB_POSSIBLE=y |
| 1084 | # CONFIG_SSB is not set | 1084 | # CONFIG_SSB is not set |
| 1085 | 1085 | ||
| 1086 | # | 1086 | # |
| 1087 | # Multifunction device drivers | 1087 | # Multifunction device drivers |
| 1088 | # | 1088 | # |
| 1089 | # CONFIG_MFD_CORE is not set | 1089 | # CONFIG_MFD_CORE is not set |
| 1090 | # CONFIG_MFD_SM501 is not set | 1090 | # CONFIG_MFD_SM501 is not set |
| 1091 | # CONFIG_HTC_PASIC3 is not set | 1091 | # CONFIG_HTC_PASIC3 is not set |
| 1092 | # CONFIG_MFD_TMIO is not set | 1092 | # CONFIG_MFD_TMIO is not set |
| 1093 | # CONFIG_PMIC_DA903X is not set | 1093 | # CONFIG_PMIC_DA903X is not set |
| 1094 | # CONFIG_MFD_WM8400 is not set | 1094 | # CONFIG_MFD_WM8400 is not set |
| 1095 | # CONFIG_MFD_WM8350_I2C is not set | 1095 | # CONFIG_MFD_WM8350_I2C is not set |
| 1096 | 1096 | ||
| 1097 | # | 1097 | # |
| 1098 | # Voltage and Current regulators | 1098 | # Voltage and Current regulators |
| 1099 | # | 1099 | # |
| 1100 | # CONFIG_REGULATOR is not set | 1100 | # CONFIG_REGULATOR is not set |
| 1101 | # CONFIG_REGULATOR_FIXED_VOLTAGE is not set | 1101 | # CONFIG_REGULATOR_FIXED_VOLTAGE is not set |
| 1102 | # CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set | 1102 | # CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set |
| 1103 | # CONFIG_REGULATOR_BQ24022 is not set | 1103 | # CONFIG_REGULATOR_BQ24022 is not set |
| 1104 | 1104 | ||
| 1105 | # | 1105 | # |
| 1106 | # Multimedia devices | 1106 | # Multimedia devices |
| 1107 | # | 1107 | # |
| 1108 | 1108 | ||
| 1109 | # | 1109 | # |
| 1110 | # Multimedia core support | 1110 | # Multimedia core support |
| 1111 | # | 1111 | # |
| 1112 | # CONFIG_VIDEO_DEV is not set | 1112 | # CONFIG_VIDEO_DEV is not set |
| 1113 | # CONFIG_DVB_CORE is not set | 1113 | # CONFIG_DVB_CORE is not set |
| 1114 | # CONFIG_VIDEO_MEDIA is not set | 1114 | # CONFIG_VIDEO_MEDIA is not set |
| 1115 | 1115 | ||
| 1116 | # | 1116 | # |
| 1117 | # Multimedia drivers | 1117 | # Multimedia drivers |
| 1118 | # | 1118 | # |
| 1119 | # CONFIG_DAB is not set | 1119 | # CONFIG_DAB is not set |
| 1120 | 1120 | ||
| 1121 | # | 1121 | # |
| 1122 | # Graphics support | 1122 | # Graphics support |
| 1123 | # | 1123 | # |
| 1124 | # CONFIG_AGP is not set | 1124 | # CONFIG_AGP is not set |
| 1125 | # CONFIG_DRM is not set | 1125 | # CONFIG_DRM is not set |
| 1126 | # CONFIG_VGASTATE is not set | 1126 | # CONFIG_VGASTATE is not set |
| 1127 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | 1127 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set |
| 1128 | CONFIG_FB=y | 1128 | CONFIG_FB=y |
| 1129 | CONFIG_FIRMWARE_EDID=y | 1129 | CONFIG_FIRMWARE_EDID=y |
| 1130 | CONFIG_FB_DDC=y | 1130 | CONFIG_FB_DDC=y |
| 1131 | # CONFIG_FB_BOOT_VESA_SUPPORT is not set | 1131 | # CONFIG_FB_BOOT_VESA_SUPPORT is not set |
| 1132 | CONFIG_FB_CFB_FILLRECT=y | 1132 | CONFIG_FB_CFB_FILLRECT=y |
| 1133 | CONFIG_FB_CFB_COPYAREA=y | 1133 | CONFIG_FB_CFB_COPYAREA=y |
| 1134 | CONFIG_FB_CFB_IMAGEBLIT=y | 1134 | CONFIG_FB_CFB_IMAGEBLIT=y |
| 1135 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | 1135 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set |
| 1136 | # CONFIG_FB_SYS_FILLRECT is not set | 1136 | # CONFIG_FB_SYS_FILLRECT is not set |
| 1137 | # CONFIG_FB_SYS_COPYAREA is not set | 1137 | # CONFIG_FB_SYS_COPYAREA is not set |
| 1138 | # CONFIG_FB_SYS_IMAGEBLIT is not set | 1138 | # CONFIG_FB_SYS_IMAGEBLIT is not set |
| 1139 | # CONFIG_FB_FOREIGN_ENDIAN is not set | 1139 | # CONFIG_FB_FOREIGN_ENDIAN is not set |
| 1140 | # CONFIG_FB_SYS_FOPS is not set | 1140 | # CONFIG_FB_SYS_FOPS is not set |
| 1141 | # CONFIG_FB_SVGALIB is not set | 1141 | # CONFIG_FB_SVGALIB is not set |
| 1142 | CONFIG_FB_MACMODES=y | 1142 | CONFIG_FB_MACMODES=y |
| 1143 | CONFIG_FB_BACKLIGHT=y | 1143 | CONFIG_FB_BACKLIGHT=y |
| 1144 | CONFIG_FB_MODE_HELPERS=y | 1144 | CONFIG_FB_MODE_HELPERS=y |
| 1145 | CONFIG_FB_TILEBLITTING=y | 1145 | CONFIG_FB_TILEBLITTING=y |
| 1146 | 1146 | ||
| 1147 | # | 1147 | # |
| 1148 | # Frame buffer hardware drivers | 1148 | # Frame buffer hardware drivers |
| 1149 | # | 1149 | # |
| 1150 | # CONFIG_FB_CIRRUS is not set | 1150 | # CONFIG_FB_CIRRUS is not set |
| 1151 | # CONFIG_FB_PM2 is not set | 1151 | # CONFIG_FB_PM2 is not set |
| 1152 | # CONFIG_FB_CYBER2000 is not set | 1152 | # CONFIG_FB_CYBER2000 is not set |
| 1153 | CONFIG_FB_OF=y | 1153 | CONFIG_FB_OF=y |
| 1154 | # CONFIG_FB_ASILIANT is not set | 1154 | # CONFIG_FB_ASILIANT is not set |
| 1155 | # CONFIG_FB_IMSTT is not set | 1155 | # CONFIG_FB_IMSTT is not set |
| 1156 | # CONFIG_FB_VGA16 is not set | 1156 | # CONFIG_FB_VGA16 is not set |
| 1157 | # CONFIG_FB_S1D13XXX is not set | 1157 | # CONFIG_FB_S1D13XXX is not set |
| 1158 | # CONFIG_FB_NVIDIA is not set | 1158 | # CONFIG_FB_NVIDIA is not set |
| 1159 | # CONFIG_FB_RIVA is not set | 1159 | # CONFIG_FB_RIVA is not set |
| 1160 | CONFIG_FB_MATROX=y | 1160 | CONFIG_FB_MATROX=y |
| 1161 | CONFIG_FB_MATROX_MILLENIUM=y | 1161 | CONFIG_FB_MATROX_MILLENIUM=y |
| 1162 | CONFIG_FB_MATROX_MYSTIQUE=y | 1162 | CONFIG_FB_MATROX_MYSTIQUE=y |
| 1163 | CONFIG_FB_MATROX_G=y | 1163 | CONFIG_FB_MATROX_G=y |
| 1164 | # CONFIG_FB_MATROX_I2C is not set | 1164 | # CONFIG_FB_MATROX_I2C is not set |
| 1165 | CONFIG_FB_MATROX_MULTIHEAD=y | 1165 | CONFIG_FB_MATROX_MULTIHEAD=y |
| 1166 | CONFIG_FB_RADEON=y | 1166 | CONFIG_FB_RADEON=y |
| 1167 | CONFIG_FB_RADEON_I2C=y | 1167 | CONFIG_FB_RADEON_I2C=y |
| 1168 | CONFIG_FB_RADEON_BACKLIGHT=y | 1168 | CONFIG_FB_RADEON_BACKLIGHT=y |
| 1169 | # CONFIG_FB_RADEON_DEBUG is not set | 1169 | # CONFIG_FB_RADEON_DEBUG is not set |
| 1170 | # CONFIG_FB_ATY128 is not set | 1170 | # CONFIG_FB_ATY128 is not set |
| 1171 | # CONFIG_FB_ATY is not set | 1171 | # CONFIG_FB_ATY is not set |
| 1172 | # CONFIG_FB_S3 is not set | 1172 | # CONFIG_FB_S3 is not set |
| 1173 | # CONFIG_FB_SAVAGE is not set | 1173 | # CONFIG_FB_SAVAGE is not set |
| 1174 | # CONFIG_FB_SIS is not set | 1174 | # CONFIG_FB_SIS is not set |
| 1175 | # CONFIG_FB_VIA is not set | 1175 | # CONFIG_FB_VIA is not set |
| 1176 | # CONFIG_FB_NEOMAGIC is not set | 1176 | # CONFIG_FB_NEOMAGIC is not set |
| 1177 | # CONFIG_FB_KYRO is not set | 1177 | # CONFIG_FB_KYRO is not set |
| 1178 | # CONFIG_FB_3DFX is not set | 1178 | # CONFIG_FB_3DFX is not set |
| 1179 | # CONFIG_FB_VOODOO1 is not set | 1179 | # CONFIG_FB_VOODOO1 is not set |
| 1180 | # CONFIG_FB_VT8623 is not set | 1180 | # CONFIG_FB_VT8623 is not set |
| 1181 | # CONFIG_FB_TRIDENT is not set | 1181 | # CONFIG_FB_TRIDENT is not set |
| 1182 | # CONFIG_FB_ARK is not set | 1182 | # CONFIG_FB_ARK is not set |
| 1183 | # CONFIG_FB_PM3 is not set | 1183 | # CONFIG_FB_PM3 is not set |
| 1184 | # CONFIG_FB_CARMINE is not set | 1184 | # CONFIG_FB_CARMINE is not set |
| 1185 | CONFIG_FB_IBM_GXT4500=y | 1185 | CONFIG_FB_IBM_GXT4500=y |
| 1186 | # CONFIG_FB_VIRTUAL is not set | 1186 | # CONFIG_FB_VIRTUAL is not set |
| 1187 | # CONFIG_FB_METRONOME is not set | 1187 | # CONFIG_FB_METRONOME is not set |
| 1188 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 1188 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
| 1189 | CONFIG_LCD_CLASS_DEVICE=m | 1189 | CONFIG_LCD_CLASS_DEVICE=m |
| 1190 | # CONFIG_LCD_ILI9320 is not set | 1190 | # CONFIG_LCD_ILI9320 is not set |
| 1191 | CONFIG_LCD_PLATFORM=m | 1191 | CONFIG_LCD_PLATFORM=m |
| 1192 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | 1192 | CONFIG_BACKLIGHT_CLASS_DEVICE=y |
| 1193 | # CONFIG_BACKLIGHT_CORGI is not set | 1193 | # CONFIG_BACKLIGHT_CORGI is not set |
| 1194 | 1194 | ||
| 1195 | # | 1195 | # |
| 1196 | # Display device support | 1196 | # Display device support |
| 1197 | # | 1197 | # |
| 1198 | CONFIG_DISPLAY_SUPPORT=y | 1198 | CONFIG_DISPLAY_SUPPORT=y |
| 1199 | 1199 | ||
| 1200 | # | 1200 | # |
| 1201 | # Display hardware drivers | 1201 | # Display hardware drivers |
| 1202 | # | 1202 | # |
| 1203 | 1203 | ||
| 1204 | # | 1204 | # |
| 1205 | # Console display driver support | 1205 | # Console display driver support |
| 1206 | # | 1206 | # |
| 1207 | # CONFIG_VGA_CONSOLE is not set | 1207 | # CONFIG_VGA_CONSOLE is not set |
| 1208 | CONFIG_DUMMY_CONSOLE=y | 1208 | CONFIG_DUMMY_CONSOLE=y |
| 1209 | CONFIG_FRAMEBUFFER_CONSOLE=y | 1209 | CONFIG_FRAMEBUFFER_CONSOLE=y |
| 1210 | # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set | 1210 | # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set |
| 1211 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | 1211 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set |
| 1212 | # CONFIG_FONTS is not set | 1212 | # CONFIG_FONTS is not set |
| 1213 | CONFIG_FONT_8x8=y | 1213 | CONFIG_FONT_8x8=y |
| 1214 | CONFIG_FONT_8x16=y | 1214 | CONFIG_FONT_8x16=y |
| 1215 | CONFIG_LOGO=y | 1215 | CONFIG_LOGO=y |
| 1216 | CONFIG_LOGO_LINUX_MONO=y | 1216 | CONFIG_LOGO_LINUX_MONO=y |
| 1217 | CONFIG_LOGO_LINUX_VGA16=y | 1217 | CONFIG_LOGO_LINUX_VGA16=y |
| 1218 | CONFIG_LOGO_LINUX_CLUT224=y | 1218 | CONFIG_LOGO_LINUX_CLUT224=y |
| 1219 | # CONFIG_SOUND is not set | 1219 | # CONFIG_SOUND is not set |
| 1220 | CONFIG_HID_SUPPORT=y | 1220 | CONFIG_HID_SUPPORT=y |
| 1221 | CONFIG_HID=y | 1221 | CONFIG_HID=y |
| 1222 | # CONFIG_HID_DEBUG is not set | 1222 | # CONFIG_HID_DEBUG is not set |
| 1223 | # CONFIG_HIDRAW is not set | 1223 | # CONFIG_HIDRAW is not set |
| 1224 | 1224 | ||
| 1225 | # | 1225 | # |
| 1226 | # USB Input Devices | 1226 | # USB Input Devices |
| 1227 | # | 1227 | # |
| 1228 | CONFIG_USB_HID=y | 1228 | CONFIG_USB_HID=y |
| 1229 | # CONFIG_HID_PID is not set | 1229 | # CONFIG_HID_PID is not set |
| 1230 | CONFIG_USB_HIDDEV=y | 1230 | CONFIG_USB_HIDDEV=y |
| 1231 | 1231 | ||
| 1232 | # | 1232 | # |
| 1233 | # Special HID drivers | 1233 | # Special HID drivers |
| 1234 | # | 1234 | # |
| 1235 | CONFIG_HID_COMPAT=y | 1235 | CONFIG_HID_COMPAT=y |
| 1236 | CONFIG_HID_A4TECH=y | 1236 | CONFIG_HID_A4TECH=y |
| 1237 | CONFIG_HID_APPLE=y | 1237 | CONFIG_HID_APPLE=y |
| 1238 | CONFIG_HID_BELKIN=y | 1238 | CONFIG_HID_BELKIN=y |
| 1239 | CONFIG_HID_BRIGHT=y | 1239 | CONFIG_HID_BRIGHT=y |
| 1240 | CONFIG_HID_CHERRY=y | 1240 | CONFIG_HID_CHERRY=y |
| 1241 | CONFIG_HID_CHICONY=y | 1241 | CONFIG_HID_CHICONY=y |
| 1242 | CONFIG_HID_CYPRESS=y | 1242 | CONFIG_HID_CYPRESS=y |
| 1243 | CONFIG_HID_DELL=y | 1243 | CONFIG_HID_DELL=y |
| 1244 | CONFIG_HID_EZKEY=y | 1244 | CONFIG_HID_EZKEY=y |
| 1245 | CONFIG_HID_GYRATION=y | 1245 | CONFIG_HID_GYRATION=y |
| 1246 | CONFIG_HID_LOGITECH=y | 1246 | CONFIG_HID_LOGITECH=y |
| 1247 | # CONFIG_LOGITECH_FF is not set | 1247 | # CONFIG_LOGITECH_FF is not set |
| 1248 | # CONFIG_LOGIRUMBLEPAD2_FF is not set | 1248 | # CONFIG_LOGIRUMBLEPAD2_FF is not set |
| 1249 | CONFIG_HID_MICROSOFT=y | 1249 | CONFIG_HID_MICROSOFT=y |
| 1250 | CONFIG_HID_MONTEREY=y | 1250 | CONFIG_HID_MONTEREY=y |
| 1251 | CONFIG_HID_PANTHERLORD=y | 1251 | CONFIG_HID_PANTHERLORD=y |
| 1252 | # CONFIG_PANTHERLORD_FF is not set | 1252 | # CONFIG_PANTHERLORD_FF is not set |
| 1253 | CONFIG_HID_PETALYNX=y | 1253 | CONFIG_HID_PETALYNX=y |
| 1254 | CONFIG_HID_SAMSUNG=y | 1254 | CONFIG_HID_SAMSUNG=y |
| 1255 | CONFIG_HID_SONY=y | 1255 | CONFIG_HID_SONY=y |
| 1256 | CONFIG_HID_SUNPLUS=y | 1256 | CONFIG_HID_SUNPLUS=y |
| 1257 | # CONFIG_THRUSTMASTER_FF is not set | 1257 | # CONFIG_THRUSTMASTER_FF is not set |
| 1258 | # CONFIG_ZEROPLUS_FF is not set | 1258 | # CONFIG_ZEROPLUS_FF is not set |
| 1259 | CONFIG_USB_SUPPORT=y | 1259 | CONFIG_USB_SUPPORT=y |
| 1260 | CONFIG_USB_ARCH_HAS_HCD=y | 1260 | CONFIG_USB_ARCH_HAS_HCD=y |
| 1261 | CONFIG_USB_ARCH_HAS_OHCI=y | 1261 | CONFIG_USB_ARCH_HAS_OHCI=y |
| 1262 | CONFIG_USB_ARCH_HAS_EHCI=y | 1262 | CONFIG_USB_ARCH_HAS_EHCI=y |
| 1263 | CONFIG_USB=y | 1263 | CONFIG_USB=y |
| 1264 | # CONFIG_USB_DEBUG is not set | 1264 | # CONFIG_USB_DEBUG is not set |
| 1265 | # CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set | 1265 | # CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set |
| 1266 | 1266 | ||
| 1267 | # | 1267 | # |
| 1268 | # Miscellaneous USB options | 1268 | # Miscellaneous USB options |
| 1269 | # | 1269 | # |
| 1270 | CONFIG_USB_DEVICEFS=y | 1270 | CONFIG_USB_DEVICEFS=y |
| 1271 | CONFIG_USB_DEVICE_CLASS=y | 1271 | CONFIG_USB_DEVICE_CLASS=y |
| 1272 | # CONFIG_USB_DYNAMIC_MINORS is not set | 1272 | # CONFIG_USB_DYNAMIC_MINORS is not set |
| 1273 | # CONFIG_USB_OTG is not set | 1273 | # CONFIG_USB_OTG is not set |
| 1274 | CONFIG_USB_MON=y | 1274 | CONFIG_USB_MON=y |
| 1275 | # CONFIG_USB_WUSB is not set | 1275 | # CONFIG_USB_WUSB is not set |
| 1276 | # CONFIG_USB_WUSB_CBAF is not set | 1276 | # CONFIG_USB_WUSB_CBAF is not set |
| 1277 | 1277 | ||
| 1278 | # | 1278 | # |
| 1279 | # USB Host Controller Drivers | 1279 | # USB Host Controller Drivers |
| 1280 | # | 1280 | # |
| 1281 | # CONFIG_USB_C67X00_HCD is not set | 1281 | # CONFIG_USB_C67X00_HCD is not set |
| 1282 | CONFIG_USB_EHCI_HCD=y | 1282 | CONFIG_USB_EHCI_HCD=y |
| 1283 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set | 1283 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set |
| 1284 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | 1284 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set |
| 1285 | # CONFIG_USB_EHCI_HCD_PPC_OF is not set | 1285 | # CONFIG_USB_EHCI_HCD_PPC_OF is not set |
| 1286 | # CONFIG_USB_ISP116X_HCD is not set | 1286 | # CONFIG_USB_ISP116X_HCD is not set |
| 1287 | # CONFIG_USB_ISP1760_HCD is not set | 1287 | # CONFIG_USB_ISP1760_HCD is not set |
| 1288 | CONFIG_USB_OHCI_HCD=y | 1288 | CONFIG_USB_OHCI_HCD=y |
| 1289 | # CONFIG_USB_OHCI_HCD_PPC_OF is not set | 1289 | # CONFIG_USB_OHCI_HCD_PPC_OF is not set |
| 1290 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set | 1290 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set |
| 1291 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set | 1291 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set |
| 1292 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | 1292 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y |
| 1293 | # CONFIG_USB_UHCI_HCD is not set | 1293 | # CONFIG_USB_UHCI_HCD is not set |
| 1294 | # CONFIG_USB_SL811_HCD is not set | 1294 | # CONFIG_USB_SL811_HCD is not set |
| 1295 | # CONFIG_USB_R8A66597_HCD is not set | 1295 | # CONFIG_USB_R8A66597_HCD is not set |
| 1296 | # CONFIG_USB_WHCI_HCD is not set | 1296 | # CONFIG_USB_WHCI_HCD is not set |
| 1297 | # CONFIG_USB_HWA_HCD is not set | 1297 | # CONFIG_USB_HWA_HCD is not set |
| 1298 | 1298 | ||
| 1299 | # | 1299 | # |
| 1300 | # USB Device Class drivers | 1300 | # USB Device Class drivers |
| 1301 | # | 1301 | # |
| 1302 | # CONFIG_USB_ACM is not set | 1302 | # CONFIG_USB_ACM is not set |
| 1303 | # CONFIG_USB_PRINTER is not set | 1303 | # CONFIG_USB_PRINTER is not set |
| 1304 | # CONFIG_USB_WDM is not set | 1304 | # CONFIG_USB_WDM is not set |
| 1305 | # CONFIG_USB_TMC is not set | 1305 | # CONFIG_USB_TMC is not set |
| 1306 | 1306 | ||
| 1307 | # | 1307 | # |
| 1308 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | 1308 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' |
| 1309 | # | 1309 | # |
| 1310 | 1310 | ||
| 1311 | # | 1311 | # |
| 1312 | # may also be needed; see USB_STORAGE Help for more information | 1312 | # may also be needed; see USB_STORAGE Help for more information |
| 1313 | # | 1313 | # |
| 1314 | CONFIG_USB_STORAGE=y | 1314 | CONFIG_USB_STORAGE=y |
| 1315 | # CONFIG_USB_STORAGE_DEBUG is not set | 1315 | # CONFIG_USB_STORAGE_DEBUG is not set |
| 1316 | # CONFIG_USB_STORAGE_DATAFAB is not set | 1316 | # CONFIG_USB_STORAGE_DATAFAB is not set |
| 1317 | # CONFIG_USB_STORAGE_FREECOM is not set | 1317 | # CONFIG_USB_STORAGE_FREECOM is not set |
| 1318 | # CONFIG_USB_STORAGE_ISD200 is not set | 1318 | # CONFIG_USB_STORAGE_ISD200 is not set |
| 1319 | # CONFIG_USB_STORAGE_DPCM is not set | 1319 | # CONFIG_USB_STORAGE_DPCM is not set |
| 1320 | # CONFIG_USB_STORAGE_USBAT is not set | 1320 | # CONFIG_USB_STORAGE_USBAT is not set |
| 1321 | # CONFIG_USB_STORAGE_SDDR09 is not set | 1321 | # CONFIG_USB_STORAGE_SDDR09 is not set |
| 1322 | # CONFIG_USB_STORAGE_SDDR55 is not set | 1322 | # CONFIG_USB_STORAGE_SDDR55 is not set |
| 1323 | # CONFIG_USB_STORAGE_JUMPSHOT is not set | 1323 | # CONFIG_USB_STORAGE_JUMPSHOT is not set |
| 1324 | # CONFIG_USB_STORAGE_ALAUDA is not set | 1324 | # CONFIG_USB_STORAGE_ALAUDA is not set |
| 1325 | CONFIG_USB_STORAGE_ONETOUCH=y | 1325 | CONFIG_USB_STORAGE_ONETOUCH=y |
| 1326 | # CONFIG_USB_STORAGE_KARMA is not set | 1326 | # CONFIG_USB_STORAGE_KARMA is not set |
| 1327 | # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set | 1327 | # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set |
| 1328 | # CONFIG_USB_LIBUSUAL is not set | 1328 | # CONFIG_USB_LIBUSUAL is not set |
| 1329 | 1329 | ||
| 1330 | # | 1330 | # |
| 1331 | # USB Imaging devices | 1331 | # USB Imaging devices |
| 1332 | # | 1332 | # |
| 1333 | # CONFIG_USB_MDC800 is not set | 1333 | # CONFIG_USB_MDC800 is not set |
| 1334 | # CONFIG_USB_MICROTEK is not set | 1334 | # CONFIG_USB_MICROTEK is not set |
| 1335 | 1335 | ||
| 1336 | # | 1336 | # |
| 1337 | # USB port drivers | 1337 | # USB port drivers |
| 1338 | # | 1338 | # |
| 1339 | # CONFIG_USB_USS720 is not set | 1339 | # CONFIG_USB_USS720 is not set |
| 1340 | # CONFIG_USB_SERIAL is not set | 1340 | # CONFIG_USB_SERIAL is not set |
| 1341 | 1341 | ||
| 1342 | # | 1342 | # |
| 1343 | # USB Miscellaneous drivers | 1343 | # USB Miscellaneous drivers |
| 1344 | # | 1344 | # |
| 1345 | # CONFIG_USB_EMI62 is not set | 1345 | # CONFIG_USB_EMI62 is not set |
| 1346 | # CONFIG_USB_EMI26 is not set | 1346 | # CONFIG_USB_EMI26 is not set |
| 1347 | # CONFIG_USB_ADUTUX is not set | 1347 | # CONFIG_USB_ADUTUX is not set |
| 1348 | # CONFIG_USB_SEVSEG is not set | 1348 | # CONFIG_USB_SEVSEG is not set |
| 1349 | # CONFIG_USB_RIO500 is not set | 1349 | # CONFIG_USB_RIO500 is not set |
| 1350 | # CONFIG_USB_LEGOTOWER is not set | 1350 | # CONFIG_USB_LEGOTOWER is not set |
| 1351 | # CONFIG_USB_LCD is not set | 1351 | # CONFIG_USB_LCD is not set |
| 1352 | # CONFIG_USB_BERRY_CHARGE is not set | 1352 | # CONFIG_USB_BERRY_CHARGE is not set |
| 1353 | # CONFIG_USB_LED is not set | 1353 | # CONFIG_USB_LED is not set |
| 1354 | # CONFIG_USB_CYPRESS_CY7C63 is not set | 1354 | # CONFIG_USB_CYPRESS_CY7C63 is not set |
| 1355 | # CONFIG_USB_CYTHERM is not set | 1355 | # CONFIG_USB_CYTHERM is not set |
| 1356 | # CONFIG_USB_PHIDGET is not set | 1356 | # CONFIG_USB_PHIDGET is not set |
| 1357 | # CONFIG_USB_IDMOUSE is not set | 1357 | # CONFIG_USB_IDMOUSE is not set |
| 1358 | # CONFIG_USB_FTDI_ELAN is not set | 1358 | # CONFIG_USB_FTDI_ELAN is not set |
| 1359 | # CONFIG_USB_APPLEDISPLAY is not set | 1359 | # CONFIG_USB_APPLEDISPLAY is not set |
| 1360 | # CONFIG_USB_SISUSBVGA is not set | 1360 | # CONFIG_USB_SISUSBVGA is not set |
| 1361 | # CONFIG_USB_LD is not set | 1361 | # CONFIG_USB_LD is not set |
| 1362 | # CONFIG_USB_TRANCEVIBRATOR is not set | 1362 | # CONFIG_USB_TRANCEVIBRATOR is not set |
| 1363 | # CONFIG_USB_IOWARRIOR is not set | 1363 | # CONFIG_USB_IOWARRIOR is not set |
| 1364 | # CONFIG_USB_TEST is not set | 1364 | # CONFIG_USB_TEST is not set |
| 1365 | # CONFIG_USB_ISIGHTFW is not set | 1365 | # CONFIG_USB_ISIGHTFW is not set |
| 1366 | # CONFIG_USB_VST is not set | 1366 | # CONFIG_USB_VST is not set |
| 1367 | # CONFIG_USB_GADGET is not set | 1367 | # CONFIG_USB_GADGET is not set |
| 1368 | # CONFIG_UWB is not set | 1368 | # CONFIG_UWB is not set |
| 1369 | # CONFIG_MMC is not set | 1369 | # CONFIG_MMC is not set |
| 1370 | # CONFIG_MEMSTICK is not set | 1370 | # CONFIG_MEMSTICK is not set |
| 1371 | # CONFIG_NEW_LEDS is not set | 1371 | # CONFIG_NEW_LEDS is not set |
| 1372 | # CONFIG_ACCESSIBILITY is not set | 1372 | # CONFIG_ACCESSIBILITY is not set |
| 1373 | CONFIG_INFINIBAND=m | 1373 | CONFIG_INFINIBAND=m |
| 1374 | CONFIG_INFINIBAND_USER_MAD=m | 1374 | CONFIG_INFINIBAND_USER_MAD=m |
| 1375 | CONFIG_INFINIBAND_USER_ACCESS=m | 1375 | CONFIG_INFINIBAND_USER_ACCESS=m |
| 1376 | CONFIG_INFINIBAND_USER_MEM=y | 1376 | CONFIG_INFINIBAND_USER_MEM=y |
| 1377 | CONFIG_INFINIBAND_ADDR_TRANS=y | 1377 | CONFIG_INFINIBAND_ADDR_TRANS=y |
| 1378 | CONFIG_INFINIBAND_MTHCA=m | 1378 | CONFIG_INFINIBAND_MTHCA=m |
| 1379 | CONFIG_INFINIBAND_MTHCA_DEBUG=y | 1379 | CONFIG_INFINIBAND_MTHCA_DEBUG=y |
| 1380 | # CONFIG_INFINIBAND_IPATH is not set | 1380 | # CONFIG_INFINIBAND_IPATH is not set |
| 1381 | CONFIG_INFINIBAND_EHCA=m | 1381 | CONFIG_INFINIBAND_EHCA=m |
| 1382 | # CONFIG_INFINIBAND_AMSO1100 is not set | 1382 | # CONFIG_INFINIBAND_AMSO1100 is not set |
| 1383 | # CONFIG_MLX4_INFINIBAND is not set | 1383 | # CONFIG_MLX4_INFINIBAND is not set |
| 1384 | # CONFIG_INFINIBAND_NES is not set | 1384 | # CONFIG_INFINIBAND_NES is not set |
| 1385 | CONFIG_INFINIBAND_IPOIB=m | 1385 | CONFIG_INFINIBAND_IPOIB=m |
| 1386 | # CONFIG_INFINIBAND_IPOIB_CM is not set | 1386 | # CONFIG_INFINIBAND_IPOIB_CM is not set |
| 1387 | CONFIG_INFINIBAND_IPOIB_DEBUG=y | 1387 | CONFIG_INFINIBAND_IPOIB_DEBUG=y |
| 1388 | # CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set | 1388 | # CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set |
| 1389 | CONFIG_INFINIBAND_SRP=m | 1389 | CONFIG_INFINIBAND_SRP=m |
| 1390 | # CONFIG_INFINIBAND_ISER is not set | 1390 | # CONFIG_INFINIBAND_ISER is not set |
| 1391 | # CONFIG_EDAC is not set | 1391 | # CONFIG_EDAC is not set |
| 1392 | # CONFIG_RTC_CLASS is not set | 1392 | # CONFIG_RTC_CLASS is not set |
| 1393 | # CONFIG_DMADEVICES is not set | 1393 | # CONFIG_DMADEVICES is not set |
| 1394 | # CONFIG_AUXDISPLAY is not set | 1394 | # CONFIG_AUXDISPLAY is not set |
| 1395 | # CONFIG_UIO is not set | 1395 | # CONFIG_UIO is not set |
| 1396 | # CONFIG_STAGING is not set | 1396 | # CONFIG_STAGING is not set |
| 1397 | 1397 | ||
| 1398 | # | 1398 | # |
| 1399 | # File systems | 1399 | # File systems |
| 1400 | # | 1400 | # |
| 1401 | CONFIG_EXT2_FS=y | 1401 | CONFIG_EXT2_FS=y |
| 1402 | CONFIG_EXT2_FS_XATTR=y | 1402 | CONFIG_EXT2_FS_XATTR=y |
| 1403 | CONFIG_EXT2_FS_POSIX_ACL=y | 1403 | CONFIG_EXT2_FS_POSIX_ACL=y |
| 1404 | CONFIG_EXT2_FS_SECURITY=y | 1404 | CONFIG_EXT2_FS_SECURITY=y |
| 1405 | CONFIG_EXT2_FS_XIP=y | 1405 | CONFIG_EXT2_FS_XIP=y |
| 1406 | CONFIG_EXT3_FS=y | 1406 | CONFIG_EXT3_FS=y |
| 1407 | CONFIG_EXT3_FS_XATTR=y | 1407 | CONFIG_EXT3_FS_XATTR=y |
| 1408 | CONFIG_EXT3_FS_POSIX_ACL=y | 1408 | CONFIG_EXT3_FS_POSIX_ACL=y |
| 1409 | CONFIG_EXT3_FS_SECURITY=y | 1409 | CONFIG_EXT3_FS_SECURITY=y |
| 1410 | CONFIG_EXT4_FS=y | 1410 | CONFIG_EXT4_FS=y |
| 1411 | # CONFIG_EXT4DEV_COMPAT is not set | 1411 | # CONFIG_EXT4DEV_COMPAT is not set |
| 1412 | CONFIG_EXT4_FS_XATTR=y | 1412 | CONFIG_EXT4_FS_XATTR=y |
| 1413 | CONFIG_EXT4_FS_POSIX_ACL=y | 1413 | CONFIG_EXT4_FS_POSIX_ACL=y |
| 1414 | CONFIG_EXT4_FS_SECURITY=y | 1414 | CONFIG_EXT4_FS_SECURITY=y |
| 1415 | CONFIG_FS_XIP=y | 1415 | CONFIG_FS_XIP=y |
| 1416 | CONFIG_JBD=y | 1416 | CONFIG_JBD=y |
| 1417 | # CONFIG_JBD_DEBUG is not set | 1417 | # CONFIG_JBD_DEBUG is not set |
| 1418 | CONFIG_JBD2=y | 1418 | CONFIG_JBD2=y |
| 1419 | # CONFIG_JBD2_DEBUG is not set | 1419 | # CONFIG_JBD2_DEBUG is not set |
| 1420 | CONFIG_FS_MBCACHE=y | 1420 | CONFIG_FS_MBCACHE=y |
| 1421 | CONFIG_REISERFS_FS=y | 1421 | CONFIG_REISERFS_FS=y |
| 1422 | # CONFIG_REISERFS_CHECK is not set | 1422 | # CONFIG_REISERFS_CHECK is not set |
| 1423 | # CONFIG_REISERFS_PROC_INFO is not set | 1423 | # CONFIG_REISERFS_PROC_INFO is not set |
| 1424 | CONFIG_REISERFS_FS_XATTR=y | 1424 | CONFIG_REISERFS_FS_XATTR=y |
| 1425 | CONFIG_REISERFS_FS_POSIX_ACL=y | 1425 | CONFIG_REISERFS_FS_POSIX_ACL=y |
| 1426 | CONFIG_REISERFS_FS_SECURITY=y | 1426 | CONFIG_REISERFS_FS_SECURITY=y |
| 1427 | CONFIG_JFS_FS=m | 1427 | CONFIG_JFS_FS=m |
| 1428 | CONFIG_JFS_POSIX_ACL=y | 1428 | CONFIG_JFS_POSIX_ACL=y |
| 1429 | CONFIG_JFS_SECURITY=y | 1429 | CONFIG_JFS_SECURITY=y |
| 1430 | # CONFIG_JFS_DEBUG is not set | 1430 | # CONFIG_JFS_DEBUG is not set |
| 1431 | # CONFIG_JFS_STATISTICS is not set | 1431 | # CONFIG_JFS_STATISTICS is not set |
| 1432 | CONFIG_FS_POSIX_ACL=y | 1432 | CONFIG_FS_POSIX_ACL=y |
| 1433 | CONFIG_FILE_LOCKING=y | 1433 | CONFIG_FILE_LOCKING=y |
| 1434 | CONFIG_XFS_FS=m | 1434 | CONFIG_XFS_FS=m |
| 1435 | # CONFIG_XFS_QUOTA is not set | 1435 | # CONFIG_XFS_QUOTA is not set |
| 1436 | CONFIG_XFS_POSIX_ACL=y | 1436 | CONFIG_XFS_POSIX_ACL=y |
| 1437 | # CONFIG_XFS_RT is not set | 1437 | # CONFIG_XFS_RT is not set |
| 1438 | # CONFIG_XFS_DEBUG is not set | 1438 | # CONFIG_XFS_DEBUG is not set |
| 1439 | # CONFIG_GFS2_FS is not set | 1439 | # CONFIG_GFS2_FS is not set |
| 1440 | CONFIG_OCFS2_FS=m | 1440 | CONFIG_OCFS2_FS=m |
| 1441 | CONFIG_OCFS2_FS_O2CB=m | 1441 | CONFIG_OCFS2_FS_O2CB=m |
| 1442 | CONFIG_OCFS2_FS_STATS=y | 1442 | CONFIG_OCFS2_FS_STATS=y |
| 1443 | CONFIG_OCFS2_DEBUG_MASKLOG=y | 1443 | CONFIG_OCFS2_DEBUG_MASKLOG=y |
| 1444 | # CONFIG_OCFS2_DEBUG_FS is not set | 1444 | # CONFIG_OCFS2_DEBUG_FS is not set |
| 1445 | # CONFIG_OCFS2_COMPAT_JBD is not set | 1445 | # CONFIG_OCFS2_COMPAT_JBD is not set |
| 1446 | CONFIG_DNOTIFY=y | 1446 | CONFIG_DNOTIFY=y |
| 1447 | CONFIG_INOTIFY=y | 1447 | CONFIG_INOTIFY=y |
| 1448 | CONFIG_INOTIFY_USER=y | 1448 | CONFIG_INOTIFY_USER=y |
| 1449 | # CONFIG_QUOTA is not set | 1449 | # CONFIG_QUOTA is not set |
| 1450 | # CONFIG_AUTOFS_FS is not set | 1450 | # CONFIG_AUTOFS_FS is not set |
| 1451 | CONFIG_AUTOFS4_FS=m | 1451 | CONFIG_AUTOFS4_FS=m |
| 1452 | CONFIG_FUSE_FS=m | 1452 | CONFIG_FUSE_FS=m |
| 1453 | 1453 | ||
| 1454 | # | 1454 | # |
| 1455 | # CD-ROM/DVD Filesystems | 1455 | # CD-ROM/DVD Filesystems |
| 1456 | # | 1456 | # |
| 1457 | CONFIG_ISO9660_FS=y | 1457 | CONFIG_ISO9660_FS=y |
| 1458 | CONFIG_JOLIET=y | 1458 | CONFIG_JOLIET=y |
| 1459 | CONFIG_ZISOFS=y | 1459 | CONFIG_ZISOFS=y |
| 1460 | CONFIG_UDF_FS=m | 1460 | CONFIG_UDF_FS=m |
| 1461 | CONFIG_UDF_NLS=y | 1461 | CONFIG_UDF_NLS=y |
| 1462 | 1462 | ||
| 1463 | # | 1463 | # |
| 1464 | # DOS/FAT/NT Filesystems | 1464 | # DOS/FAT/NT Filesystems |
| 1465 | # | 1465 | # |
| 1466 | CONFIG_FAT_FS=y | 1466 | CONFIG_FAT_FS=y |
| 1467 | CONFIG_MSDOS_FS=y | 1467 | CONFIG_MSDOS_FS=y |
| 1468 | CONFIG_VFAT_FS=y | 1468 | CONFIG_VFAT_FS=y |
| 1469 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | 1469 | CONFIG_FAT_DEFAULT_CODEPAGE=437 |
| 1470 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | 1470 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" |
| 1471 | # CONFIG_NTFS_FS is not set | 1471 | # CONFIG_NTFS_FS is not set |
| 1472 | 1472 | ||
| 1473 | # | 1473 | # |
| 1474 | # Pseudo filesystems | 1474 | # Pseudo filesystems |
| 1475 | # | 1475 | # |
| 1476 | CONFIG_PROC_FS=y | 1476 | CONFIG_PROC_FS=y |
| 1477 | CONFIG_PROC_KCORE=y | 1477 | CONFIG_PROC_KCORE=y |
| 1478 | CONFIG_PROC_SYSCTL=y | 1478 | CONFIG_PROC_SYSCTL=y |
| 1479 | CONFIG_PROC_PAGE_MONITOR=y | 1479 | CONFIG_PROC_PAGE_MONITOR=y |
| 1480 | CONFIG_SYSFS=y | 1480 | CONFIG_SYSFS=y |
| 1481 | CONFIG_TMPFS=y | 1481 | CONFIG_TMPFS=y |
| 1482 | # CONFIG_TMPFS_POSIX_ACL is not set | 1482 | # CONFIG_TMPFS_POSIX_ACL is not set |
| 1483 | CONFIG_HUGETLBFS=y | 1483 | CONFIG_HUGETLBFS=y |
| 1484 | CONFIG_HUGETLB_PAGE=y | 1484 | CONFIG_HUGETLB_PAGE=y |
| 1485 | CONFIG_CONFIGFS_FS=m | 1485 | CONFIG_CONFIGFS_FS=m |
| 1486 | 1486 | ||
| 1487 | # | 1487 | # |
| 1488 | # Miscellaneous filesystems | 1488 | # Miscellaneous filesystems |
| 1489 | # | 1489 | # |
| 1490 | # CONFIG_ADFS_FS is not set | 1490 | # CONFIG_ADFS_FS is not set |
| 1491 | # CONFIG_AFFS_FS is not set | 1491 | # CONFIG_AFFS_FS is not set |
| 1492 | # CONFIG_HFS_FS is not set | 1492 | # CONFIG_HFS_FS is not set |
| 1493 | # CONFIG_HFSPLUS_FS is not set | 1493 | # CONFIG_HFSPLUS_FS is not set |
| 1494 | # CONFIG_BEFS_FS is not set | 1494 | # CONFIG_BEFS_FS is not set |
| 1495 | # CONFIG_BFS_FS is not set | 1495 | # CONFIG_BFS_FS is not set |
| 1496 | # CONFIG_EFS_FS is not set | 1496 | # CONFIG_EFS_FS is not set |
| 1497 | CONFIG_CRAMFS=y | 1497 | CONFIG_CRAMFS=y |
| 1498 | # CONFIG_VXFS_FS is not set | 1498 | # CONFIG_VXFS_FS is not set |
| 1499 | # CONFIG_MINIX_FS is not set | 1499 | # CONFIG_MINIX_FS is not set |
| 1500 | # CONFIG_OMFS_FS is not set | 1500 | # CONFIG_OMFS_FS is not set |
| 1501 | # CONFIG_HPFS_FS is not set | 1501 | # CONFIG_HPFS_FS is not set |
| 1502 | # CONFIG_QNX4FS_FS is not set | 1502 | # CONFIG_QNX4FS_FS is not set |
| 1503 | # CONFIG_ROMFS_FS is not set | 1503 | # CONFIG_ROMFS_FS is not set |
| 1504 | # CONFIG_SYSV_FS is not set | 1504 | # CONFIG_SYSV_FS is not set |
| 1505 | # CONFIG_UFS_FS is not set | 1505 | # CONFIG_UFS_FS is not set |
| 1506 | CONFIG_NETWORK_FILESYSTEMS=y | 1506 | CONFIG_NETWORK_FILESYSTEMS=y |
| 1507 | CONFIG_NFS_FS=y | 1507 | CONFIG_NFS_FS=y |
| 1508 | CONFIG_NFS_V3=y | 1508 | CONFIG_NFS_V3=y |
| 1509 | CONFIG_NFS_V3_ACL=y | 1509 | CONFIG_NFS_V3_ACL=y |
| 1510 | CONFIG_NFS_V4=y | 1510 | CONFIG_NFS_V4=y |
| 1511 | CONFIG_NFSD=y | 1511 | CONFIG_NFSD=y |
| 1512 | CONFIG_NFSD_V2_ACL=y | 1512 | CONFIG_NFSD_V2_ACL=y |
| 1513 | CONFIG_NFSD_V3=y | 1513 | CONFIG_NFSD_V3=y |
| 1514 | CONFIG_NFSD_V3_ACL=y | 1514 | CONFIG_NFSD_V3_ACL=y |
| 1515 | CONFIG_NFSD_V4=y | 1515 | CONFIG_NFSD_V4=y |
| 1516 | CONFIG_LOCKD=y | 1516 | CONFIG_LOCKD=y |
| 1517 | CONFIG_LOCKD_V4=y | 1517 | CONFIG_LOCKD_V4=y |
| 1518 | CONFIG_EXPORTFS=y | 1518 | CONFIG_EXPORTFS=y |
| 1519 | CONFIG_NFS_ACL_SUPPORT=y | 1519 | CONFIG_NFS_ACL_SUPPORT=y |
| 1520 | CONFIG_NFS_COMMON=y | 1520 | CONFIG_NFS_COMMON=y |
| 1521 | CONFIG_SUNRPC=y | 1521 | CONFIG_SUNRPC=y |
| 1522 | CONFIG_SUNRPC_GSS=y | 1522 | CONFIG_SUNRPC_GSS=y |
| 1523 | CONFIG_SUNRPC_XPRT_RDMA=m | 1523 | CONFIG_SUNRPC_XPRT_RDMA=m |
| 1524 | # CONFIG_SUNRPC_REGISTER_V4 is not set | 1524 | # CONFIG_SUNRPC_REGISTER_V4 is not set |
| 1525 | CONFIG_RPCSEC_GSS_KRB5=y | 1525 | CONFIG_RPCSEC_GSS_KRB5=y |
| 1526 | CONFIG_RPCSEC_GSS_SPKM3=m | 1526 | CONFIG_RPCSEC_GSS_SPKM3=m |
| 1527 | # CONFIG_SMB_FS is not set | 1527 | # CONFIG_SMB_FS is not set |
| 1528 | CONFIG_CIFS=m | 1528 | CONFIG_CIFS=m |
| 1529 | # CONFIG_CIFS_STATS is not set | 1529 | # CONFIG_CIFS_STATS is not set |
| 1530 | # CONFIG_CIFS_WEAK_PW_HASH is not set | 1530 | # CONFIG_CIFS_WEAK_PW_HASH is not set |
| 1531 | CONFIG_CIFS_XATTR=y | 1531 | CONFIG_CIFS_XATTR=y |
| 1532 | CONFIG_CIFS_POSIX=y | 1532 | CONFIG_CIFS_POSIX=y |
| 1533 | # CONFIG_CIFS_DEBUG2 is not set | 1533 | # CONFIG_CIFS_DEBUG2 is not set |
| 1534 | # CONFIG_CIFS_EXPERIMENTAL is not set | 1534 | # CONFIG_CIFS_EXPERIMENTAL is not set |
| 1535 | # CONFIG_NCP_FS is not set | 1535 | # CONFIG_NCP_FS is not set |
| 1536 | # CONFIG_CODA_FS is not set | 1536 | # CONFIG_CODA_FS is not set |
| 1537 | # CONFIG_AFS_FS is not set | 1537 | # CONFIG_AFS_FS is not set |
| 1538 | 1538 | ||
| 1539 | # | 1539 | # |
| 1540 | # Partition Types | 1540 | # Partition Types |
| 1541 | # | 1541 | # |
| 1542 | # CONFIG_PARTITION_ADVANCED is not set | 1542 | # CONFIG_PARTITION_ADVANCED is not set |
| 1543 | CONFIG_MSDOS_PARTITION=y | 1543 | CONFIG_MSDOS_PARTITION=y |
| 1544 | CONFIG_NLS=y | 1544 | CONFIG_NLS=y |
| 1545 | CONFIG_NLS_DEFAULT="iso8859-1" | 1545 | CONFIG_NLS_DEFAULT="iso8859-1" |
| 1546 | CONFIG_NLS_CODEPAGE_437=y | 1546 | CONFIG_NLS_CODEPAGE_437=y |
| 1547 | # CONFIG_NLS_CODEPAGE_737 is not set | 1547 | # CONFIG_NLS_CODEPAGE_737 is not set |
| 1548 | # CONFIG_NLS_CODEPAGE_775 is not set | 1548 | # CONFIG_NLS_CODEPAGE_775 is not set |
| 1549 | # CONFIG_NLS_CODEPAGE_850 is not set | 1549 | # CONFIG_NLS_CODEPAGE_850 is not set |
| 1550 | # CONFIG_NLS_CODEPAGE_852 is not set | 1550 | # CONFIG_NLS_CODEPAGE_852 is not set |
| 1551 | # CONFIG_NLS_CODEPAGE_855 is not set | 1551 | # CONFIG_NLS_CODEPAGE_855 is not set |
| 1552 | # CONFIG_NLS_CODEPAGE_857 is not set | 1552 | # CONFIG_NLS_CODEPAGE_857 is not set |
| 1553 | # CONFIG_NLS_CODEPAGE_860 is not set | 1553 | # CONFIG_NLS_CODEPAGE_860 is not set |
| 1554 | # CONFIG_NLS_CODEPAGE_861 is not set | 1554 | # CONFIG_NLS_CODEPAGE_861 is not set |
| 1555 | # CONFIG_NLS_CODEPAGE_862 is not set | 1555 | # CONFIG_NLS_CODEPAGE_862 is not set |
| 1556 | # CONFIG_NLS_CODEPAGE_863 is not set | 1556 | # CONFIG_NLS_CODEPAGE_863 is not set |
| 1557 | # CONFIG_NLS_CODEPAGE_864 is not set | 1557 | # CONFIG_NLS_CODEPAGE_864 is not set |
| 1558 | # CONFIG_NLS_CODEPAGE_865 is not set | 1558 | # CONFIG_NLS_CODEPAGE_865 is not set |
| 1559 | # CONFIG_NLS_CODEPAGE_866 is not set | 1559 | # CONFIG_NLS_CODEPAGE_866 is not set |
| 1560 | # CONFIG_NLS_CODEPAGE_869 is not set | 1560 | # CONFIG_NLS_CODEPAGE_869 is not set |
| 1561 | # CONFIG_NLS_CODEPAGE_936 is not set | 1561 | # CONFIG_NLS_CODEPAGE_936 is not set |
| 1562 | # CONFIG_NLS_CODEPAGE_950 is not set | 1562 | # CONFIG_NLS_CODEPAGE_950 is not set |
| 1563 | # CONFIG_NLS_CODEPAGE_932 is not set | 1563 | # CONFIG_NLS_CODEPAGE_932 is not set |
| 1564 | # CONFIG_NLS_CODEPAGE_949 is not set | 1564 | # CONFIG_NLS_CODEPAGE_949 is not set |
| 1565 | # CONFIG_NLS_CODEPAGE_874 is not set | 1565 | # CONFIG_NLS_CODEPAGE_874 is not set |
| 1566 | # CONFIG_NLS_ISO8859_8 is not set | 1566 | # CONFIG_NLS_ISO8859_8 is not set |
| 1567 | # CONFIG_NLS_CODEPAGE_1250 is not set | 1567 | # CONFIG_NLS_CODEPAGE_1250 is not set |
| 1568 | # CONFIG_NLS_CODEPAGE_1251 is not set | 1568 | # CONFIG_NLS_CODEPAGE_1251 is not set |
| 1569 | CONFIG_NLS_ASCII=y | 1569 | CONFIG_NLS_ASCII=y |
| 1570 | CONFIG_NLS_ISO8859_1=y | 1570 | CONFIG_NLS_ISO8859_1=y |
| 1571 | # CONFIG_NLS_ISO8859_2 is not set | 1571 | # CONFIG_NLS_ISO8859_2 is not set |
| 1572 | # CONFIG_NLS_ISO8859_3 is not set | 1572 | # CONFIG_NLS_ISO8859_3 is not set |
| 1573 | # CONFIG_NLS_ISO8859_4 is not set | 1573 | # CONFIG_NLS_ISO8859_4 is not set |
| 1574 | # CONFIG_NLS_ISO8859_5 is not set | 1574 | # CONFIG_NLS_ISO8859_5 is not set |
| 1575 | # CONFIG_NLS_ISO8859_6 is not set | 1575 | # CONFIG_NLS_ISO8859_6 is not set |
| 1576 | # CONFIG_NLS_ISO8859_7 is not set | 1576 | # CONFIG_NLS_ISO8859_7 is not set |
| 1577 | # CONFIG_NLS_ISO8859_9 is not set | 1577 | # CONFIG_NLS_ISO8859_9 is not set |
| 1578 | # CONFIG_NLS_ISO8859_13 is not set | 1578 | # CONFIG_NLS_ISO8859_13 is not set |
| 1579 | # CONFIG_NLS_ISO8859_14 is not set | 1579 | # CONFIG_NLS_ISO8859_14 is not set |
| 1580 | # CONFIG_NLS_ISO8859_15 is not set | 1580 | # CONFIG_NLS_ISO8859_15 is not set |
| 1581 | # CONFIG_NLS_KOI8_R is not set | 1581 | # CONFIG_NLS_KOI8_R is not set |
| 1582 | # CONFIG_NLS_KOI8_U is not set | 1582 | # CONFIG_NLS_KOI8_U is not set |
| 1583 | # CONFIG_NLS_UTF8 is not set | 1583 | # CONFIG_NLS_UTF8 is not set |
| 1584 | # CONFIG_DLM is not set | 1584 | # CONFIG_DLM is not set |
| 1585 | 1585 | ||
| 1586 | # | 1586 | # |
| 1587 | # Library routines | 1587 | # Library routines |
| 1588 | # | 1588 | # |
| 1589 | CONFIG_BITREVERSE=y | 1589 | CONFIG_BITREVERSE=y |
| 1590 | CONFIG_CRC_CCITT=m | 1590 | CONFIG_CRC_CCITT=m |
| 1591 | CONFIG_CRC16=y | 1591 | CONFIG_CRC16=y |
| 1592 | CONFIG_CRC_T10DIF=y | 1592 | CONFIG_CRC_T10DIF=y |
| 1593 | CONFIG_CRC_ITU_T=m | 1593 | CONFIG_CRC_ITU_T=m |
| 1594 | CONFIG_CRC32=y | 1594 | CONFIG_CRC32=y |
| 1595 | # CONFIG_CRC7 is not set | 1595 | # CONFIG_CRC7 is not set |
| 1596 | CONFIG_LIBCRC32C=m | 1596 | CONFIG_LIBCRC32C=m |
| 1597 | CONFIG_ZLIB_INFLATE=y | 1597 | CONFIG_ZLIB_INFLATE=y |
| 1598 | CONFIG_ZLIB_DEFLATE=m | 1598 | CONFIG_ZLIB_DEFLATE=m |
| 1599 | CONFIG_LZO_COMPRESS=m | 1599 | CONFIG_LZO_COMPRESS=m |
| 1600 | CONFIG_LZO_DECOMPRESS=m | 1600 | CONFIG_LZO_DECOMPRESS=m |
| 1601 | CONFIG_TEXTSEARCH=y | 1601 | CONFIG_TEXTSEARCH=y |
| 1602 | CONFIG_TEXTSEARCH_KMP=m | 1602 | CONFIG_TEXTSEARCH_KMP=m |
| 1603 | CONFIG_TEXTSEARCH_BM=m | 1603 | CONFIG_TEXTSEARCH_BM=m |
| 1604 | CONFIG_TEXTSEARCH_FSM=m | 1604 | CONFIG_TEXTSEARCH_FSM=m |
| 1605 | CONFIG_PLIST=y | 1605 | CONFIG_PLIST=y |
| 1606 | CONFIG_HAS_IOMEM=y | 1606 | CONFIG_HAS_IOMEM=y |
| 1607 | CONFIG_HAS_IOPORT=y | 1607 | CONFIG_HAS_IOPORT=y |
| 1608 | CONFIG_HAS_DMA=y | 1608 | CONFIG_HAS_DMA=y |
| 1609 | CONFIG_HAVE_LMB=y | 1609 | CONFIG_HAVE_LMB=y |
| 1610 | 1610 | ||
| 1611 | # | 1611 | # |
| 1612 | # Kernel hacking | 1612 | # Kernel hacking |
| 1613 | # | 1613 | # |
| 1614 | # CONFIG_PRINTK_TIME is not set | 1614 | # CONFIG_PRINTK_TIME is not set |
| 1615 | CONFIG_ENABLE_WARN_DEPRECATED=y | 1615 | CONFIG_ENABLE_WARN_DEPRECATED=y |
| 1616 | CONFIG_ENABLE_MUST_CHECK=y | 1616 | CONFIG_ENABLE_MUST_CHECK=y |
| 1617 | CONFIG_FRAME_WARN=2048 | 1617 | CONFIG_FRAME_WARN=2048 |
| 1618 | CONFIG_MAGIC_SYSRQ=y | 1618 | CONFIG_MAGIC_SYSRQ=y |
| 1619 | # CONFIG_UNUSED_SYMBOLS is not set | 1619 | # CONFIG_UNUSED_SYMBOLS is not set |
| 1620 | CONFIG_DEBUG_FS=y | 1620 | CONFIG_DEBUG_FS=y |
| 1621 | # CONFIG_HEADERS_CHECK is not set | 1621 | # CONFIG_HEADERS_CHECK is not set |
| 1622 | CONFIG_DEBUG_KERNEL=y | 1622 | CONFIG_DEBUG_KERNEL=y |
| 1623 | # CONFIG_DEBUG_SHIRQ is not set | 1623 | # CONFIG_DEBUG_SHIRQ is not set |
| 1624 | CONFIG_DETECT_SOFTLOCKUP=y | 1624 | CONFIG_DETECT_SOFTLOCKUP=y |
| 1625 | # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set | 1625 | # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set |
| 1626 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 | 1626 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 |
| 1627 | CONFIG_SCHED_DEBUG=y | 1627 | CONFIG_SCHED_DEBUG=y |
| 1628 | CONFIG_SCHEDSTATS=y | 1628 | CONFIG_SCHEDSTATS=y |
| 1629 | # CONFIG_TIMER_STATS is not set | 1629 | # CONFIG_TIMER_STATS is not set |
| 1630 | # CONFIG_DEBUG_OBJECTS is not set | 1630 | # CONFIG_DEBUG_OBJECTS is not set |
| 1631 | # CONFIG_SLUB_DEBUG_ON is not set | 1631 | # CONFIG_SLUB_DEBUG_ON is not set |
| 1632 | # CONFIG_SLUB_STATS is not set | 1632 | # CONFIG_SLUB_STATS is not set |
| 1633 | # CONFIG_DEBUG_RT_MUTEXES is not set | 1633 | # CONFIG_DEBUG_RT_MUTEXES is not set |
| 1634 | # CONFIG_RT_MUTEX_TESTER is not set | 1634 | # CONFIG_RT_MUTEX_TESTER is not set |
| 1635 | # CONFIG_DEBUG_SPINLOCK is not set | 1635 | # CONFIG_DEBUG_SPINLOCK is not set |
| 1636 | # CONFIG_DEBUG_MUTEXES is not set | 1636 | # CONFIG_DEBUG_MUTEXES is not set |
| 1637 | # CONFIG_DEBUG_LOCK_ALLOC is not set | 1637 | # CONFIG_DEBUG_LOCK_ALLOC is not set |
| 1638 | # CONFIG_PROVE_LOCKING is not set | 1638 | # CONFIG_PROVE_LOCKING is not set |
| 1639 | # CONFIG_LOCK_STAT is not set | 1639 | # CONFIG_LOCK_STAT is not set |
| 1640 | CONFIG_TRACE_IRQFLAGS=y | 1640 | CONFIG_TRACE_IRQFLAGS=y |
| 1641 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | 1641 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set |
| 1642 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | 1642 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set |
| 1643 | CONFIG_STACKTRACE=y | 1643 | CONFIG_STACKTRACE=y |
| 1644 | # CONFIG_DEBUG_KOBJECT is not set | 1644 | # CONFIG_DEBUG_KOBJECT is not set |
| 1645 | CONFIG_DEBUG_BUGVERBOSE=y | 1645 | CONFIG_DEBUG_BUGVERBOSE=y |
| 1646 | # CONFIG_DEBUG_INFO is not set | 1646 | # CONFIG_DEBUG_INFO is not set |
| 1647 | # CONFIG_DEBUG_VM is not set | 1647 | # CONFIG_DEBUG_VM is not set |
| 1648 | # CONFIG_DEBUG_WRITECOUNT is not set | 1648 | # CONFIG_DEBUG_WRITECOUNT is not set |
| 1649 | CONFIG_DEBUG_MEMORY_INIT=y | 1649 | CONFIG_DEBUG_MEMORY_INIT=y |
| 1650 | # CONFIG_DEBUG_LIST is not set | 1650 | # CONFIG_DEBUG_LIST is not set |
| 1651 | # CONFIG_DEBUG_SG is not set | 1651 | # CONFIG_DEBUG_SG is not set |
| 1652 | CONFIG_FRAME_POINTER=y | 1652 | CONFIG_FRAME_POINTER=y |
| 1653 | # CONFIG_BOOT_PRINTK_DELAY is not set | 1653 | # CONFIG_BOOT_PRINTK_DELAY is not set |
| 1654 | # CONFIG_RCU_TORTURE_TEST is not set | 1654 | # CONFIG_RCU_TORTURE_TEST is not set |
| 1655 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 1655 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
| 1656 | # CONFIG_KPROBES_SANITY_TEST is not set | 1656 | # CONFIG_KPROBES_SANITY_TEST is not set |
| 1657 | # CONFIG_BACKTRACE_SELF_TEST is not set | 1657 | # CONFIG_BACKTRACE_SELF_TEST is not set |
| 1658 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | 1658 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set |
| 1659 | # CONFIG_LKDTM is not set | 1659 | # CONFIG_LKDTM is not set |
| 1660 | # CONFIG_FAULT_INJECTION is not set | 1660 | # CONFIG_FAULT_INJECTION is not set |
| 1661 | CONFIG_LATENCYTOP=y | 1661 | CONFIG_LATENCYTOP=y |
| 1662 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 1662 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
| 1663 | CONFIG_NOP_TRACER=y | 1663 | CONFIG_NOP_TRACER=y |
| 1664 | CONFIG_HAVE_FUNCTION_TRACER=y | 1664 | CONFIG_HAVE_FUNCTION_TRACER=y |
| 1665 | CONFIG_TRACER_MAX_TRACE=y | 1665 | CONFIG_TRACER_MAX_TRACE=y |
| 1666 | CONFIG_RING_BUFFER=y | 1666 | CONFIG_RING_BUFFER=y |
| 1667 | CONFIG_TRACING=y | 1667 | CONFIG_TRACING=y |
| 1668 | 1668 | ||
| 1669 | # | 1669 | # |
| 1670 | # Tracers | 1670 | # Tracers |
| 1671 | # | 1671 | # |
| 1672 | CONFIG_FUNCTION_TRACER=y | 1672 | CONFIG_FUNCTION_TRACER=y |
| 1673 | CONFIG_IRQSOFF_TRACER=y | 1673 | CONFIG_IRQSOFF_TRACER=y |
| 1674 | CONFIG_SCHED_TRACER=y | 1674 | CONFIG_SCHED_TRACER=y |
| 1675 | CONFIG_CONTEXT_SWITCH_TRACER=y | 1675 | CONFIG_CONTEXT_SWITCH_TRACER=y |
| 1676 | # CONFIG_BOOT_TRACER is not set | 1676 | # CONFIG_BOOT_TRACER is not set |
| 1677 | CONFIG_STACK_TRACER=y | 1677 | CONFIG_STACK_TRACER=y |
| 1678 | # CONFIG_FTRACE_STARTUP_TEST is not set | 1678 | # CONFIG_FTRACE_STARTUP_TEST is not set |
| 1679 | CONFIG_DYNAMIC_PRINTK_DEBUG=y | 1679 | CONFIG_DYNAMIC_PRINTK_DEBUG=y |
| 1680 | # CONFIG_SAMPLES is not set | 1680 | # CONFIG_SAMPLES is not set |
| 1681 | CONFIG_HAVE_ARCH_KGDB=y | 1681 | CONFIG_HAVE_ARCH_KGDB=y |
| 1682 | # CONFIG_KGDB is not set | 1682 | # CONFIG_KGDB is not set |
| 1683 | CONFIG_DEBUG_STACKOVERFLOW=y | 1683 | CONFIG_DEBUG_STACKOVERFLOW=y |
| 1684 | # CONFIG_DEBUG_STACK_USAGE is not set | 1684 | # CONFIG_DEBUG_STACK_USAGE is not set |
| 1685 | # CONFIG_DEBUG_PAGEALLOC is not set | 1685 | # CONFIG_DEBUG_PAGEALLOC is not set |
| 1686 | CONFIG_HCALL_STATS=y | 1686 | # CONFIG_HCALL_STATS is not set |
| 1687 | # CONFIG_CODE_PATCHING_SELFTEST is not set | 1687 | # CONFIG_CODE_PATCHING_SELFTEST is not set |
| 1688 | # CONFIG_FTR_FIXUP_SELFTEST is not set | 1688 | # CONFIG_FTR_FIXUP_SELFTEST is not set |
| 1689 | # CONFIG_MSI_BITMAP_SELFTEST is not set | 1689 | # CONFIG_MSI_BITMAP_SELFTEST is not set |
| 1690 | CONFIG_XMON=y | 1690 | CONFIG_XMON=y |
| 1691 | CONFIG_XMON_DEFAULT=y | 1691 | CONFIG_XMON_DEFAULT=y |
| 1692 | CONFIG_XMON_DISASSEMBLY=y | 1692 | CONFIG_XMON_DISASSEMBLY=y |
| 1693 | CONFIG_DEBUGGER=y | 1693 | CONFIG_DEBUGGER=y |
| 1694 | CONFIG_IRQSTACKS=y | 1694 | CONFIG_IRQSTACKS=y |
| 1695 | CONFIG_VIRQ_DEBUG=y | 1695 | CONFIG_VIRQ_DEBUG=y |
| 1696 | # CONFIG_BOOTX_TEXT is not set | 1696 | # CONFIG_BOOTX_TEXT is not set |
| 1697 | # CONFIG_PPC_EARLY_DEBUG is not set | 1697 | # CONFIG_PPC_EARLY_DEBUG is not set |
| 1698 | 1698 | ||
| 1699 | # | 1699 | # |
| 1700 | # Security options | 1700 | # Security options |
| 1701 | # | 1701 | # |
| 1702 | # CONFIG_KEYS is not set | 1702 | # CONFIG_KEYS is not set |
| 1703 | # CONFIG_SECURITY is not set | 1703 | # CONFIG_SECURITY is not set |
| 1704 | # CONFIG_SECURITYFS is not set | 1704 | # CONFIG_SECURITYFS is not set |
| 1705 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | 1705 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set |
| 1706 | CONFIG_CRYPTO=y | 1706 | CONFIG_CRYPTO=y |
| 1707 | 1707 | ||
| 1708 | # | 1708 | # |
| 1709 | # Crypto core or helper | 1709 | # Crypto core or helper |
| 1710 | # | 1710 | # |
| 1711 | # CONFIG_CRYPTO_FIPS is not set | 1711 | # CONFIG_CRYPTO_FIPS is not set |
| 1712 | CONFIG_CRYPTO_ALGAPI=y | 1712 | CONFIG_CRYPTO_ALGAPI=y |
| 1713 | CONFIG_CRYPTO_AEAD=y | 1713 | CONFIG_CRYPTO_AEAD=y |
| 1714 | CONFIG_CRYPTO_BLKCIPHER=y | 1714 | CONFIG_CRYPTO_BLKCIPHER=y |
| 1715 | CONFIG_CRYPTO_HASH=y | 1715 | CONFIG_CRYPTO_HASH=y |
| 1716 | CONFIG_CRYPTO_RNG=y | 1716 | CONFIG_CRYPTO_RNG=y |
| 1717 | CONFIG_CRYPTO_MANAGER=y | 1717 | CONFIG_CRYPTO_MANAGER=y |
| 1718 | CONFIG_CRYPTO_GF128MUL=m | 1718 | CONFIG_CRYPTO_GF128MUL=m |
| 1719 | CONFIG_CRYPTO_NULL=m | 1719 | CONFIG_CRYPTO_NULL=m |
| 1720 | # CONFIG_CRYPTO_CRYPTD is not set | 1720 | # CONFIG_CRYPTO_CRYPTD is not set |
| 1721 | CONFIG_CRYPTO_AUTHENC=m | 1721 | CONFIG_CRYPTO_AUTHENC=m |
| 1722 | CONFIG_CRYPTO_TEST=m | 1722 | CONFIG_CRYPTO_TEST=m |
| 1723 | 1723 | ||
| 1724 | # | 1724 | # |
| 1725 | # Authenticated Encryption with Associated Data | 1725 | # Authenticated Encryption with Associated Data |
| 1726 | # | 1726 | # |
| 1727 | CONFIG_CRYPTO_CCM=m | 1727 | CONFIG_CRYPTO_CCM=m |
| 1728 | CONFIG_CRYPTO_GCM=m | 1728 | CONFIG_CRYPTO_GCM=m |
| 1729 | CONFIG_CRYPTO_SEQIV=m | 1729 | CONFIG_CRYPTO_SEQIV=m |
| 1730 | 1730 | ||
| 1731 | # | 1731 | # |
| 1732 | # Block modes | 1732 | # Block modes |
| 1733 | # | 1733 | # |
| 1734 | CONFIG_CRYPTO_CBC=y | 1734 | CONFIG_CRYPTO_CBC=y |
| 1735 | CONFIG_CRYPTO_CTR=m | 1735 | CONFIG_CRYPTO_CTR=m |
| 1736 | # CONFIG_CRYPTO_CTS is not set | 1736 | # CONFIG_CRYPTO_CTS is not set |
| 1737 | CONFIG_CRYPTO_ECB=m | 1737 | CONFIG_CRYPTO_ECB=m |
| 1738 | # CONFIG_CRYPTO_LRW is not set | 1738 | # CONFIG_CRYPTO_LRW is not set |
| 1739 | CONFIG_CRYPTO_PCBC=m | 1739 | CONFIG_CRYPTO_PCBC=m |
| 1740 | # CONFIG_CRYPTO_XTS is not set | 1740 | # CONFIG_CRYPTO_XTS is not set |
| 1741 | 1741 | ||
| 1742 | # | 1742 | # |
| 1743 | # Hash modes | 1743 | # Hash modes |
| 1744 | # | 1744 | # |
| 1745 | CONFIG_CRYPTO_HMAC=y | 1745 | CONFIG_CRYPTO_HMAC=y |
| 1746 | # CONFIG_CRYPTO_XCBC is not set | 1746 | # CONFIG_CRYPTO_XCBC is not set |
| 1747 | 1747 | ||
| 1748 | # | 1748 | # |
| 1749 | # Digest | 1749 | # Digest |
| 1750 | # | 1750 | # |
| 1751 | CONFIG_CRYPTO_CRC32C=m | 1751 | CONFIG_CRYPTO_CRC32C=m |
| 1752 | CONFIG_CRYPTO_MD4=m | 1752 | CONFIG_CRYPTO_MD4=m |
| 1753 | CONFIG_CRYPTO_MD5=y | 1753 | CONFIG_CRYPTO_MD5=y |
| 1754 | CONFIG_CRYPTO_MICHAEL_MIC=m | 1754 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| 1755 | # CONFIG_CRYPTO_RMD128 is not set | 1755 | # CONFIG_CRYPTO_RMD128 is not set |
| 1756 | # CONFIG_CRYPTO_RMD160 is not set | 1756 | # CONFIG_CRYPTO_RMD160 is not set |
| 1757 | # CONFIG_CRYPTO_RMD256 is not set | 1757 | # CONFIG_CRYPTO_RMD256 is not set |
| 1758 | # CONFIG_CRYPTO_RMD320 is not set | 1758 | # CONFIG_CRYPTO_RMD320 is not set |
| 1759 | CONFIG_CRYPTO_SHA1=m | 1759 | CONFIG_CRYPTO_SHA1=m |
| 1760 | CONFIG_CRYPTO_SHA256=m | 1760 | CONFIG_CRYPTO_SHA256=m |
| 1761 | CONFIG_CRYPTO_SHA512=m | 1761 | CONFIG_CRYPTO_SHA512=m |
| 1762 | CONFIG_CRYPTO_TGR192=m | 1762 | CONFIG_CRYPTO_TGR192=m |
| 1763 | CONFIG_CRYPTO_WP512=m | 1763 | CONFIG_CRYPTO_WP512=m |
| 1764 | 1764 | ||
| 1765 | # | 1765 | # |
| 1766 | # Ciphers | 1766 | # Ciphers |
| 1767 | # | 1767 | # |
| 1768 | CONFIG_CRYPTO_AES=m | 1768 | CONFIG_CRYPTO_AES=m |
| 1769 | CONFIG_CRYPTO_ANUBIS=m | 1769 | CONFIG_CRYPTO_ANUBIS=m |
| 1770 | CONFIG_CRYPTO_ARC4=m | 1770 | CONFIG_CRYPTO_ARC4=m |
| 1771 | CONFIG_CRYPTO_BLOWFISH=m | 1771 | CONFIG_CRYPTO_BLOWFISH=m |
| 1772 | # CONFIG_CRYPTO_CAMELLIA is not set | 1772 | # CONFIG_CRYPTO_CAMELLIA is not set |
| 1773 | CONFIG_CRYPTO_CAST5=m | 1773 | CONFIG_CRYPTO_CAST5=m |
| 1774 | CONFIG_CRYPTO_CAST6=m | 1774 | CONFIG_CRYPTO_CAST6=m |
| 1775 | CONFIG_CRYPTO_DES=y | 1775 | CONFIG_CRYPTO_DES=y |
| 1776 | # CONFIG_CRYPTO_FCRYPT is not set | 1776 | # CONFIG_CRYPTO_FCRYPT is not set |
| 1777 | CONFIG_CRYPTO_KHAZAD=m | 1777 | CONFIG_CRYPTO_KHAZAD=m |
| 1778 | CONFIG_CRYPTO_SALSA20=m | 1778 | CONFIG_CRYPTO_SALSA20=m |
| 1779 | # CONFIG_CRYPTO_SEED is not set | 1779 | # CONFIG_CRYPTO_SEED is not set |
| 1780 | CONFIG_CRYPTO_SERPENT=m | 1780 | CONFIG_CRYPTO_SERPENT=m |
| 1781 | CONFIG_CRYPTO_TEA=m | 1781 | CONFIG_CRYPTO_TEA=m |
| 1782 | CONFIG_CRYPTO_TWOFISH=m | 1782 | CONFIG_CRYPTO_TWOFISH=m |
| 1783 | CONFIG_CRYPTO_TWOFISH_COMMON=m | 1783 | CONFIG_CRYPTO_TWOFISH_COMMON=m |
| 1784 | 1784 | ||
| 1785 | # | 1785 | # |
| 1786 | # Compression | 1786 | # Compression |
| 1787 | # | 1787 | # |
| 1788 | CONFIG_CRYPTO_DEFLATE=m | 1788 | CONFIG_CRYPTO_DEFLATE=m |
| 1789 | CONFIG_CRYPTO_LZO=m | 1789 | CONFIG_CRYPTO_LZO=m |
| 1790 | 1790 | ||
| 1791 | # | 1791 | # |
| 1792 | # Random Number Generation | 1792 | # Random Number Generation |
| 1793 | # | 1793 | # |
| 1794 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 1794 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
| 1795 | # CONFIG_CRYPTO_HW is not set | 1795 | # CONFIG_CRYPTO_HW is not set |
| 1796 | # CONFIG_PPC_CLOCK is not set | 1796 | # CONFIG_PPC_CLOCK is not set |
| 1797 | # CONFIG_VIRTUALIZATION is not set | 1797 | # CONFIG_VIRTUALIZATION is not set |
| 1798 | 1798 |
arch/powerpc/include/asm/emulated_ops.h
| 1 | /* | 1 | /* |
| 2 | * Copyright 2007 Sony Corporation | 2 | * Copyright 2007 Sony Corporation |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
| 6 | * the Free Software Foundation; version 2 of the License. | 6 | * the Free Software Foundation; version 2 of the License. |
| 7 | * | 7 | * |
| 8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
| 12 | * | 12 | * |
| 13 | * You should have received a copy of the GNU General Public License | 13 | * You should have received a copy of the GNU General Public License |
| 14 | * along with this program. | 14 | * along with this program. |
| 15 | * If not, see <http://www.gnu.org/licenses/>. | 15 | * If not, see <http://www.gnu.org/licenses/>. |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #ifndef _ASM_POWERPC_EMULATED_OPS_H | 18 | #ifndef _ASM_POWERPC_EMULATED_OPS_H |
| 19 | #define _ASM_POWERPC_EMULATED_OPS_H | 19 | #define _ASM_POWERPC_EMULATED_OPS_H |
| 20 | 20 | ||
| 21 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
| 22 | #include <linux/perf_event.h> | ||
| 22 | 23 | ||
| 23 | 24 | ||
| 24 | #ifdef CONFIG_PPC_EMULATED_STATS | 25 | #ifdef CONFIG_PPC_EMULATED_STATS |
| 25 | 26 | ||
| 26 | struct ppc_emulated_entry { | 27 | struct ppc_emulated_entry { |
| 27 | const char *name; | 28 | const char *name; |
| 28 | atomic_t val; | 29 | atomic_t val; |
| 29 | }; | 30 | }; |
| 30 | 31 | ||
| 31 | extern struct ppc_emulated { | 32 | extern struct ppc_emulated { |
| 32 | #ifdef CONFIG_ALTIVEC | 33 | #ifdef CONFIG_ALTIVEC |
| 33 | struct ppc_emulated_entry altivec; | 34 | struct ppc_emulated_entry altivec; |
| 34 | #endif | 35 | #endif |
| 35 | struct ppc_emulated_entry dcba; | 36 | struct ppc_emulated_entry dcba; |
| 36 | struct ppc_emulated_entry dcbz; | 37 | struct ppc_emulated_entry dcbz; |
| 37 | struct ppc_emulated_entry fp_pair; | 38 | struct ppc_emulated_entry fp_pair; |
| 38 | struct ppc_emulated_entry isel; | 39 | struct ppc_emulated_entry isel; |
| 39 | struct ppc_emulated_entry mcrxr; | 40 | struct ppc_emulated_entry mcrxr; |
| 40 | struct ppc_emulated_entry mfpvr; | 41 | struct ppc_emulated_entry mfpvr; |
| 41 | struct ppc_emulated_entry multiple; | 42 | struct ppc_emulated_entry multiple; |
| 42 | struct ppc_emulated_entry popcntb; | 43 | struct ppc_emulated_entry popcntb; |
| 43 | struct ppc_emulated_entry spe; | 44 | struct ppc_emulated_entry spe; |
| 44 | struct ppc_emulated_entry string; | 45 | struct ppc_emulated_entry string; |
| 45 | struct ppc_emulated_entry unaligned; | 46 | struct ppc_emulated_entry unaligned; |
| 46 | #ifdef CONFIG_MATH_EMULATION | 47 | #ifdef CONFIG_MATH_EMULATION |
| 47 | struct ppc_emulated_entry math; | 48 | struct ppc_emulated_entry math; |
| 48 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) | 49 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) |
| 49 | struct ppc_emulated_entry 8xx; | 50 | struct ppc_emulated_entry 8xx; |
| 50 | #endif | 51 | #endif |
| 51 | #ifdef CONFIG_VSX | 52 | #ifdef CONFIG_VSX |
| 52 | struct ppc_emulated_entry vsx; | 53 | struct ppc_emulated_entry vsx; |
| 53 | #endif | 54 | #endif |
| 54 | } ppc_emulated; | 55 | } ppc_emulated; |
| 55 | 56 | ||
| 56 | extern u32 ppc_warn_emulated; | 57 | extern u32 ppc_warn_emulated; |
| 57 | 58 | ||
| 58 | extern void ppc_warn_emulated_print(const char *type); | 59 | extern void ppc_warn_emulated_print(const char *type); |
| 59 | 60 | ||
| 60 | #define PPC_WARN_EMULATED(type) \ | 61 | #define __PPC_WARN_EMULATED(type) \ |
| 61 | do { \ | 62 | do { \ |
| 62 | atomic_inc(&ppc_emulated.type.val); \ | 63 | atomic_inc(&ppc_emulated.type.val); \ |
| 63 | if (ppc_warn_emulated) \ | 64 | if (ppc_warn_emulated) \ |
| 64 | ppc_warn_emulated_print(ppc_emulated.type.name); \ | 65 | ppc_warn_emulated_print(ppc_emulated.type.name); \ |
| 65 | } while (0) | 66 | } while (0) |
| 66 | 67 | ||
| 67 | #else /* !CONFIG_PPC_EMULATED_STATS */ | 68 | #else /* !CONFIG_PPC_EMULATED_STATS */ |
| 68 | 69 | ||
| 69 | #define PPC_WARN_EMULATED(type) do { } while (0) | 70 | #define __PPC_WARN_EMULATED(type) do { } while (0) |
| 70 | 71 | ||
| 71 | #endif /* !CONFIG_PPC_EMULATED_STATS */ | 72 | #endif /* !CONFIG_PPC_EMULATED_STATS */ |
| 73 | |||
| 74 | #define PPC_WARN_EMULATED(type, regs) \ | ||
| 75 | do { \ | ||
| 76 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ | ||
| 77 | 1, 0, regs, 0); \ | ||
| 78 | __PPC_WARN_EMULATED(type); \ | ||
| 79 | } while (0) | ||
| 80 | |||
| 81 | #define PPC_WARN_ALIGNMENT(type, regs) \ | ||
| 82 | do { \ | ||
| 83 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ | ||
| 84 | 1, 0, regs, regs->dar); \ | ||
| 85 | __PPC_WARN_EMULATED(type); \ | ||
| 86 | } while (0) | ||
| 72 | 87 | ||
| 73 | #endif /* _ASM_POWERPC_EMULATED_OPS_H */ | 88 | #endif /* _ASM_POWERPC_EMULATED_OPS_H */ |
| 74 | 89 |
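The hunk above wires PowerPC's emulated-instruction and alignment-fault paths into the perf software-event machinery: each hit now emits a PERF_COUNT_SW_EMULATION_FAULTS or PERF_COUNT_SW_ALIGNMENT_FAULTS event (the latter tagging the faulting address from regs->dar) before bumping the existing per-instruction counter. A minimal sketch of a caller follows; the handler name and the popcntb case are illustrative assumptions, not code from this commit.

/* Sketch only: emulate_popcntb() is a hypothetical fixup routine. */
#include <asm/emulated_ops.h>
#include <asm/ptrace.h>

static int emulate_popcntb(struct pt_regs *regs, u32 instword)
{
	/* Bumps ppc_emulated.popcntb.val and, with this commit, also
	 * emits a PERF_COUNT_SW_EMULATION_FAULTS software event that
	 * 'perf' can count and sample. */
	PPC_WARN_EMULATED(popcntb, regs);

	/* ... emulate the instruction and update regs here ... */
	return 0;
}

Alignment fixups would call PPC_WARN_ALIGNMENT(type, regs) instead, which is why that variant also passes regs->dar as the event address.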
arch/powerpc/include/asm/hvcall.h
| 1 | #ifndef _ASM_POWERPC_HVCALL_H | 1 | #ifndef _ASM_POWERPC_HVCALL_H |
| 2 | #define _ASM_POWERPC_HVCALL_H | 2 | #define _ASM_POWERPC_HVCALL_H |
| 3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
| 4 | 4 | ||
| 5 | #define HVSC .long 0x44000022 | 5 | #define HVSC .long 0x44000022 |
| 6 | 6 | ||
| 7 | #define H_SUCCESS 0 | 7 | #define H_SUCCESS 0 |
| 8 | #define H_BUSY 1 /* Hardware busy -- retry later */ | 8 | #define H_BUSY 1 /* Hardware busy -- retry later */ |
| 9 | #define H_CLOSED 2 /* Resource closed */ | 9 | #define H_CLOSED 2 /* Resource closed */ |
| 10 | #define H_NOT_AVAILABLE 3 | 10 | #define H_NOT_AVAILABLE 3 |
| 11 | #define H_CONSTRAINED 4 /* Resource request constrained to max allowed */ | 11 | #define H_CONSTRAINED 4 /* Resource request constrained to max allowed */ |
| 12 | #define H_PARTIAL 5 | 12 | #define H_PARTIAL 5 |
| 13 | #define H_IN_PROGRESS 14 /* Kind of like busy */ | 13 | #define H_IN_PROGRESS 14 /* Kind of like busy */ |
| 14 | #define H_PAGE_REGISTERED 15 | 14 | #define H_PAGE_REGISTERED 15 |
| 15 | #define H_PARTIAL_STORE 16 | 15 | #define H_PARTIAL_STORE 16 |
| 16 | #define H_PENDING 17 /* returned from H_POLL_PENDING */ | 16 | #define H_PENDING 17 /* returned from H_POLL_PENDING */ |
| 17 | #define H_CONTINUE 18 /* Returned from H_Join on success */ | 17 | #define H_CONTINUE 18 /* Returned from H_Join on success */ |
| 18 | #define H_LONG_BUSY_START_RANGE 9900 /* Start of long busy range */ | 18 | #define H_LONG_BUSY_START_RANGE 9900 /* Start of long busy range */ |
| 19 | #define H_LONG_BUSY_ORDER_1_MSEC 9900 /* Long busy, hint that 1msec \ | 19 | #define H_LONG_BUSY_ORDER_1_MSEC 9900 /* Long busy, hint that 1msec \ |
| 20 | is a good time to retry */ | 20 | is a good time to retry */ |
| 21 | #define H_LONG_BUSY_ORDER_10_MSEC 9901 /* Long busy, hint that 10msec \ | 21 | #define H_LONG_BUSY_ORDER_10_MSEC 9901 /* Long busy, hint that 10msec \ |
| 22 | is a good time to retry */ | 22 | is a good time to retry */ |
| 23 | #define H_LONG_BUSY_ORDER_100_MSEC 9902 /* Long busy, hint that 100msec \ | 23 | #define H_LONG_BUSY_ORDER_100_MSEC 9902 /* Long busy, hint that 100msec \ |
| 24 | is a good time to retry */ | 24 | is a good time to retry */ |
| 25 | #define H_LONG_BUSY_ORDER_1_SEC 9903 /* Long busy, hint that 1sec \ | 25 | #define H_LONG_BUSY_ORDER_1_SEC 9903 /* Long busy, hint that 1sec \ |
| 26 | is a good time to retry */ | 26 | is a good time to retry */ |
| 27 | #define H_LONG_BUSY_ORDER_10_SEC 9904 /* Long busy, hint that 10sec \ | 27 | #define H_LONG_BUSY_ORDER_10_SEC 9904 /* Long busy, hint that 10sec \ |
| 28 | is a good time to retry */ | 28 | is a good time to retry */ |
| 29 | #define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \ | 29 | #define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \ |
| 30 | is a good time to retry */ | 30 | is a good time to retry */ |
| 31 | #define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */ | 31 | #define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */ |
| 32 | #define H_HARDWARE -1 /* Hardware error */ | 32 | #define H_HARDWARE -1 /* Hardware error */ |
| 33 | #define H_FUNCTION -2 /* Function not supported */ | 33 | #define H_FUNCTION -2 /* Function not supported */ |
| 34 | #define H_PRIVILEGE -3 /* Caller not privileged */ | 34 | #define H_PRIVILEGE -3 /* Caller not privileged */ |
| 35 | #define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */ | 35 | #define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */ |
| 36 | #define H_BAD_MODE -5 /* Illegal msr value */ | 36 | #define H_BAD_MODE -5 /* Illegal msr value */ |
| 37 | #define H_PTEG_FULL -6 /* PTEG is full */ | 37 | #define H_PTEG_FULL -6 /* PTEG is full */ |
| 38 | #define H_NOT_FOUND -7 /* PTE was not found */ | 38 | #define H_NOT_FOUND -7 /* PTE was not found */ |
| 39 | #define H_RESERVED_DABR -8 /* DABR address is reserved by the hypervisor on this processor */ | 39 | #define H_RESERVED_DABR -8 /* DABR address is reserved by the hypervisor on this processor */ |
| 40 | #define H_NO_MEM -9 | 40 | #define H_NO_MEM -9 |
| 41 | #define H_AUTHORITY -10 | 41 | #define H_AUTHORITY -10 |
| 42 | #define H_PERMISSION -11 | 42 | #define H_PERMISSION -11 |
| 43 | #define H_DROPPED -12 | 43 | #define H_DROPPED -12 |
| 44 | #define H_SOURCE_PARM -13 | 44 | #define H_SOURCE_PARM -13 |
| 45 | #define H_DEST_PARM -14 | 45 | #define H_DEST_PARM -14 |
| 46 | #define H_REMOTE_PARM -15 | 46 | #define H_REMOTE_PARM -15 |
| 47 | #define H_RESOURCE -16 | 47 | #define H_RESOURCE -16 |
| 48 | #define H_ADAPTER_PARM -17 | 48 | #define H_ADAPTER_PARM -17 |
| 49 | #define H_RH_PARM -18 | 49 | #define H_RH_PARM -18 |
| 50 | #define H_RCQ_PARM -19 | 50 | #define H_RCQ_PARM -19 |
| 51 | #define H_SCQ_PARM -20 | 51 | #define H_SCQ_PARM -20 |
| 52 | #define H_EQ_PARM -21 | 52 | #define H_EQ_PARM -21 |
| 53 | #define H_RT_PARM -22 | 53 | #define H_RT_PARM -22 |
| 54 | #define H_ST_PARM -23 | 54 | #define H_ST_PARM -23 |
| 55 | #define H_SIGT_PARM -24 | 55 | #define H_SIGT_PARM -24 |
| 56 | #define H_TOKEN_PARM -25 | 56 | #define H_TOKEN_PARM -25 |
| 57 | #define H_MLENGTH_PARM -27 | 57 | #define H_MLENGTH_PARM -27 |
| 58 | #define H_MEM_PARM -28 | 58 | #define H_MEM_PARM -28 |
| 59 | #define H_MEM_ACCESS_PARM -29 | 59 | #define H_MEM_ACCESS_PARM -29 |
| 60 | #define H_ATTR_PARM -30 | 60 | #define H_ATTR_PARM -30 |
| 61 | #define H_PORT_PARM -31 | 61 | #define H_PORT_PARM -31 |
| 62 | #define H_MCG_PARM -32 | 62 | #define H_MCG_PARM -32 |
| 63 | #define H_VL_PARM -33 | 63 | #define H_VL_PARM -33 |
| 64 | #define H_TSIZE_PARM -34 | 64 | #define H_TSIZE_PARM -34 |
| 65 | #define H_TRACE_PARM -35 | 65 | #define H_TRACE_PARM -35 |
| 66 | 66 | ||
| 67 | #define H_MASK_PARM -37 | 67 | #define H_MASK_PARM -37 |
| 68 | #define H_MCG_FULL -38 | 68 | #define H_MCG_FULL -38 |
| 69 | #define H_ALIAS_EXIST -39 | 69 | #define H_ALIAS_EXIST -39 |
| 70 | #define H_P_COUNTER -40 | 70 | #define H_P_COUNTER -40 |
| 71 | #define H_TABLE_FULL -41 | 71 | #define H_TABLE_FULL -41 |
| 72 | #define H_ALT_TABLE -42 | 72 | #define H_ALT_TABLE -42 |
| 73 | #define H_MR_CONDITION -43 | 73 | #define H_MR_CONDITION -43 |
| 74 | #define H_NOT_ENOUGH_RESOURCES -44 | 74 | #define H_NOT_ENOUGH_RESOURCES -44 |
| 75 | #define H_R_STATE -45 | 75 | #define H_R_STATE -45 |
| 76 | #define H_RESCINDEND -46 | 76 | #define H_RESCINDEND -46 |
| 77 | 77 | ||
| 78 | 78 | ||
| 79 | /* Long Busy is a condition that can be returned by the firmware | 79 | /* Long Busy is a condition that can be returned by the firmware |
| 80 | * when a call cannot be completed now, but the identical call | 80 | * when a call cannot be completed now, but the identical call |
| 81 | * should be retried later. This prevents calls blocking in the | 81 | * should be retried later. This prevents calls blocking in the |
| 82 | * firmware for long periods of time. Annoyingly the firmware can return | 82 | * firmware for long periods of time. Annoyingly the firmware can return |
| 83 | * a range of return codes, hinting at how long we should wait before | 83 | * a range of return codes, hinting at how long we should wait before |
| 84 | * retrying. If you don't care for the hint, the macro below is a good | 84 | * retrying. If you don't care for the hint, the macro below is a good |
| 85 | * way to check for the long_busy return codes. | 85 | * way to check for the long_busy return codes. |
| 86 | */ | 86 | */ |
| 87 | #define H_IS_LONG_BUSY(x) ((x >= H_LONG_BUSY_START_RANGE) \ | 87 | #define H_IS_LONG_BUSY(x) ((x >= H_LONG_BUSY_START_RANGE) \ |
| 88 | && (x <= H_LONG_BUSY_END_RANGE)) | 88 | && (x <= H_LONG_BUSY_END_RANGE)) |
| 89 | 89 | ||
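To make the retry hint concrete, a caller that ignores the specific 99xx value might loop like the sketch below; the fixed 1 ms delay and the bare-opcode call are illustrative assumptions, not code from this header.

/* Sketch: retry an hcall while firmware reports a long-busy code. */
#include <linux/delay.h>
#include <asm/hvcall.h>

static long hcall_retry(unsigned long opcode)
{
	long rc;

	do {
		rc = plpar_hcall_norets(opcode);
		if (H_IS_LONG_BUSY(rc))
			mdelay(1);	/* a real caller would scale the
					 * delay by the 99xx hint value */
	} while (H_IS_LONG_BUSY(rc));

	return rc;
}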
| 90 | /* Flags */ | 90 | /* Flags */ |
| 91 | #define H_LARGE_PAGE (1UL<<(63-16)) | 91 | #define H_LARGE_PAGE (1UL<<(63-16)) |
| 92 | #define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */ | 92 | #define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */ |
| 93 | #define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */ | 93 | #define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */ |
| 94 | #define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */ | 94 | #define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */ |
| 95 | #define H_PAGE_STATE_CHANGE (1UL<<(63-28)) | 95 | #define H_PAGE_STATE_CHANGE (1UL<<(63-28)) |
| 96 | #define H_PAGE_UNUSED ((1UL<<(63-29)) | (1UL<<(63-30))) | 96 | #define H_PAGE_UNUSED ((1UL<<(63-29)) | (1UL<<(63-30))) |
| 97 | #define H_PAGE_SET_UNUSED (H_PAGE_STATE_CHANGE | H_PAGE_UNUSED) | 97 | #define H_PAGE_SET_UNUSED (H_PAGE_STATE_CHANGE | H_PAGE_UNUSED) |
| 98 | #define H_PAGE_SET_LOANED (H_PAGE_SET_UNUSED | (1UL<<(63-31))) | 98 | #define H_PAGE_SET_LOANED (H_PAGE_SET_UNUSED | (1UL<<(63-31))) |
| 99 | #define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE | 99 | #define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE |
| 100 | #define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */ | 100 | #define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */ |
| 101 | #define H_ANDCOND (1UL<<(63-33)) | 101 | #define H_ANDCOND (1UL<<(63-33)) |
| 102 | #define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ | 102 | #define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ |
| 103 | #define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages) */ | 103 | #define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages) */ |
| 104 | #define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */ | 104 | #define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */ |
| 105 | #define H_COPY_PAGE (1UL<<(63-49)) | 105 | #define H_COPY_PAGE (1UL<<(63-49)) |
| 106 | #define H_N (1UL<<(63-61)) | 106 | #define H_N (1UL<<(63-61)) |
| 107 | #define H_PP1 (1UL<<(63-62)) | 107 | #define H_PP1 (1UL<<(63-62)) |
| 108 | #define H_PP2 (1UL<<(63-63)) | 108 | #define H_PP2 (1UL<<(63-63)) |
| 109 | 109 | ||
| 110 | /* VASI States */ | 110 | /* VASI States */ |
| 111 | #define H_VASI_INVALID 0 | 111 | #define H_VASI_INVALID 0 |
| 112 | #define H_VASI_ENABLED 1 | 112 | #define H_VASI_ENABLED 1 |
| 113 | #define H_VASI_ABORTED 2 | 113 | #define H_VASI_ABORTED 2 |
| 114 | #define H_VASI_SUSPENDING 3 | 114 | #define H_VASI_SUSPENDING 3 |
| 115 | #define H_VASI_SUSPENDED 4 | 115 | #define H_VASI_SUSPENDED 4 |
| 116 | #define H_VASI_RESUMED 5 | 116 | #define H_VASI_RESUMED 5 |
| 117 | #define H_VASI_COMPLETED 6 | 117 | #define H_VASI_COMPLETED 6 |
| 118 | 118 | ||
| 119 | /* DABRX flags */ | 119 | /* DABRX flags */ |
| 120 | #define H_DABRX_HYPERVISOR (1UL<<(63-61)) | 120 | #define H_DABRX_HYPERVISOR (1UL<<(63-61)) |
| 121 | #define H_DABRX_KERNEL (1UL<<(63-62)) | 121 | #define H_DABRX_KERNEL (1UL<<(63-62)) |
| 122 | #define H_DABRX_USER (1UL<<(63-63)) | 122 | #define H_DABRX_USER (1UL<<(63-63)) |
| 123 | 123 | ||
| 124 | /* Each control block has to be on a 4K boundary */ | 124 | /* Each control block has to be on a 4K boundary */ |
| 125 | #define H_CB_ALIGNMENT 4096 | 125 | #define H_CB_ALIGNMENT 4096 |
| 126 | 126 | ||
| 127 | /* pSeries hypervisor opcodes */ | 127 | /* pSeries hypervisor opcodes */ |
| 128 | #define H_REMOVE 0x04 | 128 | #define H_REMOVE 0x04 |
| 129 | #define H_ENTER 0x08 | 129 | #define H_ENTER 0x08 |
| 130 | #define H_READ 0x0c | 130 | #define H_READ 0x0c |
| 131 | #define H_CLEAR_MOD 0x10 | 131 | #define H_CLEAR_MOD 0x10 |
| 132 | #define H_CLEAR_REF 0x14 | 132 | #define H_CLEAR_REF 0x14 |
| 133 | #define H_PROTECT 0x18 | 133 | #define H_PROTECT 0x18 |
| 134 | #define H_GET_TCE 0x1c | 134 | #define H_GET_TCE 0x1c |
| 135 | #define H_PUT_TCE 0x20 | 135 | #define H_PUT_TCE 0x20 |
| 136 | #define H_SET_SPRG0 0x24 | 136 | #define H_SET_SPRG0 0x24 |
| 137 | #define H_SET_DABR 0x28 | 137 | #define H_SET_DABR 0x28 |
| 138 | #define H_PAGE_INIT 0x2c | 138 | #define H_PAGE_INIT 0x2c |
| 139 | #define H_SET_ASR 0x30 | 139 | #define H_SET_ASR 0x30 |
| 140 | #define H_ASR_ON 0x34 | 140 | #define H_ASR_ON 0x34 |
| 141 | #define H_ASR_OFF 0x38 | 141 | #define H_ASR_OFF 0x38 |
| 142 | #define H_LOGICAL_CI_LOAD 0x3c | 142 | #define H_LOGICAL_CI_LOAD 0x3c |
| 143 | #define H_LOGICAL_CI_STORE 0x40 | 143 | #define H_LOGICAL_CI_STORE 0x40 |
| 144 | #define H_LOGICAL_CACHE_LOAD 0x44 | 144 | #define H_LOGICAL_CACHE_LOAD 0x44 |
| 145 | #define H_LOGICAL_CACHE_STORE 0x48 | 145 | #define H_LOGICAL_CACHE_STORE 0x48 |
| 146 | #define H_LOGICAL_ICBI 0x4c | 146 | #define H_LOGICAL_ICBI 0x4c |
| 147 | #define H_LOGICAL_DCBF 0x50 | 147 | #define H_LOGICAL_DCBF 0x50 |
| 148 | #define H_GET_TERM_CHAR 0x54 | 148 | #define H_GET_TERM_CHAR 0x54 |
| 149 | #define H_PUT_TERM_CHAR 0x58 | 149 | #define H_PUT_TERM_CHAR 0x58 |
| 150 | #define H_REAL_TO_LOGICAL 0x5c | 150 | #define H_REAL_TO_LOGICAL 0x5c |
| 151 | #define H_HYPERVISOR_DATA 0x60 | 151 | #define H_HYPERVISOR_DATA 0x60 |
| 152 | #define H_EOI 0x64 | 152 | #define H_EOI 0x64 |
| 153 | #define H_CPPR 0x68 | 153 | #define H_CPPR 0x68 |
| 154 | #define H_IPI 0x6c | 154 | #define H_IPI 0x6c |
| 155 | #define H_IPOLL 0x70 | 155 | #define H_IPOLL 0x70 |
| 156 | #define H_XIRR 0x74 | 156 | #define H_XIRR 0x74 |
| 157 | #define H_PERFMON 0x7c | 157 | #define H_PERFMON 0x7c |
| 158 | #define H_MIGRATE_DMA 0x78 | 158 | #define H_MIGRATE_DMA 0x78 |
| 159 | #define H_REGISTER_VPA 0xDC | 159 | #define H_REGISTER_VPA 0xDC |
| 160 | #define H_CEDE 0xE0 | 160 | #define H_CEDE 0xE0 |
| 161 | #define H_CONFER 0xE4 | 161 | #define H_CONFER 0xE4 |
| 162 | #define H_PROD 0xE8 | 162 | #define H_PROD 0xE8 |
| 163 | #define H_GET_PPP 0xEC | 163 | #define H_GET_PPP 0xEC |
| 164 | #define H_SET_PPP 0xF0 | 164 | #define H_SET_PPP 0xF0 |
| 165 | #define H_PURR 0xF4 | 165 | #define H_PURR 0xF4 |
| 166 | #define H_PIC 0xF8 | 166 | #define H_PIC 0xF8 |
| 167 | #define H_REG_CRQ 0xFC | 167 | #define H_REG_CRQ 0xFC |
| 168 | #define H_FREE_CRQ 0x100 | 168 | #define H_FREE_CRQ 0x100 |
| 169 | #define H_VIO_SIGNAL 0x104 | 169 | #define H_VIO_SIGNAL 0x104 |
| 170 | #define H_SEND_CRQ 0x108 | 170 | #define H_SEND_CRQ 0x108 |
| 171 | #define H_COPY_RDMA 0x110 | 171 | #define H_COPY_RDMA 0x110 |
| 172 | #define H_REGISTER_LOGICAL_LAN 0x114 | 172 | #define H_REGISTER_LOGICAL_LAN 0x114 |
| 173 | #define H_FREE_LOGICAL_LAN 0x118 | 173 | #define H_FREE_LOGICAL_LAN 0x118 |
| 174 | #define H_ADD_LOGICAL_LAN_BUFFER 0x11C | 174 | #define H_ADD_LOGICAL_LAN_BUFFER 0x11C |
| 175 | #define H_SEND_LOGICAL_LAN 0x120 | 175 | #define H_SEND_LOGICAL_LAN 0x120 |
| 176 | #define H_BULK_REMOVE 0x124 | 176 | #define H_BULK_REMOVE 0x124 |
| 177 | #define H_MULTICAST_CTRL 0x130 | 177 | #define H_MULTICAST_CTRL 0x130 |
| 178 | #define H_SET_XDABR 0x134 | 178 | #define H_SET_XDABR 0x134 |
| 179 | #define H_STUFF_TCE 0x138 | 179 | #define H_STUFF_TCE 0x138 |
| 180 | #define H_PUT_TCE_INDIRECT 0x13C | 180 | #define H_PUT_TCE_INDIRECT 0x13C |
| 181 | #define H_CHANGE_LOGICAL_LAN_MAC 0x14C | 181 | #define H_CHANGE_LOGICAL_LAN_MAC 0x14C |
| 182 | #define H_VTERM_PARTNER_INFO 0x150 | 182 | #define H_VTERM_PARTNER_INFO 0x150 |
| 183 | #define H_REGISTER_VTERM 0x154 | 183 | #define H_REGISTER_VTERM 0x154 |
| 184 | #define H_FREE_VTERM 0x158 | 184 | #define H_FREE_VTERM 0x158 |
| 185 | #define H_RESET_EVENTS 0x15C | 185 | #define H_RESET_EVENTS 0x15C |
| 186 | #define H_ALLOC_RESOURCE 0x160 | 186 | #define H_ALLOC_RESOURCE 0x160 |
| 187 | #define H_FREE_RESOURCE 0x164 | 187 | #define H_FREE_RESOURCE 0x164 |
| 188 | #define H_MODIFY_QP 0x168 | 188 | #define H_MODIFY_QP 0x168 |
| 189 | #define H_QUERY_QP 0x16C | 189 | #define H_QUERY_QP 0x16C |
| 190 | #define H_REREGISTER_PMR 0x170 | 190 | #define H_REREGISTER_PMR 0x170 |
| 191 | #define H_REGISTER_SMR 0x174 | 191 | #define H_REGISTER_SMR 0x174 |
| 192 | #define H_QUERY_MR 0x178 | 192 | #define H_QUERY_MR 0x178 |
| 193 | #define H_QUERY_MW 0x17C | 193 | #define H_QUERY_MW 0x17C |
| 194 | #define H_QUERY_HCA 0x180 | 194 | #define H_QUERY_HCA 0x180 |
| 195 | #define H_QUERY_PORT 0x184 | 195 | #define H_QUERY_PORT 0x184 |
| 196 | #define H_MODIFY_PORT 0x188 | 196 | #define H_MODIFY_PORT 0x188 |
| 197 | #define H_DEFINE_AQP1 0x18C | 197 | #define H_DEFINE_AQP1 0x18C |
| 198 | #define H_GET_TRACE_BUFFER 0x190 | 198 | #define H_GET_TRACE_BUFFER 0x190 |
| 199 | #define H_DEFINE_AQP0 0x194 | 199 | #define H_DEFINE_AQP0 0x194 |
| 200 | #define H_RESIZE_MR 0x198 | 200 | #define H_RESIZE_MR 0x198 |
| 201 | #define H_ATTACH_MCQP 0x19C | 201 | #define H_ATTACH_MCQP 0x19C |
| 202 | #define H_DETACH_MCQP 0x1A0 | 202 | #define H_DETACH_MCQP 0x1A0 |
| 203 | #define H_CREATE_RPT 0x1A4 | 203 | #define H_CREATE_RPT 0x1A4 |
| 204 | #define H_REMOVE_RPT 0x1A8 | 204 | #define H_REMOVE_RPT 0x1A8 |
| 205 | #define H_REGISTER_RPAGES 0x1AC | 205 | #define H_REGISTER_RPAGES 0x1AC |
| 206 | #define H_DISABLE_AND_GETC 0x1B0 | 206 | #define H_DISABLE_AND_GETC 0x1B0 |
| 207 | #define H_ERROR_DATA 0x1B4 | 207 | #define H_ERROR_DATA 0x1B4 |
| 208 | #define H_GET_HCA_INFO 0x1B8 | 208 | #define H_GET_HCA_INFO 0x1B8 |
| 209 | #define H_GET_PERF_COUNT 0x1BC | 209 | #define H_GET_PERF_COUNT 0x1BC |
| 210 | #define H_MANAGE_TRACE 0x1C0 | 210 | #define H_MANAGE_TRACE 0x1C0 |
| 211 | #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 | 211 | #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 |
| 212 | #define H_QUERY_INT_STATE 0x1E4 | 212 | #define H_QUERY_INT_STATE 0x1E4 |
| 213 | #define H_POLL_PENDING 0x1D8 | 213 | #define H_POLL_PENDING 0x1D8 |
| 214 | #define H_ILLAN_ATTRIBUTES 0x244 | 214 | #define H_ILLAN_ATTRIBUTES 0x244 |
| 215 | #define H_JOIN 0x298 | 215 | #define H_JOIN 0x298 |
| 216 | #define H_VASI_STATE 0x2A4 | 216 | #define H_VASI_STATE 0x2A4 |
| 217 | #define H_ENABLE_CRQ 0x2B0 | 217 | #define H_ENABLE_CRQ 0x2B0 |
| 218 | #define H_SET_MPP 0x2D0 | 218 | #define H_SET_MPP 0x2D0 |
| 219 | #define H_GET_MPP 0x2D4 | 219 | #define H_GET_MPP 0x2D4 |
| 220 | #define MAX_HCALL_OPCODE H_GET_MPP | 220 | #define MAX_HCALL_OPCODE H_GET_MPP |
| 221 | 221 | ||
| 222 | #ifndef __ASSEMBLY__ | 222 | #ifndef __ASSEMBLY__ |
| 223 | 223 | ||
| 224 | /** | 224 | /** |
| 225 | * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments | 225 | * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments |
| 226 | * @opcode: The hypervisor call to make. | 226 | * @opcode: The hypervisor call to make. |
| 227 | * | 227 | * |
| 228 | * This call supports up to 7 arguments and only returns the status of | 228 | * This call supports up to 7 arguments and only returns the status of |
| 229 | * the hcall. Use this version where possible, it's slightly faster than | 229 | * the hcall. Use this version where possible, it's slightly faster than |
| 230 | * the other plpar_hcalls. | 230 | * the other plpar_hcalls. |
| 231 | */ | 231 | */ |
| 232 | long plpar_hcall_norets(unsigned long opcode, ...); | 232 | long plpar_hcall_norets(unsigned long opcode, ...); |
| 233 | 233 | ||
| 234 | /** | 234 | /** |
| 235 | * plpar_hcall: - Make a pseries hypervisor call | 235 | * plpar_hcall: - Make a pseries hypervisor call |
| 236 | * @opcode: The hypervisor call to make. | 236 | * @opcode: The hypervisor call to make. |
| 237 | * @retbuf: Buffer to store up to 4 return arguments in. | 237 | * @retbuf: Buffer to store up to 4 return arguments in. |
| 238 | * | 238 | * |
| 239 | * This call supports up to 6 arguments and 4 return arguments. Use | 239 | * This call supports up to 6 arguments and 4 return arguments. Use |
| 240 | * PLPAR_HCALL_BUFSIZE to size the return argument buffer. | 240 | * PLPAR_HCALL_BUFSIZE to size the return argument buffer. |
| 241 | * | 241 | * |
| 242 | * Used for all but the craziest of phyp interfaces (see plpar_hcall9) | 242 | * Used for all but the craziest of phyp interfaces (see plpar_hcall9) |
| 243 | */ | 243 | */ |
| 244 | #define PLPAR_HCALL_BUFSIZE 4 | 244 | #define PLPAR_HCALL_BUFSIZE 4 |
| 245 | long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...); | 245 | long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...); |
| 246 | 246 | ||
| 247 | /** | 247 | /** |
| 248 | * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats | 248 | * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats |
| 249 | * @opcode: The hypervisor call to make. | 249 | * @opcode: The hypervisor call to make. |
| 250 | * @retbuf: Buffer to store up to 4 return arguments in. | 250 | * @retbuf: Buffer to store up to 4 return arguments in. |
| 251 | * | 251 | * |
| 252 | * This call supports up to 6 arguments and 4 return arguments. Use | 252 | * This call supports up to 6 arguments and 4 return arguments. Use |
| 253 | * PLPAR_HCALL_BUFSIZE to size the return argument buffer. | 253 | * PLPAR_HCALL_BUFSIZE to size the return argument buffer. |
| 254 | * | 254 | * |
| 255 | * Used when the phyp interface needs to be called in real mode. Similar to | 255 | * Used when the phyp interface needs to be called in real mode. Similar to |
| 256 | * plpar_hcall, but plpar_hcall_raw works in real mode and does not | 256 | * plpar_hcall, but plpar_hcall_raw works in real mode and does not |
| 257 | * calculate hypervisor call statistics. | 257 | * calculate hypervisor call statistics. |
| 258 | */ | 258 | */ |
| 259 | long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...); | 259 | long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...); |
| 260 | 260 | ||
| 261 | /** | 261 | /** |
| 262 | * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments | 262 | * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments |
| 263 | * @opcode: The hypervisor call to make. | 263 | * @opcode: The hypervisor call to make. |
| 264 | * @retbuf: Buffer to store up to 9 return arguments in. | 264 | * @retbuf: Buffer to store up to 9 return arguments in. |
| 265 | * | 265 | * |
| 266 | * This call supports up to 9 arguments and 9 return arguments. Use | 266 | * This call supports up to 9 arguments and 9 return arguments. Use |
| 267 | * PLPAR_HCALL9_BUFSIZE to size the return argument buffer. | 267 | * PLPAR_HCALL9_BUFSIZE to size the return argument buffer. |
| 268 | */ | 268 | */ |
| 269 | #define PLPAR_HCALL9_BUFSIZE 9 | 269 | #define PLPAR_HCALL9_BUFSIZE 9 |
| 270 | long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...); | 270 | long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...); |
| 271 | 271 | ||
| 272 | /* For hcall instrumentation. One structure per-hcall, per-CPU */ | 272 | /* For hcall instrumentation. One structure per-hcall, per-CPU */ |
| 273 | struct hcall_stats { | 273 | struct hcall_stats { |
| 274 | unsigned long num_calls; /* number of calls (on this CPU) */ | 274 | unsigned long num_calls; /* number of calls (on this CPU) */ |
| 275 | unsigned long tb_total; /* total wall time (mftb) of calls. */ | 275 | unsigned long tb_total; /* total wall time (mftb) of calls. */ |
| 276 | unsigned long purr_total; /* total cpu time (PURR) of calls. */ | 276 | unsigned long purr_total; /* total cpu time (PURR) of calls. */ |
| 277 | unsigned long tb_start; | ||
| 278 | unsigned long purr_start; | ||
| 277 | }; | 279 | }; |
| 278 | #define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) | 280 | #define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) |
| 279 | 281 | ||
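The `>> 2` in HCALL_STAT_ARRAY_SIZE works because every opcode in the table above is a multiple of 4, so shifting gives a dense array index; a minimal sketch of the lookup, with `hstats` as a hypothetical per-CPU array name:

    static struct hcall_stats hstats[HCALL_STAT_ARRAY_SIZE];

    static inline struct hcall_stats *stats_for(unsigned long opcode)
    {
        /* e.g. H_MANAGE_TRACE (0x1C0) maps to index 0x70 */
        return &hstats[opcode >> 2];
    }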
| 280 | struct hvcall_mpp_data { | 282 | struct hvcall_mpp_data { |
| 281 | unsigned long entitled_mem; | 283 | unsigned long entitled_mem; |
| 282 | unsigned long mapped_mem; | 284 | unsigned long mapped_mem; |
| 283 | unsigned short group_num; | 285 | unsigned short group_num; |
| 284 | unsigned short pool_num; | 286 | unsigned short pool_num; |
| 285 | unsigned char mem_weight; | 287 | unsigned char mem_weight; |
| 286 | unsigned char unallocated_mem_weight; | 288 | unsigned char unallocated_mem_weight; |
| 287 | unsigned long unallocated_entitlement; /* value in bytes */ | 289 | unsigned long unallocated_entitlement; /* value in bytes */ |
| 288 | unsigned long pool_size; | 290 | unsigned long pool_size; |
| 289 | signed long loan_request; | 291 | signed long loan_request; |
| 290 | unsigned long backing_mem; | 292 | unsigned long backing_mem; |
| 291 | }; | 293 | }; |
| 292 | 294 | ||
| 293 | int h_get_mpp(struct hvcall_mpp_data *); | 295 | int h_get_mpp(struct hvcall_mpp_data *); |
| 294 | 296 | ||
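A sketch of how h_get_mpp plausibly wraps plpar_hcall9 and unpacks the return buffer into struct hvcall_mpp_data; the exact slot-to-field mapping lives in the pseries lpar code, so the assignments below are illustrative only.

    static long h_get_mpp_sketch(struct hvcall_mpp_data *mpp)
    {
        unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
        long rc;

        rc = plpar_hcall9(H_GET_MPP, retbuf);    /* up to 9 return arguments */
        if (rc == H_SUCCESS) {
            mpp->entitled_mem = retbuf[0];    /* illustrative slot choices */
            mpp->mapped_mem   = retbuf[1];
        }
        return rc;
    }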
| 295 | #ifdef CONFIG_PPC_PSERIES | 297 | #ifdef CONFIG_PPC_PSERIES |
| 296 | extern int CMO_PrPSP; | 298 | extern int CMO_PrPSP; |
| 297 | extern int CMO_SecPSP; | 299 | extern int CMO_SecPSP; |
| 298 | extern unsigned long CMO_PageSize; | 300 | extern unsigned long CMO_PageSize; |
| 299 | 301 | ||
| 300 | static inline int cmo_get_primary_psp(void) | 302 | static inline int cmo_get_primary_psp(void) |
| 301 | { | 303 | { |
| 302 | return CMO_PrPSP; | 304 | return CMO_PrPSP; |
| 303 | } | 305 | } |
| 304 | 306 | ||
| 305 | static inline int cmo_get_secondary_psp(void) | 307 | static inline int cmo_get_secondary_psp(void) |
| 306 | { | 308 | { |
| 307 | return CMO_SecPSP; | 309 | return CMO_SecPSP; |
| 308 | } | 310 | } |
| 309 | 311 | ||
| 310 | static inline unsigned long cmo_get_page_size(void) | 312 | static inline unsigned long cmo_get_page_size(void) |
| 311 | { | 313 | { |
| 312 | return CMO_PageSize; | 314 | return CMO_PageSize; |
| 313 | } | 315 | } |
| 314 | #endif /* CONFIG_PPC_PSERIES */ | 316 | #endif /* CONFIG_PPC_PSERIES */ |
| 315 | 317 | ||
| 316 | #endif /* __ASSEMBLY__ */ | 318 | #endif /* __ASSEMBLY__ */ |
| 317 | #endif /* __KERNEL__ */ | 319 | #endif /* __KERNEL__ */ |
| 318 | #endif /* _ASM_POWERPC_HVCALL_H */ | 320 | #endif /* _ASM_POWERPC_HVCALL_H */ |
| 319 | 321 |
arch/powerpc/include/asm/reg.h
| 1 | /* | 1 | /* |
| 2 | * Contains the definition of registers common to all PowerPC variants. | 2 | * Contains the definition of registers common to all PowerPC variants. |
| 3 | * If a register definition has been changed in a different PowerPC | 3 | * If a register definition has been changed in a different PowerPC |
| 4 | * variant, we will wrap it in #ifndef XXX ... #endif, and have the | 4 | * variant, we will wrap it in #ifndef XXX ... #endif, and have the |
| 5 | * number used in the Programming Environments Manual For 32-Bit | 5 | * number used in the Programming Environments Manual For 32-Bit |
| 6 | * Implementations of the PowerPC Architecture (a.k.a. Green Book) here. | 6 | * Implementations of the PowerPC Architecture (a.k.a. Green Book) here. |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #ifndef _ASM_POWERPC_REG_H | 9 | #ifndef _ASM_POWERPC_REG_H |
| 10 | #define _ASM_POWERPC_REG_H | 10 | #define _ASM_POWERPC_REG_H |
| 11 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
| 12 | 12 | ||
| 13 | #include <linux/stringify.h> | 13 | #include <linux/stringify.h> |
| 14 | #include <asm/cputable.h> | 14 | #include <asm/cputable.h> |
| 15 | 15 | ||
| 16 | /* Pickup Book E specific registers. */ | 16 | /* Pickup Book E specific registers. */ |
| 17 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | 17 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) |
| 18 | #include <asm/reg_booke.h> | 18 | #include <asm/reg_booke.h> |
| 19 | #endif /* CONFIG_BOOKE || CONFIG_40x */ | 19 | #endif /* CONFIG_BOOKE || CONFIG_40x */ |
| 20 | 20 | ||
| 21 | #ifdef CONFIG_FSL_EMB_PERFMON | 21 | #ifdef CONFIG_FSL_EMB_PERFMON |
| 22 | #include <asm/reg_fsl_emb.h> | 22 | #include <asm/reg_fsl_emb.h> |
| 23 | #endif | 23 | #endif |
| 24 | 24 | ||
| 25 | #ifdef CONFIG_8xx | 25 | #ifdef CONFIG_8xx |
| 26 | #include <asm/reg_8xx.h> | 26 | #include <asm/reg_8xx.h> |
| 27 | #endif /* CONFIG_8xx */ | 27 | #endif /* CONFIG_8xx */ |
| 28 | 28 | ||
| 29 | #define MSR_SF_LG 63 /* Enable 64 bit mode */ | 29 | #define MSR_SF_LG 63 /* Enable 64 bit mode */ |
| 30 | #define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ | 30 | #define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ |
| 31 | #define MSR_HV_LG 60 /* Hypervisor state */ | 31 | #define MSR_HV_LG 60 /* Hypervisor state */ |
| 32 | #define MSR_VEC_LG 25 /* Enable AltiVec */ | 32 | #define MSR_VEC_LG 25 /* Enable AltiVec */ |
| 33 | #define MSR_VSX_LG 23 /* Enable VSX */ | 33 | #define MSR_VSX_LG 23 /* Enable VSX */ |
| 34 | #define MSR_POW_LG 18 /* Enable Power Management */ | 34 | #define MSR_POW_LG 18 /* Enable Power Management */ |
| 35 | #define MSR_WE_LG 18 /* Wait State Enable */ | 35 | #define MSR_WE_LG 18 /* Wait State Enable */ |
| 36 | #define MSR_TGPR_LG 17 /* TLB Update registers in use */ | 36 | #define MSR_TGPR_LG 17 /* TLB Update registers in use */ |
| 37 | #define MSR_CE_LG 17 /* Critical Interrupt Enable */ | 37 | #define MSR_CE_LG 17 /* Critical Interrupt Enable */ |
| 38 | #define MSR_ILE_LG 16 /* Interrupt Little Endian */ | 38 | #define MSR_ILE_LG 16 /* Interrupt Little Endian */ |
| 39 | #define MSR_EE_LG 15 /* External Interrupt Enable */ | 39 | #define MSR_EE_LG 15 /* External Interrupt Enable */ |
| 40 | #define MSR_PR_LG 14 /* Problem State / Privilege Level */ | 40 | #define MSR_PR_LG 14 /* Problem State / Privilege Level */ |
| 41 | #define MSR_FP_LG 13 /* Floating Point enable */ | 41 | #define MSR_FP_LG 13 /* Floating Point enable */ |
| 42 | #define MSR_ME_LG 12 /* Machine Check Enable */ | 42 | #define MSR_ME_LG 12 /* Machine Check Enable */ |
| 43 | #define MSR_FE0_LG 11 /* Floating Exception mode 0 */ | 43 | #define MSR_FE0_LG 11 /* Floating Exception mode 0 */ |
| 44 | #define MSR_SE_LG 10 /* Single Step */ | 44 | #define MSR_SE_LG 10 /* Single Step */ |
| 45 | #define MSR_BE_LG 9 /* Branch Trace */ | 45 | #define MSR_BE_LG 9 /* Branch Trace */ |
| 46 | #define MSR_DE_LG 9 /* Debug Exception Enable */ | 46 | #define MSR_DE_LG 9 /* Debug Exception Enable */ |
| 47 | #define MSR_FE1_LG 8 /* Floating Exception mode 1 */ | 47 | #define MSR_FE1_LG 8 /* Floating Exception mode 1 */ |
| 48 | #define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */ | 48 | #define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */ |
| 49 | #define MSR_IR_LG 5 /* Instruction Relocate */ | 49 | #define MSR_IR_LG 5 /* Instruction Relocate */ |
| 50 | #define MSR_DR_LG 4 /* Data Relocate */ | 50 | #define MSR_DR_LG 4 /* Data Relocate */ |
| 51 | #define MSR_PE_LG 3 /* Protection Enable */ | 51 | #define MSR_PE_LG 3 /* Protection Enable */ |
| 52 | #define MSR_PX_LG 2 /* Protection Exclusive Mode */ | 52 | #define MSR_PX_LG 2 /* Protection Exclusive Mode */ |
| 53 | #define MSR_PMM_LG 2 /* Performance monitor */ | 53 | #define MSR_PMM_LG 2 /* Performance monitor */ |
| 54 | #define MSR_RI_LG 1 /* Recoverable Exception */ | 54 | #define MSR_RI_LG 1 /* Recoverable Exception */ |
| 55 | #define MSR_LE_LG 0 /* Little Endian */ | 55 | #define MSR_LE_LG 0 /* Little Endian */ |
| 56 | 56 | ||
| 57 | #ifdef __ASSEMBLY__ | 57 | #ifdef __ASSEMBLY__ |
| 58 | #define __MASK(X) (1<<(X)) | 58 | #define __MASK(X) (1<<(X)) |
| 59 | #else | 59 | #else |
| 60 | #define __MASK(X) (1UL<<(X)) | 60 | #define __MASK(X) (1UL<<(X)) |
| 61 | #endif | 61 | #endif |
| 62 | 62 | ||
| 63 | #ifdef CONFIG_PPC64 | 63 | #ifdef CONFIG_PPC64 |
| 64 | #define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */ | 64 | #define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */ |
| 65 | #define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */ | 65 | #define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */ |
| 66 | #define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */ | 66 | #define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */ |
| 67 | #else | 67 | #else |
| 68 | /* so tests for these bits fail on 32-bit */ | 68 | /* so tests for these bits fail on 32-bit */ |
| 69 | #define MSR_SF 0 | 69 | #define MSR_SF 0 |
| 70 | #define MSR_ISF 0 | 70 | #define MSR_ISF 0 |
| 71 | #define MSR_HV 0 | 71 | #define MSR_HV 0 |
| 72 | #endif | 72 | #endif |
| 73 | 73 | ||
| 74 | #define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */ | 74 | #define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */ |
| 75 | #define MSR_VSX __MASK(MSR_VSX_LG) /* Enable VSX */ | 75 | #define MSR_VSX __MASK(MSR_VSX_LG) /* Enable VSX */ |
| 76 | #define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */ | 76 | #define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */ |
| 77 | #define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */ | 77 | #define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */ |
| 78 | #define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */ | 78 | #define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */ |
| 79 | #define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */ | 79 | #define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */ |
| 80 | #define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */ | 80 | #define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */ |
| 81 | #define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */ | 81 | #define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */ |
| 82 | #define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */ | 82 | #define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */ |
| 83 | #define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */ | 83 | #define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */ |
| 84 | #define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */ | 84 | #define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */ |
| 85 | #define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */ | 85 | #define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */ |
| 86 | #define MSR_SE __MASK(MSR_SE_LG) /* Single Step */ | 86 | #define MSR_SE __MASK(MSR_SE_LG) /* Single Step */ |
| 87 | #define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */ | 87 | #define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */ |
| 88 | #define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */ | 88 | #define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */ |
| 89 | #define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */ | 89 | #define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */ |
| 90 | #define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */ | 90 | #define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */ |
| 91 | #define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */ | 91 | #define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */ |
| 92 | #define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */ | 92 | #define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */ |
| 93 | #define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */ | 93 | #define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */ |
| 94 | #define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */ | 94 | #define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */ |
| 95 | #ifndef MSR_PMM | 95 | #ifndef MSR_PMM |
| 96 | #define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */ | 96 | #define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */ |
| 97 | #endif | 97 | #endif |
| 98 | #define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */ | 98 | #define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */ |
| 99 | #define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */ | 99 | #define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */ |
| 100 | 100 | ||
| 101 | #if defined(CONFIG_PPC_BOOK3S_64) | 101 | #if defined(CONFIG_PPC_BOOK3S_64) |
| 102 | /* Server variant */ | 102 | /* Server variant */ |
| 103 | #define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV | 103 | #define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV |
| 104 | #define MSR_KERNEL MSR_ | MSR_SF | 104 | #define MSR_KERNEL MSR_ | MSR_SF |
| 105 | #define MSR_USER32 MSR_ | MSR_PR | MSR_EE | 105 | #define MSR_USER32 MSR_ | MSR_PR | MSR_EE |
| 106 | #define MSR_USER64 MSR_USER32 | MSR_SF | 106 | #define MSR_USER64 MSR_USER32 | MSR_SF |
| 107 | #elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx) | 107 | #elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx) |
| 108 | /* Default MSR for kernel mode. */ | 108 | /* Default MSR for kernel mode. */ |
| 109 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) | 109 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) |
| 110 | #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) | 110 | #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) |
| 111 | #endif | 111 | #endif |
| 112 | 112 | ||
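A common idiom built on these masks is testing a saved MSR image, e.g. deciding whether an exception interrupted user (problem) state; a minimal sketch, assuming `regs` comes from an exception path and that both fault handlers are hypothetical:

    static void msr_test_sketch(struct pt_regs *regs)
    {
        if (regs->msr & MSR_PR)
            handle_user_fault();      /* MSR_PR set: problem (user) state */
        else
            handle_kernel_fault();    /* privileged state */
    }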
| 113 | /* Floating Point Status and Control Register (FPSCR) Fields */ | 113 | /* Floating Point Status and Control Register (FPSCR) Fields */ |
| 114 | #define FPSCR_FX 0x80000000 /* FPU exception summary */ | 114 | #define FPSCR_FX 0x80000000 /* FPU exception summary */ |
| 115 | #define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */ | 115 | #define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */ |
| 116 | #define FPSCR_VX 0x20000000 /* Invalid operation summary */ | 116 | #define FPSCR_VX 0x20000000 /* Invalid operation summary */ |
| 117 | #define FPSCR_OX 0x10000000 /* Overflow exception summary */ | 117 | #define FPSCR_OX 0x10000000 /* Overflow exception summary */ |
| 118 | #define FPSCR_UX 0x08000000 /* Underflow exception summary */ | 118 | #define FPSCR_UX 0x08000000 /* Underflow exception summary */ |
| 119 | #define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */ | 119 | #define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */ |
| 120 | #define FPSCR_XX 0x02000000 /* Inexact exception summary */ | 120 | #define FPSCR_XX 0x02000000 /* Inexact exception summary */ |
| 121 | #define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */ | 121 | #define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */ |
| 122 | #define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */ | 122 | #define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */ |
| 123 | #define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */ | 123 | #define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */ |
| 124 | #define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */ | 124 | #define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */ |
| 125 | #define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */ | 125 | #define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */ |
| 126 | #define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */ | 126 | #define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */ |
| 127 | #define FPSCR_FR 0x00040000 /* Fraction rounded */ | 127 | #define FPSCR_FR 0x00040000 /* Fraction rounded */ |
| 128 | #define FPSCR_FI 0x00020000 /* Fraction inexact */ | 128 | #define FPSCR_FI 0x00020000 /* Fraction inexact */ |
| 129 | #define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */ | 129 | #define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */ |
| 130 | #define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */ | 130 | #define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */ |
| 131 | #define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */ | 131 | #define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */ |
| 132 | #define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */ | 132 | #define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */ |
| 133 | #define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */ | 133 | #define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */ |
| 134 | #define FPSCR_VE 0x00000080 /* Invalid op exception enable */ | 134 | #define FPSCR_VE 0x00000080 /* Invalid op exception enable */ |
| 135 | #define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */ | 135 | #define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */ |
| 136 | #define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */ | 136 | #define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */ |
| 137 | #define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */ | 137 | #define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */ |
| 138 | #define FPSCR_XE 0x00000008 /* FP inexact exception enable */ | 138 | #define FPSCR_XE 0x00000008 /* FP inexact exception enable */ |
| 139 | #define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */ | 139 | #define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */ |
| 140 | #define FPSCR_RN 0x00000003 /* FPU rounding control */ | 140 | #define FPSCR_RN 0x00000003 /* FPU rounding control */ |
| 141 | 141 | ||
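Note that FPSCR_RN is a two-bit field, so the rounding mode is extracted with a mask rather than a single-bit test; a minimal sketch over a hypothetical saved `fpscr` word:

    unsigned int rn = fpscr & FPSCR_RN;        /* 0 nearest, 1 zero, 2 +inf, 3 -inf */
    int inexact = (fpscr & FPSCR_XX) != 0;     /* sticky inexact exception summary */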
| 142 | /* Bit definitions for SPEFSCR. */ | 142 | /* Bit definitions for SPEFSCR. */ |
| 143 | #define SPEFSCR_SOVH 0x80000000 /* Summary integer overflow high */ | 143 | #define SPEFSCR_SOVH 0x80000000 /* Summary integer overflow high */ |
| 144 | #define SPEFSCR_OVH 0x40000000 /* Integer overflow high */ | 144 | #define SPEFSCR_OVH 0x40000000 /* Integer overflow high */ |
| 145 | #define SPEFSCR_FGH 0x20000000 /* Embedded FP guard bit high */ | 145 | #define SPEFSCR_FGH 0x20000000 /* Embedded FP guard bit high */ |
| 146 | #define SPEFSCR_FXH 0x10000000 /* Embedded FP sticky bit high */ | 146 | #define SPEFSCR_FXH 0x10000000 /* Embedded FP sticky bit high */ |
| 147 | #define SPEFSCR_FINVH 0x08000000 /* Embedded FP invalid operation high */ | 147 | #define SPEFSCR_FINVH 0x08000000 /* Embedded FP invalid operation high */ |
| 148 | #define SPEFSCR_FDBZH 0x04000000 /* Embedded FP div by zero high */ | 148 | #define SPEFSCR_FDBZH 0x04000000 /* Embedded FP div by zero high */ |
| 149 | #define SPEFSCR_FUNFH 0x02000000 /* Embedded FP underflow high */ | 149 | #define SPEFSCR_FUNFH 0x02000000 /* Embedded FP underflow high */ |
| 150 | #define SPEFSCR_FOVFH 0x01000000 /* Embedded FP overflow high */ | 150 | #define SPEFSCR_FOVFH 0x01000000 /* Embedded FP overflow high */ |
| 151 | #define SPEFSCR_FINXS 0x00200000 /* Embedded FP inexact sticky */ | 151 | #define SPEFSCR_FINXS 0x00200000 /* Embedded FP inexact sticky */ |
| 152 | #define SPEFSCR_FINVS 0x00100000 /* Embedded FP invalid op. sticky */ | 152 | #define SPEFSCR_FINVS 0x00100000 /* Embedded FP invalid op. sticky */ |
| 153 | #define SPEFSCR_FDBZS 0x00080000 /* Embedded FP div by zero sticky */ | 153 | #define SPEFSCR_FDBZS 0x00080000 /* Embedded FP div by zero sticky */ |
| 154 | #define SPEFSCR_FUNFS 0x00040000 /* Embedded FP underflow sticky */ | 154 | #define SPEFSCR_FUNFS 0x00040000 /* Embedded FP underflow sticky */ |
| 155 | #define SPEFSCR_FOVFS 0x00020000 /* Embedded FP overflow sticky */ | 155 | #define SPEFSCR_FOVFS 0x00020000 /* Embedded FP overflow sticky */ |
| 156 | #define SPEFSCR_MODE 0x00010000 /* Embedded FP mode */ | 156 | #define SPEFSCR_MODE 0x00010000 /* Embedded FP mode */ |
| 157 | #define SPEFSCR_SOV 0x00008000 /* Integer summary overflow */ | 157 | #define SPEFSCR_SOV 0x00008000 /* Integer summary overflow */ |
| 158 | #define SPEFSCR_OV 0x00004000 /* Integer overflow */ | 158 | #define SPEFSCR_OV 0x00004000 /* Integer overflow */ |
| 159 | #define SPEFSCR_FG 0x00002000 /* Embedded FP guard bit */ | 159 | #define SPEFSCR_FG 0x00002000 /* Embedded FP guard bit */ |
| 160 | #define SPEFSCR_FX 0x00001000 /* Embedded FP sticky bit */ | 160 | #define SPEFSCR_FX 0x00001000 /* Embedded FP sticky bit */ |
| 161 | #define SPEFSCR_FINV 0x00000800 /* Embedded FP invalid operation */ | 161 | #define SPEFSCR_FINV 0x00000800 /* Embedded FP invalid operation */ |
| 162 | #define SPEFSCR_FDBZ 0x00000400 /* Embedded FP div by zero */ | 162 | #define SPEFSCR_FDBZ 0x00000400 /* Embedded FP div by zero */ |
| 163 | #define SPEFSCR_FUNF 0x00000200 /* Embedded FP underflow */ | 163 | #define SPEFSCR_FUNF 0x00000200 /* Embedded FP underflow */ |
| 164 | #define SPEFSCR_FOVF 0x00000100 /* Embedded FP overflow */ | 164 | #define SPEFSCR_FOVF 0x00000100 /* Embedded FP overflow */ |
| 165 | #define SPEFSCR_FINXE 0x00000040 /* Embedded FP inexact enable */ | 165 | #define SPEFSCR_FINXE 0x00000040 /* Embedded FP inexact enable */ |
| 166 | #define SPEFSCR_FINVE 0x00000020 /* Embedded FP invalid op. enable */ | 166 | #define SPEFSCR_FINVE 0x00000020 /* Embedded FP invalid op. enable */ |
| 167 | #define SPEFSCR_FDBZE 0x00000010 /* Embedded FP div by zero enable */ | 167 | #define SPEFSCR_FDBZE 0x00000010 /* Embedded FP div by zero enable */ |
| 168 | #define SPEFSCR_FUNFE 0x00000008 /* Embedded FP underflow enable */ | 168 | #define SPEFSCR_FUNFE 0x00000008 /* Embedded FP underflow enable */ |
| 169 | #define SPEFSCR_FOVFE 0x00000004 /* Embedded FP overflow enable */ | 169 | #define SPEFSCR_FOVFE 0x00000004 /* Embedded FP overflow enable */ |
| 170 | #define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */ | 170 | #define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */ |
| 171 | 171 | ||
| 172 | /* Special Purpose Registers (SPRNs)*/ | 172 | /* Special Purpose Registers (SPRNs)*/ |
| 173 | #define SPRN_CTR 0x009 /* Count Register */ | 173 | #define SPRN_CTR 0x009 /* Count Register */ |
| 174 | #define SPRN_DSCR 0x11 | 174 | #define SPRN_DSCR 0x11 |
| 175 | #define SPRN_CTRLF 0x088 | 175 | #define SPRN_CTRLF 0x088 |
| 176 | #define SPRN_CTRLT 0x098 | 176 | #define SPRN_CTRLT 0x098 |
| 177 | #define CTRL_CT 0xc0000000 /* current thread */ | 177 | #define CTRL_CT 0xc0000000 /* current thread */ |
| 178 | #define CTRL_CT0 0x80000000 /* thread 0 */ | 178 | #define CTRL_CT0 0x80000000 /* thread 0 */ |
| 179 | #define CTRL_CT1 0x40000000 /* thread 1 */ | 179 | #define CTRL_CT1 0x40000000 /* thread 1 */ |
| 180 | #define CTRL_TE 0x00c00000 /* thread enable */ | 180 | #define CTRL_TE 0x00c00000 /* thread enable */ |
| 181 | #define CTRL_RUNLATCH 0x1 | 181 | #define CTRL_RUNLATCH 0x1 |
| 182 | #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ | 182 | #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ |
| 183 | #define DABR_TRANSLATION (1UL << 2) | 183 | #define DABR_TRANSLATION (1UL << 2) |
| 184 | #define DABR_DATA_WRITE (1UL << 1) | 184 | #define DABR_DATA_WRITE (1UL << 1) |
| 185 | #define DABR_DATA_READ (1UL << 0) | 185 | #define DABR_DATA_READ (1UL << 0) |
| 186 | #define SPRN_DABR2 0x13D /* e300 */ | 186 | #define SPRN_DABR2 0x13D /* e300 */ |
| 187 | #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ | 187 | #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ |
| 188 | #define DABRX_USER (1UL << 0) | 188 | #define DABRX_USER (1UL << 0) |
| 189 | #define DABRX_KERNEL (1UL << 1) | 189 | #define DABRX_KERNEL (1UL << 1) |
| 190 | #define SPRN_DAR 0x013 /* Data Address Register */ | 190 | #define SPRN_DAR 0x013 /* Data Address Register */ |
| 191 | #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ | 191 | #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ |
| 192 | #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ | 192 | #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ |
| 193 | #define DSISR_NOHPTE 0x40000000 /* no translation found */ | 193 | #define DSISR_NOHPTE 0x40000000 /* no translation found */ |
| 194 | #define DSISR_PROTFAULT 0x08000000 /* protection fault */ | 194 | #define DSISR_PROTFAULT 0x08000000 /* protection fault */ |
| 195 | #define DSISR_ISSTORE 0x02000000 /* access was a store */ | 195 | #define DSISR_ISSTORE 0x02000000 /* access was a store */ |
| 196 | #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ | 196 | #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ |
| 197 | #define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */ | 197 | #define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */ |
| 198 | #define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ | 198 | #define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ |
| 199 | #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ | 199 | #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ |
| 200 | #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ | 200 | #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ |
| 201 | #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ | 201 | #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ |
| 202 | #define SPRN_SPURR 0x134 /* Scaled PURR */ | 202 | #define SPRN_SPURR 0x134 /* Scaled PURR */ |
| 203 | #define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ | 203 | #define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ |
| 204 | #define SPRN_LPCR 0x13E /* LPAR Control Register */ | 204 | #define SPRN_LPCR 0x13E /* LPAR Control Register */ |
| 205 | #define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ | 205 | #define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ |
| 206 | #define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ | 206 | #define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ |
| 207 | #define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */ | 207 | #define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */ |
| 208 | #define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */ | 208 | #define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */ |
| 209 | #define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */ | 209 | #define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */ |
| 210 | #define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */ | 210 | #define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */ |
| 211 | #define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */ | 211 | #define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */ |
| 212 | #define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */ | 212 | #define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */ |
| 213 | #define SPRN_DBAT4L 0x239 /* Data BAT 4 Lower Register */ | 213 | #define SPRN_DBAT4L 0x239 /* Data BAT 4 Lower Register */ |
| 214 | #define SPRN_DBAT4U 0x238 /* Data BAT 4 Upper Register */ | 214 | #define SPRN_DBAT4U 0x238 /* Data BAT 4 Upper Register */ |
| 215 | #define SPRN_DBAT5L 0x23B /* Data BAT 5 Lower Register */ | 215 | #define SPRN_DBAT5L 0x23B /* Data BAT 5 Lower Register */ |
| 216 | #define SPRN_DBAT5U 0x23A /* Data BAT 5 Upper Register */ | 216 | #define SPRN_DBAT5U 0x23A /* Data BAT 5 Upper Register */ |
| 217 | #define SPRN_DBAT6L 0x23D /* Data BAT 6 Lower Register */ | 217 | #define SPRN_DBAT6L 0x23D /* Data BAT 6 Lower Register */ |
| 218 | #define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */ | 218 | #define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */ |
| 219 | #define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */ | 219 | #define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */ |
| 220 | #define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */ | 220 | #define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */ |
| 221 | 221 | ||
| 222 | #define SPRN_DEC 0x016 /* Decrement Register */ | 222 | #define SPRN_DEC 0x016 /* Decrement Register */ |
| 223 | #define SPRN_DER 0x095 /* Debug Enable Register */ | 223 | #define SPRN_DER 0x095 /* Debug Enable Register */ |
| 224 | #define DER_RSTE 0x40000000 /* Reset Interrupt */ | 224 | #define DER_RSTE 0x40000000 /* Reset Interrupt */ |
| 225 | #define DER_CHSTPE 0x20000000 /* Check Stop */ | 225 | #define DER_CHSTPE 0x20000000 /* Check Stop */ |
| 226 | #define DER_MCIE 0x10000000 /* Machine Check Interrupt */ | 226 | #define DER_MCIE 0x10000000 /* Machine Check Interrupt */ |
| 227 | #define DER_EXTIE 0x02000000 /* External Interrupt */ | 227 | #define DER_EXTIE 0x02000000 /* External Interrupt */ |
| 228 | #define DER_ALIE 0x01000000 /* Alignment Interrupt */ | 228 | #define DER_ALIE 0x01000000 /* Alignment Interrupt */ |
| 229 | #define DER_PRIE 0x00800000 /* Program Interrupt */ | 229 | #define DER_PRIE 0x00800000 /* Program Interrupt */ |
| 230 | #define DER_FPUVIE 0x00400000 /* FP Unavailable Interrupt */ | 230 | #define DER_FPUVIE 0x00400000 /* FP Unavailable Interrupt */ |
| 231 | #define DER_DECIE 0x00200000 /* Decrementer Interrupt */ | 231 | #define DER_DECIE 0x00200000 /* Decrementer Interrupt */ |
| 232 | #define DER_SYSIE 0x00040000 /* System Call Interrupt */ | 232 | #define DER_SYSIE 0x00040000 /* System Call Interrupt */ |
| 233 | #define DER_TRE 0x00020000 /* Trace Interrupt */ | 233 | #define DER_TRE 0x00020000 /* Trace Interrupt */ |
| 234 | #define DER_SEIE 0x00004000 /* FP SW Emulation Interrupt */ | 234 | #define DER_SEIE 0x00004000 /* FP SW Emulation Interrupt */ |
| 235 | #define DER_ITLBMSE 0x00002000 /* Imp. Spec. Instruction TLB Miss */ | 235 | #define DER_ITLBMSE 0x00002000 /* Imp. Spec. Instruction TLB Miss */ |
| 236 | #define DER_ITLBERE 0x00001000 /* Imp. Spec. Instruction TLB Error */ | 236 | #define DER_ITLBERE 0x00001000 /* Imp. Spec. Instruction TLB Error */ |
| 237 | #define DER_DTLBMSE 0x00000800 /* Imp. Spec. Data TLB Miss */ | 237 | #define DER_DTLBMSE 0x00000800 /* Imp. Spec. Data TLB Miss */ |
| 238 | #define DER_DTLBERE 0x00000400 /* Imp. Spec. Data TLB Error */ | 238 | #define DER_DTLBERE 0x00000400 /* Imp. Spec. Data TLB Error */ |
| 239 | #define DER_LBRKE 0x00000008 /* Load/Store Breakpoint Interrupt */ | 239 | #define DER_LBRKE 0x00000008 /* Load/Store Breakpoint Interrupt */ |
| 240 | #define DER_IBRKE 0x00000004 /* Instruction Breakpoint Interrupt */ | 240 | #define DER_IBRKE 0x00000004 /* Instruction Breakpoint Interrupt */ |
| 241 | #define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */ | 241 | #define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */ |
| 242 | #define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */ | 242 | #define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */ |
| 243 | #define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ | 243 | #define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ |
| 244 | #define SPRN_EAR 0x11A /* External Address Register */ | 244 | #define SPRN_EAR 0x11A /* External Address Register */ |
| 245 | #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ | 245 | #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ |
| 246 | #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */ | 246 | #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */ |
| 247 | #define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ | 247 | #define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ |
| 248 | #define HID0_EMCP (1<<31) /* Enable Machine Check pin */ | 248 | #define HID0_EMCP (1<<31) /* Enable Machine Check pin */ |
| 249 | #define HID0_EBA (1<<29) /* Enable Bus Address Parity */ | 249 | #define HID0_EBA (1<<29) /* Enable Bus Address Parity */ |
| 250 | #define HID0_EBD (1<<28) /* Enable Bus Data Parity */ | 250 | #define HID0_EBD (1<<28) /* Enable Bus Data Parity */ |
| 251 | #define HID0_SBCLK (1<<27) | 251 | #define HID0_SBCLK (1<<27) |
| 252 | #define HID0_EICE (1<<26) | 252 | #define HID0_EICE (1<<26) |
| 253 | #define HID0_TBEN (1<<26) /* Timebase enable - 745x */ | 253 | #define HID0_TBEN (1<<26) /* Timebase enable - 745x */ |
| 254 | #define HID0_ECLK (1<<25) | 254 | #define HID0_ECLK (1<<25) |
| 255 | #define HID0_PAR (1<<24) | 255 | #define HID0_PAR (1<<24) |
| 256 | #define HID0_STEN (1<<24) /* Software table search enable - 745x */ | 256 | #define HID0_STEN (1<<24) /* Software table search enable - 745x */ |
| 257 | #define HID0_HIGH_BAT (1<<23) /* Enable high BATs - 7455 */ | 257 | #define HID0_HIGH_BAT (1<<23) /* Enable high BATs - 7455 */ |
| 258 | #define HID0_DOZE (1<<23) | 258 | #define HID0_DOZE (1<<23) |
| 259 | #define HID0_NAP (1<<22) | 259 | #define HID0_NAP (1<<22) |
| 260 | #define HID0_SLEEP (1<<21) | 260 | #define HID0_SLEEP (1<<21) |
| 261 | #define HID0_DPM (1<<20) | 261 | #define HID0_DPM (1<<20) |
| 262 | #define HID0_BHTCLR (1<<18) /* Clear branch history table - 7450 */ | 262 | #define HID0_BHTCLR (1<<18) /* Clear branch history table - 7450 */ |
| 263 | #define HID0_XAEN (1<<17) /* Extended addressing enable - 7450 */ | 263 | #define HID0_XAEN (1<<17) /* Extended addressing enable - 7450 */ |
| 264 | #define HID0_NHR (1<<16) /* Not hard reset (software bit-7450)*/ | 264 | #define HID0_NHR (1<<16) /* Not hard reset (software bit-7450)*/ |
| 265 | #define HID0_ICE (1<<15) /* Instruction Cache Enable */ | 265 | #define HID0_ICE (1<<15) /* Instruction Cache Enable */ |
| 266 | #define HID0_DCE (1<<14) /* Data Cache Enable */ | 266 | #define HID0_DCE (1<<14) /* Data Cache Enable */ |
| 267 | #define HID0_ILOCK (1<<13) /* Instruction Cache Lock */ | 267 | #define HID0_ILOCK (1<<13) /* Instruction Cache Lock */ |
| 268 | #define HID0_DLOCK (1<<12) /* Data Cache Lock */ | 268 | #define HID0_DLOCK (1<<12) /* Data Cache Lock */ |
| 269 | #define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */ | 269 | #define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */ |
| 270 | #define HID0_DCI (1<<10) /* Data Cache Invalidate */ | 270 | #define HID0_DCI (1<<10) /* Data Cache Invalidate */ |
| 271 | #define HID0_SPD (1<<9) /* Speculative disable */ | 271 | #define HID0_SPD (1<<9) /* Speculative disable */ |
| 272 | #define HID0_DAPUEN (1<<8) /* Debug APU enable */ | 272 | #define HID0_DAPUEN (1<<8) /* Debug APU enable */ |
| 273 | #define HID0_SGE (1<<7) /* Store Gathering Enable */ | 273 | #define HID0_SGE (1<<7) /* Store Gathering Enable */ |
| 274 | #define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */ | 274 | #define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */ |
| 275 | #define HID0_DCFA (1<<6) /* Data Cache Flush Assist */ | 275 | #define HID0_DCFA (1<<6) /* Data Cache Flush Assist */ |
| 276 | #define HID0_LRSTK (1<<4) /* Link register stack - 745x */ | 276 | #define HID0_LRSTK (1<<4) /* Link register stack - 745x */ |
| 277 | #define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */ | 277 | #define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */ |
| 278 | #define HID0_ABE (1<<3) /* Address Broadcast Enable */ | 278 | #define HID0_ABE (1<<3) /* Address Broadcast Enable */ |
| 279 | #define HID0_FOLD (1<<3) /* Branch Folding enable - 745x */ | 279 | #define HID0_FOLD (1<<3) /* Branch Folding enable - 745x */ |
| 280 | #define HID0_BHTE (1<<2) /* Branch History Table Enable */ | 280 | #define HID0_BHTE (1<<2) /* Branch History Table Enable */ |
| 281 | #define HID0_BTCD (1<<1) /* Branch target cache disable */ | 281 | #define HID0_BTCD (1<<1) /* Branch target cache disable */ |
| 282 | #define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */ | 282 | #define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */ |
| 283 | #define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */ | 283 | #define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */ |
| 284 | 284 | ||
| 285 | #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ | 285 | #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ |
| 286 | #define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */ | 286 | #define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */ |
| 287 | #define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */ | 287 | #define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */ |
| 288 | #define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */ | 288 | #define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */ |
| 289 | #define HID1_PC1 (1<<15) /* 7450 PLL_CFG[1] */ | 289 | #define HID1_PC1 (1<<15) /* 7450 PLL_CFG[1] */ |
| 290 | #define HID1_PC2 (1<<14) /* 7450 PLL_CFG[2] */ | 290 | #define HID1_PC2 (1<<14) /* 7450 PLL_CFG[2] */ |
| 291 | #define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */ | 291 | #define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */ |
| 292 | #define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */ | 292 | #define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */ |
| 293 | #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ | 293 | #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ |
| 294 | #define HID1_PS (1<<16) /* 750FX PLL selection */ | 294 | #define HID1_PS (1<<16) /* 750FX PLL selection */ |
| 295 | #define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ | 295 | #define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ |
| 296 | #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ | 296 | #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ |
| 297 | #define SPRN_IABR2 0x3FA /* 83xx */ | 297 | #define SPRN_IABR2 0x3FA /* 83xx */ |
| 298 | #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ | 298 | #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ |
| 299 | #define SPRN_HID4 0x3F4 /* 970 HID4 */ | 299 | #define SPRN_HID4 0x3F4 /* 970 HID4 */ |
| 300 | #define SPRN_HID5 0x3F6 /* 970 HID5 */ | 300 | #define SPRN_HID5 0x3F6 /* 970 HID5 */ |
| 301 | #define SPRN_HID6 0x3F9 /* BE HID 6 */ | 301 | #define SPRN_HID6 0x3F9 /* BE HID 6 */ |
| 302 | #define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */ | 302 | #define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */ |
| 303 | #define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */ | 303 | #define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */ |
| 304 | #define SPRN_TSC_CELL 0x399 /* Thread switch control on Cell */ | 304 | #define SPRN_TSC_CELL 0x399 /* Thread switch control on Cell */ |
| 305 | #define TSC_CELL_DEC_ENABLE_0 0x400000 /* Decrementer Interrupt */ | 305 | #define TSC_CELL_DEC_ENABLE_0 0x400000 /* Decrementer Interrupt */ |
| 306 | #define TSC_CELL_DEC_ENABLE_1 0x200000 /* Decrementer Interrupt */ | 306 | #define TSC_CELL_DEC_ENABLE_1 0x200000 /* Decrementer Interrupt */ |
| 307 | #define TSC_CELL_EE_ENABLE 0x100000 /* External Interrupt */ | 307 | #define TSC_CELL_EE_ENABLE 0x100000 /* External Interrupt */ |
| 308 | #define TSC_CELL_EE_BOOST 0x080000 /* External Interrupt Boost */ | 308 | #define TSC_CELL_EE_BOOST 0x080000 /* External Interrupt Boost */ |
| 309 | #define SPRN_TSC 0x3FD /* Thread switch control on others */ | 309 | #define SPRN_TSC 0x3FD /* Thread switch control on others */ |
| 310 | #define SPRN_TST 0x3FC /* Thread switch timeout on others */ | 310 | #define SPRN_TST 0x3FC /* Thread switch timeout on others */ |
| 311 | #if !defined(SPRN_IAC1) && !defined(SPRN_IAC2) | 311 | #if !defined(SPRN_IAC1) && !defined(SPRN_IAC2) |
| 312 | #define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */ | 312 | #define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */ |
| 313 | #define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */ | 313 | #define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */ |
| 314 | #endif | 314 | #endif |
| 315 | #define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */ | 315 | #define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */ |
| 316 | #define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */ | 316 | #define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */ |
| 317 | #define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */ | 317 | #define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */ |
| 318 | #define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */ | 318 | #define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */ |
| 319 | #define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */ | 319 | #define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */ |
| 320 | #define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */ | 320 | #define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */ |
| 321 | #define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */ | 321 | #define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */ |
| 322 | #define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */ | 322 | #define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */ |
| 323 | #define SPRN_IBAT4L 0x231 /* Instruction BAT 4 Lower Register */ | 323 | #define SPRN_IBAT4L 0x231 /* Instruction BAT 4 Lower Register */ |
| 324 | #define SPRN_IBAT4U 0x230 /* Instruction BAT 4 Upper Register */ | 324 | #define SPRN_IBAT4U 0x230 /* Instruction BAT 4 Upper Register */ |
| 325 | #define SPRN_IBAT5L 0x233 /* Instruction BAT 5 Lower Register */ | 325 | #define SPRN_IBAT5L 0x233 /* Instruction BAT 5 Lower Register */ |
| 326 | #define SPRN_IBAT5U 0x232 /* Instruction BAT 5 Upper Register */ | 326 | #define SPRN_IBAT5U 0x232 /* Instruction BAT 5 Upper Register */ |
| 327 | #define SPRN_IBAT6L 0x235 /* Instruction BAT 6 Lower Register */ | 327 | #define SPRN_IBAT6L 0x235 /* Instruction BAT 6 Lower Register */ |
| 328 | #define SPRN_IBAT6U 0x234 /* Instruction BAT 6 Upper Register */ | 328 | #define SPRN_IBAT6U 0x234 /* Instruction BAT 6 Upper Register */ |
| 329 | #define SPRN_IBAT7L 0x237 /* Instruction BAT 7 Lower Register */ | 329 | #define SPRN_IBAT7L 0x237 /* Instruction BAT 7 Lower Register */ |
| 330 | #define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */ | 330 | #define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */ |
| 331 | #define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */ | 331 | #define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */ |
| 332 | #define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */ | 332 | #define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */ |
| 333 | #define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */ | 333 | #define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */ |
| 334 | #define ICTRL_EICE 0x08000000 /* enable icache parity errs */ | 334 | #define ICTRL_EICE 0x08000000 /* enable icache parity errs */ |
| 335 | #define ICTRL_EDC 0x04000000 /* enable dcache parity errs */ | 335 | #define ICTRL_EDC 0x04000000 /* enable dcache parity errs */ |
| 336 | #define ICTRL_EICP 0x00000100 /* enable icache par. check */ | 336 | #define ICTRL_EICP 0x00000100 /* enable icache par. check */ |
| 337 | #define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */ | 337 | #define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */ |
| 338 | #define SPRN_IMMR 0x27E /* Internal Memory Map Register */ | 338 | #define SPRN_IMMR 0x27E /* Internal Memory Map Register */ |
| 339 | #define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */ | 339 | #define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */ |
| 340 | #define SPRN_L2CR2 0x3f8 | 340 | #define SPRN_L2CR2 0x3f8 |
| 341 | #define L2CR_L2E 0x80000000 /* L2 enable */ | 341 | #define L2CR_L2E 0x80000000 /* L2 enable */ |
| 342 | #define L2CR_L2PE 0x40000000 /* L2 parity enable */ | 342 | #define L2CR_L2PE 0x40000000 /* L2 parity enable */ |
| 343 | #define L2CR_L2SIZ_MASK 0x30000000 /* L2 size mask */ | 343 | #define L2CR_L2SIZ_MASK 0x30000000 /* L2 size mask */ |
| 344 | #define L2CR_L2SIZ_256KB 0x10000000 /* L2 size 256KB */ | 344 | #define L2CR_L2SIZ_256KB 0x10000000 /* L2 size 256KB */ |
| 345 | #define L2CR_L2SIZ_512KB 0x20000000 /* L2 size 512KB */ | 345 | #define L2CR_L2SIZ_512KB 0x20000000 /* L2 size 512KB */ |
| 346 | #define L2CR_L2SIZ_1MB 0x30000000 /* L2 size 1MB */ | 346 | #define L2CR_L2SIZ_1MB 0x30000000 /* L2 size 1MB */ |
| 347 | #define L2CR_L2CLK_MASK 0x0e000000 /* L2 clock mask */ | 347 | #define L2CR_L2CLK_MASK 0x0e000000 /* L2 clock mask */ |
| 348 | #define L2CR_L2CLK_DISABLED 0x00000000 /* L2 clock disabled */ | 348 | #define L2CR_L2CLK_DISABLED 0x00000000 /* L2 clock disabled */ |
| 349 | #define L2CR_L2CLK_DIV1 0x02000000 /* L2 clock / 1 */ | 349 | #define L2CR_L2CLK_DIV1 0x02000000 /* L2 clock / 1 */ |
| 350 | #define L2CR_L2CLK_DIV1_5 0x04000000 /* L2 clock / 1.5 */ | 350 | #define L2CR_L2CLK_DIV1_5 0x04000000 /* L2 clock / 1.5 */ |
| 351 | #define L2CR_L2CLK_DIV2 0x08000000 /* L2 clock / 2 */ | 351 | #define L2CR_L2CLK_DIV2 0x08000000 /* L2 clock / 2 */ |
| 352 | #define L2CR_L2CLK_DIV2_5 0x0a000000 /* L2 clock / 2.5 */ | 352 | #define L2CR_L2CLK_DIV2_5 0x0a000000 /* L2 clock / 2.5 */ |
| 353 | #define L2CR_L2CLK_DIV3 0x0c000000 /* L2 clock / 3 */ | 353 | #define L2CR_L2CLK_DIV3 0x0c000000 /* L2 clock / 3 */ |
| 354 | #define L2CR_L2RAM_MASK 0x01800000 /* L2 RAM type mask */ | 354 | #define L2CR_L2RAM_MASK 0x01800000 /* L2 RAM type mask */ |
| 355 | #define L2CR_L2RAM_FLOW 0x00000000 /* L2 RAM flow through */ | 355 | #define L2CR_L2RAM_FLOW 0x00000000 /* L2 RAM flow through */ |
| 356 | #define L2CR_L2RAM_PIPE 0x01000000 /* L2 RAM pipelined */ | 356 | #define L2CR_L2RAM_PIPE 0x01000000 /* L2 RAM pipelined */ |
| 357 | #define L2CR_L2RAM_PIPE_LW 0x01800000 /* L2 RAM pipelined latewr */ | 357 | #define L2CR_L2RAM_PIPE_LW 0x01800000 /* L2 RAM pipelined latewr */ |
| 358 | #define L2CR_L2DO 0x00400000 /* L2 data only */ | 358 | #define L2CR_L2DO 0x00400000 /* L2 data only */ |
| 359 | #define L2CR_L2I 0x00200000 /* L2 global invalidate */ | 359 | #define L2CR_L2I 0x00200000 /* L2 global invalidate */ |
| 360 | #define L2CR_L2CTL 0x00100000 /* L2 RAM control */ | 360 | #define L2CR_L2CTL 0x00100000 /* L2 RAM control */ |
| 361 | #define L2CR_L2WT 0x00080000 /* L2 write-through */ | 361 | #define L2CR_L2WT 0x00080000 /* L2 write-through */ |
| 362 | #define L2CR_L2TS 0x00040000 /* L2 test support */ | 362 | #define L2CR_L2TS 0x00040000 /* L2 test support */ |
| 363 | #define L2CR_L2OH_MASK 0x00030000 /* L2 output hold mask */ | 363 | #define L2CR_L2OH_MASK 0x00030000 /* L2 output hold mask */ |
| 364 | #define L2CR_L2OH_0_5 0x00000000 /* L2 output hold 0.5 ns */ | 364 | #define L2CR_L2OH_0_5 0x00000000 /* L2 output hold 0.5 ns */ |
| 365 | #define L2CR_L2OH_1_0 0x00010000 /* L2 output hold 1.0 ns */ | 365 | #define L2CR_L2OH_1_0 0x00010000 /* L2 output hold 1.0 ns */ |
| 366 | #define L2CR_L2SL 0x00008000 /* L2 DLL slow */ | 366 | #define L2CR_L2SL 0x00008000 /* L2 DLL slow */ |
| 367 | #define L2CR_L2DF 0x00004000 /* L2 differential clock */ | 367 | #define L2CR_L2DF 0x00004000 /* L2 differential clock */ |
| 368 | #define L2CR_L2BYP 0x00002000 /* L2 DLL bypass */ | 368 | #define L2CR_L2BYP 0x00002000 /* L2 DLL bypass */ |
| 369 | #define L2CR_L2IP 0x00000001 /* L2 GI in progress */ | 369 | #define L2CR_L2IP 0x00000001 /* L2 GI in progress */ |
| 370 | #define L2CR_L2IO_745x 0x00100000 /* L2 instr. only (745x) */ | 370 | #define L2CR_L2IO_745x 0x00100000 /* L2 instr. only (745x) */ |
| 371 | #define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */ | 371 | #define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */ |
| 372 | #define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */ | 372 | #define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */ |
| 373 | #define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */ | 373 | #define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */ |
| 374 | #define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */ | 374 | #define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */ |
| 375 | #define L3CR_L3E 0x80000000 /* L3 enable */ | 375 | #define L3CR_L3E 0x80000000 /* L3 enable */ |
| 376 | #define L3CR_L3PE 0x40000000 /* L3 data parity enable */ | 376 | #define L3CR_L3PE 0x40000000 /* L3 data parity enable */ |
| 377 | #define L3CR_L3APE 0x20000000 /* L3 addr parity enable */ | 377 | #define L3CR_L3APE 0x20000000 /* L3 addr parity enable */ |
| 378 | #define L3CR_L3SIZ 0x10000000 /* L3 size */ | 378 | #define L3CR_L3SIZ 0x10000000 /* L3 size */ |
| 379 | #define L3CR_L3CLKEN 0x08000000 /* L3 clock enable */ | 379 | #define L3CR_L3CLKEN 0x08000000 /* L3 clock enable */ |
| 380 | #define L3CR_L3RES 0x04000000 /* L3 special reserved bit */ | 380 | #define L3CR_L3RES 0x04000000 /* L3 special reserved bit */ |
| 381 | #define L3CR_L3CLKDIV 0x03800000 /* L3 clock divisor */ | 381 | #define L3CR_L3CLKDIV 0x03800000 /* L3 clock divisor */ |
| 382 | #define L3CR_L3IO 0x00400000 /* L3 instruction only */ | 382 | #define L3CR_L3IO 0x00400000 /* L3 instruction only */ |
| 383 | #define L3CR_L3SPO 0x00040000 /* L3 sample point override */ | 383 | #define L3CR_L3SPO 0x00040000 /* L3 sample point override */ |
| 384 | #define L3CR_L3CKSP 0x00030000 /* L3 clock sample point */ | 384 | #define L3CR_L3CKSP 0x00030000 /* L3 clock sample point */ |
| 385 | #define L3CR_L3PSP 0x0000e000 /* L3 P-clock sample point */ | 385 | #define L3CR_L3PSP 0x0000e000 /* L3 P-clock sample point */ |
| 386 | #define L3CR_L3REP 0x00001000 /* L3 replacement algorithm */ | 386 | #define L3CR_L3REP 0x00001000 /* L3 replacement algorithm */ |
| 387 | #define L3CR_L3HWF 0x00000800 /* L3 hardware flush */ | 387 | #define L3CR_L3HWF 0x00000800 /* L3 hardware flush */ |
| 388 | #define L3CR_L3I 0x00000400 /* L3 global invalidate */ | 388 | #define L3CR_L3I 0x00000400 /* L3 global invalidate */ |
| 389 | #define L3CR_L3RT 0x00000300 /* L3 SRAM type */ | 389 | #define L3CR_L3RT 0x00000300 /* L3 SRAM type */ |
| 390 | #define L3CR_L3NIRCA 0x00000080 /* L3 non-integer ratio clock adj. */ | 390 | #define L3CR_L3NIRCA 0x00000080 /* L3 non-integer ratio clock adj. */ |
| 391 | #define L3CR_L3DO 0x00000040 /* L3 data only mode */ | 391 | #define L3CR_L3DO 0x00000040 /* L3 data only mode */ |
| 392 | #define L3CR_PMEN 0x00000004 /* L3 private memory enable */ | 392 | #define L3CR_PMEN 0x00000004 /* L3 private memory enable */ |
| 393 | #define L3CR_PMSIZ 0x00000001 /* L3 private memory size */ | 393 | #define L3CR_PMSIZ 0x00000001 /* L3 private memory size */ |
| 394 | 394 | ||
| 395 | #define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */ | 395 | #define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */ |
| 396 | #define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 0 */ | 396 | #define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 0 */ |
| 397 | #define SPRN_LDSTCR 0x3f8 /* Load/Store control register */ | 397 | #define SPRN_LDSTCR 0x3f8 /* Load/Store control register */ |
| 398 | #define SPRN_LDSTDB 0x3f4 /* */ | 398 | #define SPRN_LDSTDB 0x3f4 /* */ |
| 399 | #define SPRN_LR 0x008 /* Link Register */ | 399 | #define SPRN_LR 0x008 /* Link Register */ |
| 400 | #ifndef SPRN_PIR | 400 | #ifndef SPRN_PIR |
| 401 | #define SPRN_PIR 0x3FF /* Processor Identification Register */ | 401 | #define SPRN_PIR 0x3FF /* Processor Identification Register */ |
| 402 | #endif | 402 | #endif |
| 403 | #define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ | 403 | #define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ |
| 404 | #define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ | 404 | #define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ |
| 405 | #define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */ | 405 | #define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */ |
| 406 | #define SPRN_PVR 0x11F /* Processor Version Register */ | 406 | #define SPRN_PVR 0x11F /* Processor Version Register */ |
| 407 | #define SPRN_RPA 0x3D6 /* Required Physical Address Register */ | 407 | #define SPRN_RPA 0x3D6 /* Required Physical Address Register */ |
| 408 | #define SPRN_SDA 0x3BF /* Sampled Data Address Register */ | 408 | #define SPRN_SDA 0x3BF /* Sampled Data Address Register */ |
| 409 | #define SPRN_SDR1 0x019 /* MMU Hash Base Register */ | 409 | #define SPRN_SDR1 0x019 /* MMU Hash Base Register */ |
| 410 | #define SPRN_ASR 0x118 /* Address Space Register */ | 410 | #define SPRN_ASR 0x118 /* Address Space Register */ |
| 411 | #define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */ | 411 | #define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */ |
| 412 | #define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */ | 412 | #define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */ |
| 413 | #define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */ | 413 | #define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */ |
| 414 | #define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */ | 414 | #define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */ |
| 415 | #define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */ | 415 | #define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */ |
| 416 | #define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */ | 416 | #define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */ |
| 417 | #define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */ | 417 | #define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */ |
| 418 | #define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */ | 418 | #define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */ |
| 419 | #define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */ | 419 | #define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */ |
| 420 | #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ | 420 | #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ |
| 421 | #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ | 421 | #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ |
| 422 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ | 422 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ |
| 423 | #define SRR1_WAKERESET 0x00380000 /* System reset */ | 423 | #define SRR1_WAKERESET 0x00380000 /* System reset */ |
| 424 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ | 424 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ |
| 425 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ | 425 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ |
| 426 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ | 426 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ |
| 427 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ | 427 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ |
| 428 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ | 428 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ |
| 429 | #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ | 429 | #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ |
| 430 | #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ | 430 | #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ |
| 431 | 431 | ||
| 432 | #define SPRN_TBCTL 0x35f /* PA6T Timebase control register */ | 432 | #define SPRN_TBCTL 0x35f /* PA6T Timebase control register */ |
| 433 | #define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */ | 433 | #define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */ |
| 434 | #define TBCTL_RESTART 0x0000000100000000ull /* Restart all tbs */ | 434 | #define TBCTL_RESTART 0x0000000100000000ull /* Restart all tbs */ |
| 435 | #define TBCTL_UPDATE_UPPER 0x0000000200000000ull /* Set upper 32 bits */ | 435 | #define TBCTL_UPDATE_UPPER 0x0000000200000000ull /* Set upper 32 bits */ |
| 436 | #define TBCTL_UPDATE_LOWER 0x0000000300000000ull /* Set lower 32 bits */ | 436 | #define TBCTL_UPDATE_LOWER 0x0000000300000000ull /* Set lower 32 bits */ |
| 437 | 437 | ||
| 438 | #ifndef SPRN_SVR | 438 | #ifndef SPRN_SVR |
| 439 | #define SPRN_SVR 0x11E /* System Version Register */ | 439 | #define SPRN_SVR 0x11E /* System Version Register */ |
| 440 | #endif | 440 | #endif |
| 441 | #define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */ | 441 | #define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */ |
| 442 | /* these bits were originally defined in an inverted endian sense, which is confusing */ | 442 | /* these bits were originally defined in an inverted endian sense, which is confusing */ |
| 443 | #define THRM1_TIN (1 << 31) | 443 | #define THRM1_TIN (1 << 31) |
| 444 | #define THRM1_TIV (1 << 30) | 444 | #define THRM1_TIV (1 << 30) |
| 445 | #define THRM1_THRES(x) ((x&0x7f)<<23) | 445 | #define THRM1_THRES(x) ((x&0x7f)<<23) |
| 446 | #define THRM3_SITV(x) ((x&0x3fff)<<1) | 446 | #define THRM3_SITV(x) ((x&0x3fff)<<1) |
| 447 | #define THRM1_TID (1<<2) | 447 | #define THRM1_TID (1<<2) |
| 448 | #define THRM1_TIE (1<<1) | 448 | #define THRM1_TIE (1<<1) |
| 449 | #define THRM1_V (1<<0) | 449 | #define THRM1_V (1<<0) |
| 450 | #define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */ | 450 | #define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */ |
| 451 | #define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */ | 451 | #define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */ |
| 452 | #define THRM3_E (1<<0) | 452 | #define THRM3_E (1<<0) |
| 453 | #define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */ | 453 | #define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */ |
| 454 | #define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */ | 454 | #define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */ |
| 455 | #define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */ | 455 | #define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */ |
| 456 | #define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */ | 456 | #define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */ |
| 457 | #define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */ | 457 | #define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */ |
| 458 | #define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */ | 458 | #define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */ |
| 459 | #define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */ | 459 | #define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */ |
| 460 | #define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */ | 460 | #define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */ |
| 461 | #define SPRN_VRSAVE 0x100 /* Vector Register Save Register */ | 461 | #define SPRN_VRSAVE 0x100 /* Vector Register Save Register */ |
| 462 | #define SPRN_XER 0x001 /* Fixed Point Exception Register */ | 462 | #define SPRN_XER 0x001 /* Fixed Point Exception Register */ |
| 463 | 463 | ||
| 464 | #define SPRN_SCOMC 0x114 /* SCOM Access Control */ | 464 | #define SPRN_SCOMC 0x114 /* SCOM Access Control */ |
| 465 | #define SPRN_SCOMD 0x115 /* SCOM Access Data */ | 465 | #define SPRN_SCOMD 0x115 /* SCOM Access Data */ |
| 466 | 466 | ||
| 467 | /* Performance monitor SPRs */ | 467 | /* Performance monitor SPRs */ |
| 468 | #ifdef CONFIG_PPC64 | 468 | #ifdef CONFIG_PPC64 |
| 469 | #define SPRN_MMCR0 795 | 469 | #define SPRN_MMCR0 795 |
| 470 | #define MMCR0_FC 0x80000000UL /* freeze counters */ | 470 | #define MMCR0_FC 0x80000000UL /* freeze counters */ |
| 471 | #define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */ | 471 | #define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */ |
| 472 | #define MMCR0_KERNEL_DISABLE MMCR0_FCS | 472 | #define MMCR0_KERNEL_DISABLE MMCR0_FCS |
| 473 | #define MMCR0_FCP 0x20000000UL /* freeze in problem state */ | 473 | #define MMCR0_FCP 0x20000000UL /* freeze in problem state */ |
| 474 | #define MMCR0_PROBLEM_DISABLE MMCR0_FCP | 474 | #define MMCR0_PROBLEM_DISABLE MMCR0_FCP |
| 475 | #define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */ | 475 | #define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */ |
| 476 | #define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */ | 476 | #define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */ |
| 477 | #define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */ | 477 | #define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */ |
| 478 | #define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */ | 478 | #define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */ |
| 479 | #define MMCR0_TBEE 0x00400000UL /* time base exception enable */ | 479 | #define MMCR0_TBEE 0x00400000UL /* time base exception enable */ |
| 480 | #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */ | 480 | #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */ |
| 481 | #define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable */ | 481 | #define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable */ |
| 482 | #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */ | 482 | #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */ |
| 483 | #define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */ | 483 | #define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */ |
| 484 | #define MMCR0_SHRFC 0x00000040UL /* share freeze conditions between threads */ | 484 | #define MMCR0_SHRFC 0x00000040UL /* share freeze conditions between threads */ |
| 485 | #define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */ | 485 | #define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */ |
| 486 | #define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */ | 486 | #define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */ |
| 487 | #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ | 487 | #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ |
| 488 | #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ | 488 | #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ |
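As a usage sketch (illustrative, not the actual perf code), counters are typically stopped and restarted by toggling MMCR0_FC via the mfspr()/mtspr() accessor macros defined later in this header:

    /* Hedged sketch: freeze all PMCs, reprogram, then unfreeze with
     * performance monitor exceptions enabled. */
    unsigned long mmcr0 = mfspr(SPRN_MMCR0);

    mtspr(SPRN_MMCR0, mmcr0 | MMCR0_FC);                 /* freeze counters  */
    /* ... write SPRN_PMC1..SPRN_PMC8 here ... */
    mtspr(SPRN_MMCR0, (mmcr0 & ~MMCR0_FC) | MMCR0_PMXE); /* unfreeze, PMI on */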
| 489 | #define SPRN_MMCR1 798 | 489 | #define SPRN_MMCR1 798 |
| 490 | #define SPRN_MMCRA 0x312 | 490 | #define SPRN_MMCRA 0x312 |
| 491 | #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ | 491 | #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ |
| 492 | #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL | ||
| 493 | #define MMCRA_SDAR_ERAT_MISS 0x20000000UL | ||
| 492 | #define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */ | 494 | #define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */ |
| 493 | #define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */ | 495 | #define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */ |
| 494 | #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ | 496 | #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ |
| 495 | #define MMCRA_SLOT_SHIFT 24 | 497 | #define MMCRA_SLOT_SHIFT 24 |
| 496 | #define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */ | 498 | #define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */ |
| 497 | #define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */ | 499 | #define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */ |
| 498 | #define POWER6_MMCRA_SIHV 0x0000040000000000ULL | 500 | #define POWER6_MMCRA_SIHV 0x0000040000000000ULL |
| 499 | #define POWER6_MMCRA_SIPR 0x0000020000000000ULL | 501 | #define POWER6_MMCRA_SIPR 0x0000020000000000ULL |
| 500 | #define POWER6_MMCRA_THRM 0x00000020UL | 502 | #define POWER6_MMCRA_THRM 0x00000020UL |
| 501 | #define POWER6_MMCRA_OTHER 0x0000000EUL | 503 | #define POWER6_MMCRA_OTHER 0x0000000EUL |
| 502 | #define SPRN_PMC1 787 | 504 | #define SPRN_PMC1 787 |
| 503 | #define SPRN_PMC2 788 | 505 | #define SPRN_PMC2 788 |
| 504 | #define SPRN_PMC3 789 | 506 | #define SPRN_PMC3 789 |
| 505 | #define SPRN_PMC4 790 | 507 | #define SPRN_PMC4 790 |
| 506 | #define SPRN_PMC5 791 | 508 | #define SPRN_PMC5 791 |
| 507 | #define SPRN_PMC6 792 | 509 | #define SPRN_PMC6 792 |
| 508 | #define SPRN_PMC7 793 | 510 | #define SPRN_PMC7 793 |
| 509 | #define SPRN_PMC8 794 | 511 | #define SPRN_PMC8 794 |
| 510 | #define SPRN_SIAR 780 | 512 | #define SPRN_SIAR 780 |
| 511 | #define SPRN_SDAR 781 | 513 | #define SPRN_SDAR 781 |
| 512 | 514 | ||
| 513 | #define SPRN_PA6T_MMCR0 795 | 515 | #define SPRN_PA6T_MMCR0 795 |
| 514 | #define PA6T_MMCR0_EN0 0x0000000000000001UL | 516 | #define PA6T_MMCR0_EN0 0x0000000000000001UL |
| 515 | #define PA6T_MMCR0_EN1 0x0000000000000002UL | 517 | #define PA6T_MMCR0_EN1 0x0000000000000002UL |
| 516 | #define PA6T_MMCR0_EN2 0x0000000000000004UL | 518 | #define PA6T_MMCR0_EN2 0x0000000000000004UL |
| 517 | #define PA6T_MMCR0_EN3 0x0000000000000008UL | 519 | #define PA6T_MMCR0_EN3 0x0000000000000008UL |
| 518 | #define PA6T_MMCR0_EN4 0x0000000000000010UL | 520 | #define PA6T_MMCR0_EN4 0x0000000000000010UL |
| 519 | #define PA6T_MMCR0_EN5 0x0000000000000020UL | 521 | #define PA6T_MMCR0_EN5 0x0000000000000020UL |
| 520 | #define PA6T_MMCR0_SUPEN 0x0000000000000040UL | 522 | #define PA6T_MMCR0_SUPEN 0x0000000000000040UL |
| 521 | #define PA6T_MMCR0_PREN 0x0000000000000080UL | 523 | #define PA6T_MMCR0_PREN 0x0000000000000080UL |
| 522 | #define PA6T_MMCR0_HYPEN 0x0000000000000100UL | 524 | #define PA6T_MMCR0_HYPEN 0x0000000000000100UL |
| 523 | #define PA6T_MMCR0_FCM0 0x0000000000000200UL | 525 | #define PA6T_MMCR0_FCM0 0x0000000000000200UL |
| 524 | #define PA6T_MMCR0_FCM1 0x0000000000000400UL | 526 | #define PA6T_MMCR0_FCM1 0x0000000000000400UL |
| 525 | #define PA6T_MMCR0_INTGEN 0x0000000000000800UL | 527 | #define PA6T_MMCR0_INTGEN 0x0000000000000800UL |
| 526 | #define PA6T_MMCR0_INTEN0 0x0000000000001000UL | 528 | #define PA6T_MMCR0_INTEN0 0x0000000000001000UL |
| 527 | #define PA6T_MMCR0_INTEN1 0x0000000000002000UL | 529 | #define PA6T_MMCR0_INTEN1 0x0000000000002000UL |
| 528 | #define PA6T_MMCR0_INTEN2 0x0000000000004000UL | 530 | #define PA6T_MMCR0_INTEN2 0x0000000000004000UL |
| 529 | #define PA6T_MMCR0_INTEN3 0x0000000000008000UL | 531 | #define PA6T_MMCR0_INTEN3 0x0000000000008000UL |
| 530 | #define PA6T_MMCR0_INTEN4 0x0000000000010000UL | 532 | #define PA6T_MMCR0_INTEN4 0x0000000000010000UL |
| 531 | #define PA6T_MMCR0_INTEN5 0x0000000000020000UL | 533 | #define PA6T_MMCR0_INTEN5 0x0000000000020000UL |
| 532 | #define PA6T_MMCR0_DISCNT 0x0000000000040000UL | 534 | #define PA6T_MMCR0_DISCNT 0x0000000000040000UL |
| 533 | #define PA6T_MMCR0_UOP 0x0000000000080000UL | 535 | #define PA6T_MMCR0_UOP 0x0000000000080000UL |
| 534 | #define PA6T_MMCR0_TRG 0x0000000000100000UL | 536 | #define PA6T_MMCR0_TRG 0x0000000000100000UL |
| 535 | #define PA6T_MMCR0_TRGEN 0x0000000000200000UL | 537 | #define PA6T_MMCR0_TRGEN 0x0000000000200000UL |
| 536 | #define PA6T_MMCR0_TRGREG 0x0000000001600000UL | 538 | #define PA6T_MMCR0_TRGREG 0x0000000001600000UL |
| 537 | #define PA6T_MMCR0_SIARLOG 0x0000000002000000UL | 539 | #define PA6T_MMCR0_SIARLOG 0x0000000002000000UL |
| 538 | #define PA6T_MMCR0_SDARLOG 0x0000000004000000UL | 540 | #define PA6T_MMCR0_SDARLOG 0x0000000004000000UL |
| 539 | #define PA6T_MMCR0_PROEN 0x0000000008000000UL | 541 | #define PA6T_MMCR0_PROEN 0x0000000008000000UL |
| 540 | #define PA6T_MMCR0_PROLOG 0x0000000010000000UL | 542 | #define PA6T_MMCR0_PROLOG 0x0000000010000000UL |
| 541 | #define PA6T_MMCR0_DAMEN2 0x0000000020000000UL | 543 | #define PA6T_MMCR0_DAMEN2 0x0000000020000000UL |
| 542 | #define PA6T_MMCR0_DAMEN3 0x0000000040000000UL | 544 | #define PA6T_MMCR0_DAMEN3 0x0000000040000000UL |
| 543 | #define PA6T_MMCR0_DAMEN4 0x0000000080000000UL | 545 | #define PA6T_MMCR0_DAMEN4 0x0000000080000000UL |
| 544 | #define PA6T_MMCR0_DAMEN5 0x0000000100000000UL | 546 | #define PA6T_MMCR0_DAMEN5 0x0000000100000000UL |
| 545 | #define PA6T_MMCR0_DAMSEL2 0x0000000200000000UL | 547 | #define PA6T_MMCR0_DAMSEL2 0x0000000200000000UL |
| 546 | #define PA6T_MMCR0_DAMSEL3 0x0000000400000000UL | 548 | #define PA6T_MMCR0_DAMSEL3 0x0000000400000000UL |
| 547 | #define PA6T_MMCR0_DAMSEL4 0x0000000800000000UL | 549 | #define PA6T_MMCR0_DAMSEL4 0x0000000800000000UL |
| 548 | #define PA6T_MMCR0_DAMSEL5 0x0000001000000000UL | 550 | #define PA6T_MMCR0_DAMSEL5 0x0000001000000000UL |
| 549 | #define PA6T_MMCR0_HANDDIS 0x0000002000000000UL | 551 | #define PA6T_MMCR0_HANDDIS 0x0000002000000000UL |
| 550 | #define PA6T_MMCR0_PCTEN 0x0000004000000000UL | 552 | #define PA6T_MMCR0_PCTEN 0x0000004000000000UL |
| 551 | #define PA6T_MMCR0_SOCEN 0x0000008000000000UL | 553 | #define PA6T_MMCR0_SOCEN 0x0000008000000000UL |
| 552 | #define PA6T_MMCR0_SOCMOD 0x0000010000000000UL | 554 | #define PA6T_MMCR0_SOCMOD 0x0000010000000000UL |
| 553 | 555 | ||
| 554 | #define SPRN_PA6T_MMCR1 798 | 556 | #define SPRN_PA6T_MMCR1 798 |
| 555 | #define PA6T_MMCR1_ES2 0x00000000000000ffUL | 557 | #define PA6T_MMCR1_ES2 0x00000000000000ffUL |
| 556 | #define PA6T_MMCR1_ES3 0x000000000000ff00UL | 558 | #define PA6T_MMCR1_ES3 0x000000000000ff00UL |
| 557 | #define PA6T_MMCR1_ES4 0x0000000000ff0000UL | 559 | #define PA6T_MMCR1_ES4 0x0000000000ff0000UL |
| 558 | #define PA6T_MMCR1_ES5 0x00000000ff000000UL | 560 | #define PA6T_MMCR1_ES5 0x00000000ff000000UL |
| 559 | 561 | ||
| 560 | #define SPRN_PA6T_UPMC0 771 /* User PerfMon Counter 0 */ | 562 | #define SPRN_PA6T_UPMC0 771 /* User PerfMon Counter 0 */ |
| 561 | #define SPRN_PA6T_UPMC1 772 /* ... */ | 563 | #define SPRN_PA6T_UPMC1 772 /* ... */ |
| 562 | #define SPRN_PA6T_UPMC2 773 | 564 | #define SPRN_PA6T_UPMC2 773 |
| 563 | #define SPRN_PA6T_UPMC3 774 | 565 | #define SPRN_PA6T_UPMC3 774 |
| 564 | #define SPRN_PA6T_UPMC4 775 | 566 | #define SPRN_PA6T_UPMC4 775 |
| 565 | #define SPRN_PA6T_UPMC5 776 | 567 | #define SPRN_PA6T_UPMC5 776 |
| 566 | #define SPRN_PA6T_UMMCR0 779 /* User Monitor Mode Control Register 0 */ | 568 | #define SPRN_PA6T_UMMCR0 779 /* User Monitor Mode Control Register 0 */ |
| 567 | #define SPRN_PA6T_SIAR 780 /* Sampled Instruction Address */ | 569 | #define SPRN_PA6T_SIAR 780 /* Sampled Instruction Address */ |
| 568 | #define SPRN_PA6T_UMMCR1 782 /* User Monitor Mode Control Register 1 */ | 570 | #define SPRN_PA6T_UMMCR1 782 /* User Monitor Mode Control Register 1 */ |
| 569 | #define SPRN_PA6T_SIER 785 /* Sampled Instruction Event Register */ | 571 | #define SPRN_PA6T_SIER 785 /* Sampled Instruction Event Register */ |
| 570 | #define SPRN_PA6T_PMC0 787 | 572 | #define SPRN_PA6T_PMC0 787 |
| 571 | #define SPRN_PA6T_PMC1 788 | 573 | #define SPRN_PA6T_PMC1 788 |
| 572 | #define SPRN_PA6T_PMC2 789 | 574 | #define SPRN_PA6T_PMC2 789 |
| 573 | #define SPRN_PA6T_PMC3 790 | 575 | #define SPRN_PA6T_PMC3 790 |
| 574 | #define SPRN_PA6T_PMC4 791 | 576 | #define SPRN_PA6T_PMC4 791 |
| 575 | #define SPRN_PA6T_PMC5 792 | 577 | #define SPRN_PA6T_PMC5 792 |
| 576 | #define SPRN_PA6T_TSR0 793 /* Timestamp Register 0 */ | 578 | #define SPRN_PA6T_TSR0 793 /* Timestamp Register 0 */ |
| 577 | #define SPRN_PA6T_TSR1 794 /* Timestamp Register 1 */ | 579 | #define SPRN_PA6T_TSR1 794 /* Timestamp Register 1 */ |
| 578 | #define SPRN_PA6T_TSR2 799 /* Timestamp Register 2 */ | 580 | #define SPRN_PA6T_TSR2 799 /* Timestamp Register 2 */ |
| 579 | #define SPRN_PA6T_TSR3 784 /* Timestamp Register 3 */ | 581 | #define SPRN_PA6T_TSR3 784 /* Timestamp Register 3 */ |
| 580 | 582 | ||
| 581 | #define SPRN_PA6T_IER 981 /* Icache Error Register */ | 583 | #define SPRN_PA6T_IER 981 /* Icache Error Register */ |
| 582 | #define SPRN_PA6T_DER 982 /* Dcache Error Register */ | 584 | #define SPRN_PA6T_DER 982 /* Dcache Error Register */ |
| 583 | #define SPRN_PA6T_BER 862 /* BIU Error Address Register */ | 585 | #define SPRN_PA6T_BER 862 /* BIU Error Address Register */ |
| 584 | #define SPRN_PA6T_MER 849 /* MMU Error Register */ | 586 | #define SPRN_PA6T_MER 849 /* MMU Error Register */ |
| 585 | 587 | ||
| 586 | #define SPRN_PA6T_IMA0 880 /* Instruction Match Array 0 */ | 588 | #define SPRN_PA6T_IMA0 880 /* Instruction Match Array 0 */ |
| 587 | #define SPRN_PA6T_IMA1 881 /* ... */ | 589 | #define SPRN_PA6T_IMA1 881 /* ... */ |
| 588 | #define SPRN_PA6T_IMA2 882 | 590 | #define SPRN_PA6T_IMA2 882 |
| 589 | #define SPRN_PA6T_IMA3 883 | 591 | #define SPRN_PA6T_IMA3 883 |
| 590 | #define SPRN_PA6T_IMA4 884 | 592 | #define SPRN_PA6T_IMA4 884 |
| 591 | #define SPRN_PA6T_IMA5 885 | 593 | #define SPRN_PA6T_IMA5 885 |
| 592 | #define SPRN_PA6T_IMA6 886 | 594 | #define SPRN_PA6T_IMA6 886 |
| 593 | #define SPRN_PA6T_IMA7 887 | 595 | #define SPRN_PA6T_IMA7 887 |
| 594 | #define SPRN_PA6T_IMA8 888 | 596 | #define SPRN_PA6T_IMA8 888 |
| 595 | #define SPRN_PA6T_IMA9 889 | 597 | #define SPRN_PA6T_IMA9 889 |
| 596 | #define SPRN_PA6T_BTCR 978 /* Breakpoint and Tagging Control Register */ | 598 | #define SPRN_PA6T_BTCR 978 /* Breakpoint and Tagging Control Register */ |
| 597 | #define SPRN_PA6T_IMAAT 979 /* Instruction Match Array Action Table */ | 599 | #define SPRN_PA6T_IMAAT 979 /* Instruction Match Array Action Table */ |
| 598 | #define SPRN_PA6T_PCCR 1019 /* Power Counter Control Register */ | 600 | #define SPRN_PA6T_PCCR 1019 /* Power Counter Control Register */ |
| 599 | #define SPRN_BKMK 1020 /* Cell Bookmark Register */ | 601 | #define SPRN_BKMK 1020 /* Cell Bookmark Register */ |
| 600 | #define SPRN_PA6T_RPCCR 1021 /* Retire PC Trace Control Register */ | 602 | #define SPRN_PA6T_RPCCR 1021 /* Retire PC Trace Control Register */ |
| 601 | 603 | ||
| 602 | 604 | ||
| 603 | #else /* 32-bit */ | 605 | #else /* 32-bit */ |
| 604 | #define SPRN_MMCR0 952 /* Monitor Mode Control Register 0 */ | 606 | #define SPRN_MMCR0 952 /* Monitor Mode Control Register 0 */ |
| 605 | #define MMCR0_FC 0x80000000UL /* freeze counters */ | 607 | #define MMCR0_FC 0x80000000UL /* freeze counters */ |
| 606 | #define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */ | 608 | #define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */ |
| 607 | #define MMCR0_FCP 0x20000000UL /* freeze in problem state */ | 609 | #define MMCR0_FCP 0x20000000UL /* freeze in problem state */ |
| 608 | #define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */ | 610 | #define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */ |
| 609 | #define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */ | 611 | #define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */ |
| 610 | #define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */ | 612 | #define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */ |
| 611 | #define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */ | 613 | #define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */ |
| 612 | #define MMCR0_TBEE 0x00400000UL /* time base exception enable */ | 614 | #define MMCR0_TBEE 0x00400000UL /* time base exception enable */ |
| 613 | #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */ | 615 | #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */ |
| 614 | #define MMCR0_PMCnCE 0x00004000UL /* count enable for all but PMC 1 */ | 616 | #define MMCR0_PMCnCE 0x00004000UL /* count enable for all but PMC 1 */ |
| 615 | #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */ | 617 | #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */ |
| 616 | #define MMCR0_PMC1SEL 0x00001fc0UL /* PMC 1 Event */ | 618 | #define MMCR0_PMC1SEL 0x00001fc0UL /* PMC 1 Event */ |
| 617 | #define MMCR0_PMC2SEL 0x0000003fUL /* PMC 2 Event */ | 619 | #define MMCR0_PMC2SEL 0x0000003fUL /* PMC 2 Event */ |
| 618 | 620 | ||
| 619 | #define SPRN_MMCR1 956 | 621 | #define SPRN_MMCR1 956 |
| 620 | #define MMCR1_PMC3SEL 0xf8000000UL /* PMC 3 Event */ | 622 | #define MMCR1_PMC3SEL 0xf8000000UL /* PMC 3 Event */ |
| 621 | #define MMCR1_PMC4SEL 0x07c00000UL /* PMC 4 Event */ | 623 | #define MMCR1_PMC4SEL 0x07c00000UL /* PMC 4 Event */ |
| 622 | #define MMCR1_PMC5SEL 0x003e0000UL /* PMC 5 Event */ | 624 | #define MMCR1_PMC5SEL 0x003e0000UL /* PMC 5 Event */ |
| 623 | #define MMCR1_PMC6SEL 0x0001f800UL /* PMC 6 Event */ | 625 | #define MMCR1_PMC6SEL 0x0001f800UL /* PMC 6 Event */ |
| 624 | #define SPRN_MMCR2 944 | 626 | #define SPRN_MMCR2 944 |
| 625 | #define SPRN_PMC1 953 /* Performance Counter Register 1 */ | 627 | #define SPRN_PMC1 953 /* Performance Counter Register 1 */ |
| 626 | #define SPRN_PMC2 954 /* Performance Counter Register 2 */ | 628 | #define SPRN_PMC2 954 /* Performance Counter Register 2 */ |
| 627 | #define SPRN_PMC3 957 /* Performance Counter Register 3 */ | 629 | #define SPRN_PMC3 957 /* Performance Counter Register 3 */ |
| 628 | #define SPRN_PMC4 958 /* Performance Counter Register 4 */ | 630 | #define SPRN_PMC4 958 /* Performance Counter Register 4 */ |
| 629 | #define SPRN_PMC5 945 /* Performance Counter Register 5 */ | 631 | #define SPRN_PMC5 945 /* Performance Counter Register 5 */ |
| 630 | #define SPRN_PMC6 946 /* Performance Counter Register 6 */ | 632 | #define SPRN_PMC6 946 /* Performance Counter Register 6 */ |
| 631 | 633 | ||
| 632 | #define SPRN_SIAR 955 /* Sampled Instruction Address Register */ | 634 | #define SPRN_SIAR 955 /* Sampled Instruction Address Register */ |
| 633 | 635 | ||
| 634 | /* Bit definitions for MMCR0 and PMC1 / PMC2. */ | 636 | /* Bit definitions for MMCR0 and PMC1 / PMC2. */ |
| 635 | #define MMCR0_PMC1_CYCLES (1 << 7) | 637 | #define MMCR0_PMC1_CYCLES (1 << 7) |
| 636 | #define MMCR0_PMC1_ICACHEMISS (5 << 7) | 638 | #define MMCR0_PMC1_ICACHEMISS (5 << 7) |
| 637 | #define MMCR0_PMC1_DTLB (6 << 7) | 639 | #define MMCR0_PMC1_DTLB (6 << 7) |
| 638 | #define MMCR0_PMC2_DCACHEMISS 0x6 | 640 | #define MMCR0_PMC2_DCACHEMISS 0x6 |
| 639 | #define MMCR0_PMC2_CYCLES 0x1 | 641 | #define MMCR0_PMC2_CYCLES 0x1 |
| 640 | #define MMCR0_PMC2_ITLB 0x7 | 642 | #define MMCR0_PMC2_ITLB 0x7 |
| 641 | #define MMCR0_PMC2_LOADMISSTIME 0x5 | 643 | #define MMCR0_PMC2_LOADMISSTIME 0x5 |
| 642 | #endif | 644 | #endif |
| 643 | 645 | ||
| 644 | /* | 646 | /* |
| 645 | * SPRG usage: | 647 | * SPRG usage: |
| 646 | * | 648 | * |
| 647 | * All 64-bit: | 649 | * All 64-bit: |
| 648 | * - SPRG1 stores PACA pointer | 650 | * - SPRG1 stores PACA pointer |
| 649 | * | 651 | * |
| 650 | * 64-bit server: | 652 | * 64-bit server: |
| 651 | * - SPRG0 unused (reserved for HV on Power4) | 653 | * - SPRG0 unused (reserved for HV on Power4) |
| 652 | * - SPRG2 scratch for exception vectors | 654 | * - SPRG2 scratch for exception vectors |
| 653 | * - SPRG3 unused (user visible) | 655 | * - SPRG3 unused (user visible) |
| 654 | * | 656 | * |
| 655 | * 64-bit embedded | 657 | * 64-bit embedded |
| 656 | * - SPRG0 generic exception scratch | 658 | * - SPRG0 generic exception scratch |
| 657 | * - SPRG2 TLB exception stack | 659 | * - SPRG2 TLB exception stack |
| 658 | * - SPRG3 unused (user visible) | 660 | * - SPRG3 unused (user visible) |
| 659 | * - SPRG4 unused (user visible) | 661 | * - SPRG4 unused (user visible) |
| 660 | * - SPRG6 TLB miss scratch (user visible, sorry!) | 662 | * - SPRG6 TLB miss scratch (user visible, sorry!) |
| 661 | * - SPRG7 critical exception scratch | 663 | * - SPRG7 critical exception scratch |
| 662 | * - SPRG8 machine check exception scratch | 664 | * - SPRG8 machine check exception scratch |
| 663 | * - SPRG9 debug exception scratch | 665 | * - SPRG9 debug exception scratch |
| 664 | * | 666 | * |
| 665 | * All 32-bit: | 667 | * All 32-bit: |
| 666 | * - SPRG3 current thread_info pointer | 668 | * - SPRG3 current thread_info pointer |
| 667 | * (virtual on BookE, physical on others) | 669 | * (virtual on BookE, physical on others) |
| 668 | * | 670 | * |
| 669 | * 32-bit classic: | 671 | * 32-bit classic: |
| 670 | * - SPRG0 scratch for exception vectors | 672 | * - SPRG0 scratch for exception vectors |
| 671 | * - SPRG1 scratch for exception vectors | 673 | * - SPRG1 scratch for exception vectors |
| 672 | * - SPRG2 indicator that we are in RTAS | 674 | * - SPRG2 indicator that we are in RTAS |
| 673 | * - SPRG4 (603 only) pseudo TLB LRU data | 675 | * - SPRG4 (603 only) pseudo TLB LRU data |
| 674 | * | 676 | * |
| 675 | * 32-bit 40x: | 677 | * 32-bit 40x: |
| 676 | * - SPRG0 scratch for exception vectors | 678 | * - SPRG0 scratch for exception vectors |
| 677 | * - SPRG1 scratch for exception vectors | 679 | * - SPRG1 scratch for exception vectors |
| 678 | * - SPRG2 scratch for exception vectors | 680 | * - SPRG2 scratch for exception vectors |
| 679 | * - SPRG4 scratch for exception vectors (not 403) | 681 | * - SPRG4 scratch for exception vectors (not 403) |
| 680 | * - SPRG5 scratch for exception vectors (not 403) | 682 | * - SPRG5 scratch for exception vectors (not 403) |
| 681 | * - SPRG6 scratch for exception vectors (not 403) | 683 | * - SPRG6 scratch for exception vectors (not 403) |
| 682 | * - SPRG7 scratch for exception vectors (not 403) | 684 | * - SPRG7 scratch for exception vectors (not 403) |
| 683 | * | 685 | * |
| 684 | * 32-bit 440 and FSL BookE: | 686 | * 32-bit 440 and FSL BookE: |
| 685 | * - SPRG0 scratch for exception vectors | 687 | * - SPRG0 scratch for exception vectors |
| 686 | * - SPRG1 scratch for exception vectors (*) | 688 | * - SPRG1 scratch for exception vectors (*) |
| 687 | * - SPRG2 scratch for crit interrupts handler | 689 | * - SPRG2 scratch for crit interrupts handler |
| 688 | * - SPRG4 scratch for exception vectors | 690 | * - SPRG4 scratch for exception vectors |
| 689 | * - SPRG5 scratch for exception vectors | 691 | * - SPRG5 scratch for exception vectors |
| 690 | * - SPRG6 scratch for machine check handler | 692 | * - SPRG6 scratch for machine check handler |
| 691 | * - SPRG7 scratch for exception vectors | 693 | * - SPRG7 scratch for exception vectors |
| 692 | * - SPRG9 scratch for debug vectors (e500 only) | 694 | * - SPRG9 scratch for debug vectors (e500 only) |
| 693 | * | 695 | * |
| 694 | * Additionally, BookE separates "read" and "write" | 696 | * Additionally, BookE separates "read" and "write" |
| 695 | * of those registers. That allows the userspace- | 697 | * of those registers. That allows the userspace- |
| 696 | * readable variant to be used for reads, which can | 698 | * readable variant to be used for reads, which can |
| 697 | * avoid a fault with KVM-type virtualization. | 699 | * avoid a fault with KVM-type virtualization. |
| 698 | * | 700 | * |
| 699 | * (*) Under KVM, the host SPRG1 is used to point to | 701 | * (*) Under KVM, the host SPRG1 is used to point to |
| 700 | * the current VCPU data structure | 702 | * the current VCPU data structure |
| 701 | * | 703 | * |
| 702 | * 32-bit 8xx: | 704 | * 32-bit 8xx: |
| 703 | * - SPRG0 scratch for exception vectors | 705 | * - SPRG0 scratch for exception vectors |
| 704 | * - SPRG1 scratch for exception vectors | 706 | * - SPRG1 scratch for exception vectors |
| 705 | * - SPRG2 apparently unused but initialized | 707 | * - SPRG2 apparently unused but initialized |
| 706 | * | 708 | * |
| 707 | */ | 709 | */ |
| 708 | #ifdef CONFIG_PPC64 | 710 | #ifdef CONFIG_PPC64 |
| 709 | #define SPRN_SPRG_PACA SPRN_SPRG1 | 711 | #define SPRN_SPRG_PACA SPRN_SPRG1 |
| 710 | #else | 712 | #else |
| 711 | #define SPRN_SPRG_THREAD SPRN_SPRG3 | 713 | #define SPRN_SPRG_THREAD SPRN_SPRG3 |
| 712 | #endif | 714 | #endif |
| 713 | 715 | ||
| 714 | #ifdef CONFIG_PPC_BOOK3S_64 | 716 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 715 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG2 | 717 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG2 |
| 716 | #endif | 718 | #endif |
| 717 | 719 | ||
| 718 | #ifdef CONFIG_PPC_BOOK3E_64 | 720 | #ifdef CONFIG_PPC_BOOK3E_64 |
| 719 | #define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8 | 721 | #define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8 |
| 720 | #define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7 | 722 | #define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7 |
| 721 | #define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9 | 723 | #define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9 |
| 722 | #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 | 724 | #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 |
| 723 | #define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 | 725 | #define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 |
| 724 | #define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 | 726 | #define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 |
| 725 | #endif | 727 | #endif |
| 726 | 728 | ||
| 727 | #ifdef CONFIG_PPC_BOOK3S_32 | 729 | #ifdef CONFIG_PPC_BOOK3S_32 |
| 728 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 | 730 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 |
| 729 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 | 731 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 |
| 730 | #define SPRN_SPRG_RTAS SPRN_SPRG2 | 732 | #define SPRN_SPRG_RTAS SPRN_SPRG2 |
| 731 | #define SPRN_SPRG_603_LRU SPRN_SPRG4 | 733 | #define SPRN_SPRG_603_LRU SPRN_SPRG4 |
| 732 | #endif | 734 | #endif |
| 733 | 735 | ||
| 734 | #ifdef CONFIG_40x | 736 | #ifdef CONFIG_40x |
| 735 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 | 737 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 |
| 736 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 | 738 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 |
| 737 | #define SPRN_SPRG_SCRATCH2 SPRN_SPRG2 | 739 | #define SPRN_SPRG_SCRATCH2 SPRN_SPRG2 |
| 738 | #define SPRN_SPRG_SCRATCH3 SPRN_SPRG4 | 740 | #define SPRN_SPRG_SCRATCH3 SPRN_SPRG4 |
| 739 | #define SPRN_SPRG_SCRATCH4 SPRN_SPRG5 | 741 | #define SPRN_SPRG_SCRATCH4 SPRN_SPRG5 |
| 740 | #define SPRN_SPRG_SCRATCH5 SPRN_SPRG6 | 742 | #define SPRN_SPRG_SCRATCH5 SPRN_SPRG6 |
| 741 | #define SPRN_SPRG_SCRATCH6 SPRN_SPRG7 | 743 | #define SPRN_SPRG_SCRATCH6 SPRN_SPRG7 |
| 742 | #endif | 744 | #endif |
| 743 | 745 | ||
| 744 | #ifdef CONFIG_BOOKE | 746 | #ifdef CONFIG_BOOKE |
| 745 | #define SPRN_SPRG_RSCRATCH0 SPRN_SPRG0 | 747 | #define SPRN_SPRG_RSCRATCH0 SPRN_SPRG0 |
| 746 | #define SPRN_SPRG_WSCRATCH0 SPRN_SPRG0 | 748 | #define SPRN_SPRG_WSCRATCH0 SPRN_SPRG0 |
| 747 | #define SPRN_SPRG_RSCRATCH1 SPRN_SPRG1 | 749 | #define SPRN_SPRG_RSCRATCH1 SPRN_SPRG1 |
| 748 | #define SPRN_SPRG_WSCRATCH1 SPRN_SPRG1 | 750 | #define SPRN_SPRG_WSCRATCH1 SPRN_SPRG1 |
| 749 | #define SPRN_SPRG_RSCRATCH_CRIT SPRN_SPRG2 | 751 | #define SPRN_SPRG_RSCRATCH_CRIT SPRN_SPRG2 |
| 750 | #define SPRN_SPRG_WSCRATCH_CRIT SPRN_SPRG2 | 752 | #define SPRN_SPRG_WSCRATCH_CRIT SPRN_SPRG2 |
| 751 | #define SPRN_SPRG_RSCRATCH2 SPRN_SPRG4R | 753 | #define SPRN_SPRG_RSCRATCH2 SPRN_SPRG4R |
| 752 | #define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W | 754 | #define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W |
| 753 | #define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R | 755 | #define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R |
| 754 | #define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W | 756 | #define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W |
| 755 | #define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R | 757 | #define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R |
| 756 | #define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W | 758 | #define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W |
| 757 | #define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R | 759 | #define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R |
| 758 | #define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W | 760 | #define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W |
| 759 | #ifdef CONFIG_E200 | 761 | #ifdef CONFIG_E200 |
| 760 | #define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG6R | 762 | #define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG6R |
| 761 | #define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG6W | 763 | #define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG6W |
| 762 | #else | 764 | #else |
| 763 | #define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9 | 765 | #define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9 |
| 764 | #define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9 | 766 | #define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9 |
| 765 | #endif | 767 | #endif |
| 766 | #define SPRN_SPRG_RVCPU SPRN_SPRG1 | 768 | #define SPRN_SPRG_RVCPU SPRN_SPRG1 |
| 767 | #define SPRN_SPRG_WVCPU SPRN_SPRG1 | 769 | #define SPRN_SPRG_WVCPU SPRN_SPRG1 |
| 768 | #endif | 770 | #endif |
| 769 | 771 | ||
| 770 | #ifdef CONFIG_8xx | 772 | #ifdef CONFIG_8xx |
| 771 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 | 773 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 |
| 772 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 | 774 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 |
| 773 | #endif | 775 | #endif |
| 774 | 776 | ||
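A usage sketch of the aliases above (illustrative only; on non-BookE 32-bit the stored address is physical, so the direct cast is meaningful only where the value is virtual):

    /* Hedged sketch: fetch the current thread_info pointer from SPRG3
     * on 32-bit, via the mfspr() accessor defined later in this header. */
    struct thread_info *ti =
            (struct thread_info *)mfspr(SPRN_SPRG_THREAD);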
| 775 | /* | 777 | /* |
| 776 | * An mtfsf instruction with the L bit set. On CPUs that support this, the | 778 | * An mtfsf instruction with the L bit set. On CPUs that support this, the |
| 777 | * full 64 bits of the FPSCR are restored; on other CPUs the L bit is ignored. | 779 | * full 64 bits of the FPSCR are restored; on other CPUs the L bit is ignored. |
| 778 | * | 780 | * |
| 779 | * Until binutils gets the new form of mtfsf, hardwire the instruction. | 781 | * Until binutils gets the new form of mtfsf, hardwire the instruction. |
| 780 | */ | 782 | */ |
| 781 | #ifdef CONFIG_PPC64 | 783 | #ifdef CONFIG_PPC64 |
| 782 | #define MTFSF_L(REG) \ | 784 | #define MTFSF_L(REG) \ |
| 783 | .long (0xfc00058e | ((0xff) << 17) | ((REG) << 11) | (1 << 25)) | 785 | .long (0xfc00058e | ((0xff) << 17) | ((REG) << 11) | (1 << 25)) |
| 784 | #else | 786 | #else |
| 785 | #define MTFSF_L(REG) mtfsf 0xff, (REG) | 787 | #define MTFSF_L(REG) mtfsf 0xff, (REG) |
| 786 | #endif | 788 | #endif |
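As a quick check of the hardwired encoding, the field arithmetic works out as follows (a worked example, not part of the header):

    /* MTFSF_L(1): base opcode | FLM=0xff | source FPR f1 | L bit */
    unsigned long insn = 0xfc00058eUL
                       | (0xffUL << 17)   /* FLM field       */
                       | (1UL   << 11)    /* source FPR, f1  */
                       | (1UL   << 25);   /* L bit           */
    /* insn == 0xfffe0d8eUL */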
| 787 | 789 | ||
| 788 | /* Processor Version Register (PVR) field extraction */ | 790 | /* Processor Version Register (PVR) field extraction */ |
| 789 | 791 | ||
| 790 | #define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ | 792 | #define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ |
| 791 | #define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */ | 793 | #define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */ |
| 792 | 794 | ||
| 793 | #define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv)) | 795 | #define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv)) |
| 794 | 796 | ||
| 795 | /* | 797 | /* |
| 796 | * IBM has further subdivided the standard PowerPC 16-bit version and | 798 | * IBM has further subdivided the standard PowerPC 16-bit version and |
| 797 | * revision subfields of the PVR for the PowerPC 403s into the following: | 799 | * revision subfields of the PVR for the PowerPC 403s into the following: |
| 798 | */ | 800 | */ |
| 799 | 801 | ||
| 800 | #define PVR_FAM(pvr) (((pvr) >> 20) & 0xFFF) /* Family field */ | 802 | #define PVR_FAM(pvr) (((pvr) >> 20) & 0xFFF) /* Family field */ |
| 801 | #define PVR_MEM(pvr) (((pvr) >> 16) & 0xF) /* Member field */ | 803 | #define PVR_MEM(pvr) (((pvr) >> 16) & 0xF) /* Member field */ |
| 802 | #define PVR_CORE(pvr) (((pvr) >> 12) & 0xF) /* Core field */ | 804 | #define PVR_CORE(pvr) (((pvr) >> 12) & 0xF) /* Core field */ |
| 803 | #define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */ | 805 | #define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */ |
| 804 | #define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */ | 806 | #define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */ |
| 805 | #define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */ | 807 | #define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */ |
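As a worked example (the input value is PVR_403GCX from the table that follows), 0x00201400 decodes under these macros as:

    /* Hedged sketch: decode a 403GCX PVR with the macros above. */
    unsigned int pvr  = 0x00201400;     /* PVR_403GCX, defined below */

    unsigned int ver  = PVR_VER(pvr);   /* 0x0020 */
    unsigned int rev  = PVR_REV(pvr);   /* 0x1400 */
    unsigned int fam  = PVR_FAM(pvr);   /* 0x002  */
    unsigned int core = PVR_CORE(pvr);  /* 0x1    */
    unsigned int cfg  = PVR_CFG(pvr);   /* 0x4    */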
| 806 | 808 | ||
| 807 | /* Processor Version Numbers */ | 809 | /* Processor Version Numbers */ |
| 808 | 810 | ||
| 809 | #define PVR_403GA 0x00200000 | 811 | #define PVR_403GA 0x00200000 |
| 810 | #define PVR_403GB 0x00200100 | 812 | #define PVR_403GB 0x00200100 |
| 811 | #define PVR_403GC 0x00200200 | 813 | #define PVR_403GC 0x00200200 |
| 812 | #define PVR_403GCX 0x00201400 | 814 | #define PVR_403GCX 0x00201400 |
| 813 | #define PVR_405GP 0x40110000 | 815 | #define PVR_405GP 0x40110000 |
| 814 | #define PVR_STB03XXX 0x40310000 | 816 | #define PVR_STB03XXX 0x40310000 |
| 815 | #define PVR_NP405H 0x41410000 | 817 | #define PVR_NP405H 0x41410000 |
| 816 | #define PVR_NP405L 0x41610000 | 818 | #define PVR_NP405L 0x41610000 |
| 817 | #define PVR_601 0x00010000 | 819 | #define PVR_601 0x00010000 |
| 818 | #define PVR_602 0x00050000 | 820 | #define PVR_602 0x00050000 |
| 819 | #define PVR_603 0x00030000 | 821 | #define PVR_603 0x00030000 |
| 820 | #define PVR_603e 0x00060000 | 822 | #define PVR_603e 0x00060000 |
| 821 | #define PVR_603ev 0x00070000 | 823 | #define PVR_603ev 0x00070000 |
| 822 | #define PVR_603r 0x00071000 | 824 | #define PVR_603r 0x00071000 |
| 823 | #define PVR_604 0x00040000 | 825 | #define PVR_604 0x00040000 |
| 824 | #define PVR_604e 0x00090000 | 826 | #define PVR_604e 0x00090000 |
| 825 | #define PVR_604r 0x000A0000 | 827 | #define PVR_604r 0x000A0000 |
| 826 | #define PVR_620 0x00140000 | 828 | #define PVR_620 0x00140000 |
| 827 | #define PVR_740 0x00080000 | 829 | #define PVR_740 0x00080000 |
| 828 | #define PVR_750 PVR_740 | 830 | #define PVR_750 PVR_740 |
| 829 | #define PVR_740P 0x10080000 | 831 | #define PVR_740P 0x10080000 |
| 830 | #define PVR_750P PVR_740P | 832 | #define PVR_750P PVR_740P |
| 831 | #define PVR_7400 0x000C0000 | 833 | #define PVR_7400 0x000C0000 |
| 832 | #define PVR_7410 0x800C0000 | 834 | #define PVR_7410 0x800C0000 |
| 833 | #define PVR_7450 0x80000000 | 835 | #define PVR_7450 0x80000000 |
| 834 | #define PVR_8540 0x80200000 | 836 | #define PVR_8540 0x80200000 |
| 835 | #define PVR_8560 0x80200000 | 837 | #define PVR_8560 0x80200000 |
| 836 | /* | 838 | /* |
| 837 | * All of the 8xx processors report the same PVR family for | 839 | * All of the 8xx processors report the same PVR family for |
| 838 | * the PowerPC core. The various versions of these processors must be | 840 | * the PowerPC core. The various versions of these processors must be |
| 839 | * differentiated by the version number in the Communication Processor | 841 | * differentiated by the version number in the Communication Processor |
| 840 | * Module (CPM). | 842 | * Module (CPM). |
| 841 | */ | 843 | */ |
| 842 | #define PVR_821 0x00500000 | 844 | #define PVR_821 0x00500000 |
| 843 | #define PVR_823 PVR_821 | 845 | #define PVR_823 PVR_821 |
| 844 | #define PVR_850 PVR_821 | 846 | #define PVR_850 PVR_821 |
| 845 | #define PVR_860 PVR_821 | 847 | #define PVR_860 PVR_821 |
| 846 | #define PVR_8240 0x00810100 | 848 | #define PVR_8240 0x00810100 |
| 847 | #define PVR_8245 0x80811014 | 849 | #define PVR_8245 0x80811014 |
| 848 | #define PVR_8260 PVR_8240 | 850 | #define PVR_8260 PVR_8240 |
| 849 | 851 | ||
| 850 | /* 64-bit processors */ | 852 | /* 64-bit processors */ |
| 851 | /* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */ | 853 | /* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */ |
| 852 | #define PV_NORTHSTAR 0x0033 | 854 | #define PV_NORTHSTAR 0x0033 |
| 853 | #define PV_PULSAR 0x0034 | 855 | #define PV_PULSAR 0x0034 |
| 854 | #define PV_POWER4 0x0035 | 856 | #define PV_POWER4 0x0035 |
| 855 | #define PV_ICESTAR 0x0036 | 857 | #define PV_ICESTAR 0x0036 |
| 856 | #define PV_SSTAR 0x0037 | 858 | #define PV_SSTAR 0x0037 |
| 857 | #define PV_POWER4p 0x0038 | 859 | #define PV_POWER4p 0x0038 |
| 858 | #define PV_970 0x0039 | 860 | #define PV_970 0x0039 |
| 859 | #define PV_POWER5 0x003A | 861 | #define PV_POWER5 0x003A |
| 860 | #define PV_POWER5p 0x003B | 862 | #define PV_POWER5p 0x003B |
| 861 | #define PV_970FX 0x003C | 863 | #define PV_970FX 0x003C |
| 862 | #define PV_630 0x0040 | 864 | #define PV_630 0x0040 |
| 863 | #define PV_630p 0x0041 | 865 | #define PV_630p 0x0041 |
| 864 | #define PV_970MP 0x0044 | 866 | #define PV_970MP 0x0044 |
| 865 | #define PV_970GX 0x0045 | 867 | #define PV_970GX 0x0045 |
| 866 | #define PV_BE 0x0070 | 868 | #define PV_BE 0x0070 |
| 867 | #define PV_PA6T 0x0090 | 869 | #define PV_PA6T 0x0090 |
| 868 | 870 | ||
| 869 | /* Macros for setting and retrieving special purpose registers */ | 871 | /* Macros for setting and retrieving special purpose registers */ |
| 870 | #ifndef __ASSEMBLY__ | 872 | #ifndef __ASSEMBLY__ |
| 871 | #define mfmsr() ({unsigned long rval; \ | 873 | #define mfmsr() ({unsigned long rval; \ |
| 872 | asm volatile("mfmsr %0" : "=r" (rval)); rval;}) | 874 | asm volatile("mfmsr %0" : "=r" (rval)); rval;}) |
| 873 | #ifdef CONFIG_PPC64 | 875 | #ifdef CONFIG_PPC64 |
| 874 | #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ | 876 | #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ |
| 875 | : : "r" (v) : "memory") | 877 | : : "r" (v) : "memory") |
| 876 | #define mtmsrd(v) __mtmsrd((v), 0) | 878 | #define mtmsrd(v) __mtmsrd((v), 0) |
| 877 | #define mtmsr(v) mtmsrd(v) | 879 | #define mtmsr(v) mtmsrd(v) |
| 878 | #else | 880 | #else |
| 879 | #define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory") | 881 | #define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory") |
| 880 | #endif | 882 | #endif |
| 881 | 883 | ||
| 882 | #define mfspr(rn) ({unsigned long rval; \ | 884 | #define mfspr(rn) ({unsigned long rval; \ |
| 883 | asm volatile("mfspr %0," __stringify(rn) \ | 885 | asm volatile("mfspr %0," __stringify(rn) \ |
| 884 | : "=r" (rval)); rval;}) | 886 | : "=r" (rval)); rval;}) |
| 885 | #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\ | 887 | #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\ |
| 886 | : "memory") | 888 | : "memory") |
| 887 | 889 | ||
| 888 | #ifdef __powerpc64__ | 890 | #ifdef __powerpc64__ |
| 889 | #ifdef CONFIG_PPC_CELL | 891 | #ifdef CONFIG_PPC_CELL |
| 890 | #define mftb() ({unsigned long rval; \ | 892 | #define mftb() ({unsigned long rval; \ |
| 891 | asm volatile( \ | 893 | asm volatile( \ |
| 892 | "90: mftb %0;\n" \ | 894 | "90: mftb %0;\n" \ |
| 893 | "97: cmpwi %0,0;\n" \ | 895 | "97: cmpwi %0,0;\n" \ |
| 894 | " beq- 90b;\n" \ | 896 | " beq- 90b;\n" \ |
| 895 | "99:\n" \ | 897 | "99:\n" \ |
| 896 | ".section __ftr_fixup,\"a\"\n" \ | 898 | ".section __ftr_fixup,\"a\"\n" \ |
| 897 | ".align 3\n" \ | 899 | ".align 3\n" \ |
| 898 | "98:\n" \ | 900 | "98:\n" \ |
| 899 | " .llong %1\n" \ | 901 | " .llong %1\n" \ |
| 900 | " .llong %1\n" \ | 902 | " .llong %1\n" \ |
| 901 | " .llong 97b-98b\n" \ | 903 | " .llong 97b-98b\n" \ |
| 902 | " .llong 99b-98b\n" \ | 904 | " .llong 99b-98b\n" \ |
| 903 | " .llong 0\n" \ | 905 | " .llong 0\n" \ |
| 904 | " .llong 0\n" \ | 906 | " .llong 0\n" \ |
| 905 | ".previous" \ | 907 | ".previous" \ |
| 906 | : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;}) | 908 | : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;}) |
| 907 | #else | 909 | #else |
| 908 | #define mftb() ({unsigned long rval; \ | 910 | #define mftb() ({unsigned long rval; \ |
| 909 | asm volatile("mftb %0" : "=r" (rval)); rval;}) | 911 | asm volatile("mftb %0" : "=r" (rval)); rval;}) |
| 910 | #endif /* !CONFIG_PPC_CELL */ | 912 | #endif /* !CONFIG_PPC_CELL */ |
| 911 | 913 | ||
| 912 | #else /* __powerpc64__ */ | 914 | #else /* __powerpc64__ */ |
| 913 | 915 | ||
| 914 | #define mftbl() ({unsigned long rval; \ | 916 | #define mftbl() ({unsigned long rval; \ |
| 915 | asm volatile("mftbl %0" : "=r" (rval)); rval;}) | 917 | asm volatile("mftbl %0" : "=r" (rval)); rval;}) |
| 916 | #define mftbu() ({unsigned long rval; \ | 918 | #define mftbu() ({unsigned long rval; \ |
| 917 | asm volatile("mftbu %0" : "=r" (rval)); rval;}) | 919 | asm volatile("mftbu %0" : "=r" (rval)); rval;}) |
| 918 | #endif /* !__powerpc64__ */ | 920 | #endif /* !__powerpc64__ */ |
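On 32-bit, a coherent 64-bit timebase value has to be composed from the two halves, retrying if the upper half ticks between reads. A minimal sketch assuming a hypothetical helper name (the kernel's real helper lives elsewhere):

    /* Hedged sketch: compose TBU:TBL into 64 bits on 32-bit. */
    static inline unsigned long long read_tb64(void)
    {
            unsigned long hi, lo;

            do {
                    hi = mftbu();
                    lo = mftbl();
            } while (mftbu() != hi);  /* TBU rolled over mid-read: retry */

            return ((unsigned long long)hi << 32) | lo;
    }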
| 919 | 921 | ||
| 920 | #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) | 922 | #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) |
| 921 | #define mttbu(v) asm volatile("mttbu %0":: "r"(v)) | 923 | #define mttbu(v) asm volatile("mttbu %0":: "r"(v)) |
| 922 | 924 | ||
| 923 | #ifdef CONFIG_PPC32 | 925 | #ifdef CONFIG_PPC32 |
| 924 | #define mfsrin(v) ({unsigned int rval; \ | 926 | #define mfsrin(v) ({unsigned int rval; \ |
| 925 | asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ | 927 | asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ |
| 926 | rval;}) | 928 | rval;}) |
| 927 | #endif | 929 | #endif |
| 928 | 930 | ||
| 929 | #define proc_trap() asm volatile("trap") | 931 | #define proc_trap() asm volatile("trap") |
| 930 | 932 | ||
| 931 | #ifdef CONFIG_PPC64 | 933 | #ifdef CONFIG_PPC64 |
| 932 | 934 | ||
| 933 | extern void ppc64_runlatch_on(void); | 935 | extern void ppc64_runlatch_on(void); |
| 934 | extern void ppc64_runlatch_off(void); | 936 | extern void ppc64_runlatch_off(void); |
| 935 | 937 | ||
| 936 | extern unsigned long scom970_read(unsigned int address); | 938 | extern unsigned long scom970_read(unsigned int address); |
| 937 | extern void scom970_write(unsigned int address, unsigned long value); | 939 | extern void scom970_write(unsigned int address, unsigned long value); |
| 938 | 940 | ||
| 939 | #else | 941 | #else |
| 940 | #define ppc64_runlatch_on() | 942 | #define ppc64_runlatch_on() |
| 941 | #define ppc64_runlatch_off() | 943 | #define ppc64_runlatch_off() |
| 942 | 944 | ||
| 943 | #endif /* CONFIG_PPC64 */ | 945 | #endif /* CONFIG_PPC64 */ |
| 944 | 946 | ||
| 945 | #define __get_SP() ({unsigned long sp; \ | 947 | #define __get_SP() ({unsigned long sp; \ |
| 946 | asm volatile("mr %0,1": "=r" (sp)); sp;}) | 948 | asm volatile("mr %0,1": "=r" (sp)); sp;}) |
| 947 | 949 | ||
| 948 | struct pt_regs; | 950 | struct pt_regs; |
| 949 | 951 | ||
| 950 | extern void ppc_save_regs(struct pt_regs *regs); | 952 | extern void ppc_save_regs(struct pt_regs *regs); |
| 951 | 953 | ||
| 952 | #endif /* __ASSEMBLY__ */ | 954 | #endif /* __ASSEMBLY__ */ |
| 953 | #endif /* __KERNEL__ */ | 955 | #endif /* __KERNEL__ */ |
| 954 | #endif /* _ASM_POWERPC_REG_H */ | 956 | #endif /* _ASM_POWERPC_REG_H */ |
| 955 | 957 |
arch/powerpc/include/asm/trace.h
| File was created | 1 | #undef TRACE_SYSTEM | |
| 2 | #define TRACE_SYSTEM powerpc | ||
| 3 | |||
| 4 | #if !defined(_TRACE_POWERPC_H) || defined(TRACE_HEADER_MULTI_READ) | ||
| 5 | #define _TRACE_POWERPC_H | ||
| 6 | |||
| 7 | #include <linux/tracepoint.h> | ||
| 8 | |||
| 9 | struct pt_regs; | ||
| 10 | |||
| 11 | TRACE_EVENT(irq_entry, | ||
| 12 | |||
| 13 | TP_PROTO(struct pt_regs *regs), | ||
| 14 | |||
| 15 | TP_ARGS(regs), | ||
| 16 | |||
| 17 | TP_STRUCT__entry( | ||
| 18 | __field(struct pt_regs *, regs) | ||
| 19 | ), | ||
| 20 | |||
| 21 | TP_fast_assign( | ||
| 22 | __entry->regs = regs; | ||
| 23 | ), | ||
| 24 | |||
| 25 | TP_printk("pt_regs=%p", __entry->regs) | ||
| 26 | ); | ||
| 27 | |||
| 28 | TRACE_EVENT(irq_exit, | ||
| 29 | |||
| 30 | TP_PROTO(struct pt_regs *regs), | ||
| 31 | |||
| 32 | TP_ARGS(regs), | ||
| 33 | |||
| 34 | TP_STRUCT__entry( | ||
| 35 | __field(struct pt_regs *, regs) | ||
| 36 | ), | ||
| 37 | |||
| 38 | TP_fast_assign( | ||
| 39 | __entry->regs = regs; | ||
| 40 | ), | ||
| 41 | |||
| 42 | TP_printk("pt_regs=%p", __entry->regs) | ||
| 43 | ); | ||
| 44 | |||
| 45 | TRACE_EVENT(timer_interrupt_entry, | ||
| 46 | |||
| 47 | TP_PROTO(struct pt_regs *regs), | ||
| 48 | |||
| 49 | TP_ARGS(regs), | ||
| 50 | |||
| 51 | TP_STRUCT__entry( | ||
| 52 | __field(struct pt_regs *, regs) | ||
| 53 | ), | ||
| 54 | |||
| 55 | TP_fast_assign( | ||
| 56 | __entry->regs = regs; | ||
| 57 | ), | ||
| 58 | |||
| 59 | TP_printk("pt_regs=%p", __entry->regs) | ||
| 60 | ); | ||
| 61 | |||
| 62 | TRACE_EVENT(timer_interrupt_exit, | ||
| 63 | |||
| 64 | TP_PROTO(struct pt_regs *regs), | ||
| 65 | |||
| 66 | TP_ARGS(regs), | ||
| 67 | |||
| 68 | TP_STRUCT__entry( | ||
| 69 | __field(struct pt_regs *, regs) | ||
| 70 | ), | ||
| 71 | |||
| 72 | TP_fast_assign( | ||
| 73 | __entry->regs = regs; | ||
| 74 | ), | ||
| 75 | |||
| 76 | TP_printk("pt_regs=%p", __entry->regs) | ||
| 77 | ); | ||
| 78 | |||
| 79 | #ifdef CONFIG_PPC_PSERIES | ||
| 80 | extern void hcall_tracepoint_regfunc(void); | ||
| 81 | extern void hcall_tracepoint_unregfunc(void); | ||
| 82 | |||
| 83 | TRACE_EVENT_FN(hcall_entry, | ||
| 84 | |||
| 85 | TP_PROTO(unsigned long opcode, unsigned long *args), | ||
| 86 | |||
| 87 | TP_ARGS(opcode, args), | ||
| 88 | |||
| 89 | TP_STRUCT__entry( | ||
| 90 | __field(unsigned long, opcode) | ||
| 91 | ), | ||
| 92 | |||
| 93 | TP_fast_assign( | ||
| 94 | __entry->opcode = opcode; | ||
| 95 | ), | ||
| 96 | |||
| 97 | TP_printk("opcode=%lu", __entry->opcode), | ||
| 98 | |||
| 99 | hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc | ||
| 100 | ); | ||
| 101 | |||
| 102 | TRACE_EVENT_FN(hcall_exit, | ||
| 103 | |||
| 104 | TP_PROTO(unsigned long opcode, unsigned long retval, | ||
| 105 | unsigned long *retbuf), | ||
| 106 | |||
| 107 | TP_ARGS(opcode, retval, retbuf), | ||
| 108 | |||
| 109 | TP_STRUCT__entry( | ||
| 110 | __field(unsigned long, opcode) | ||
| 111 | __field(unsigned long, retval) | ||
| 112 | ), | ||
| 113 | |||
| 114 | TP_fast_assign( | ||
| 115 | __entry->opcode = opcode; | ||
| 116 | __entry->retval = retval; | ||
| 117 | ), | ||
| 118 | |||
| 119 | TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval), | ||
| 120 | |||
| 121 | hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc | ||
| 122 | ); | ||
| 123 | #endif | ||
| 124 | |||
| 125 | #endif /* _TRACE_POWERPC_H */ | ||
| 126 | |||
| 127 | #undef TRACE_INCLUDE_PATH | ||
| 128 | #undef TRACE_INCLUDE_FILE | ||
| 129 | |||
| 130 | #define TRACE_INCLUDE_PATH asm | ||
| 131 | #define TRACE_INCLUDE_FILE trace | ||
| 132 | |||
| 133 | #include <trace/define_trace.h> | ||
| 134 |
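Once one compilation unit defines CREATE_TRACE_POINTS before including this header (the standard tracepoint convention), the generated trace_* wrappers can be called from C code. A hedged sketch of a call site (hypothetical function name):

    #include <asm/trace.h>

    void example_irq_path(struct pt_regs *regs)
    {
            trace_irq_entry(regs);   /* from TRACE_EVENT(irq_entry, ...) above */
            /* ... service the interrupt ... */
            trace_irq_exit(regs);
    }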
arch/powerpc/kernel/align.c
| 1 | /* align.c - handle alignment exceptions for the Power PC. | 1 | /* align.c - handle alignment exceptions for the Power PC. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | 3 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> |
| 4 | * Copyright (c) 1998-1999 TiVo, Inc. | 4 | * Copyright (c) 1998-1999 TiVo, Inc. |
| 5 | * PowerPC 403GCX modifications. | 5 | * PowerPC 403GCX modifications. |
| 6 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | 6 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> |
| 7 | * PowerPC 403GCX/405GP modifications. | 7 | * PowerPC 403GCX/405GP modifications. |
| 8 | * Copyright (c) 2001-2002 PPC64 team, IBM Corp | 8 | * Copyright (c) 2001-2002 PPC64 team, IBM Corp |
| 9 | * 64-bit and Power4 support | 9 | * 64-bit and Power4 support |
| 10 | * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp | 10 | * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp |
| 11 | * <benh@kernel.crashing.org> | 11 | * <benh@kernel.crashing.org> |
| 12 | * Merge ppc32 and ppc64 implementations | 12 | * Merge ppc32 and ppc64 implementations |
| 13 | * | 13 | * |
| 14 | * This program is free software; you can redistribute it and/or | 14 | * This program is free software; you can redistribute it and/or |
| 15 | * modify it under the terms of the GNU General Public License | 15 | * modify it under the terms of the GNU General Public License |
| 16 | * as published by the Free Software Foundation; either version | 16 | * as published by the Free Software Foundation; either version |
| 17 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| 21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
| 22 | #include <asm/processor.h> | 22 | #include <asm/processor.h> |
| 23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
| 24 | #include <asm/system.h> | 24 | #include <asm/system.h> |
| 25 | #include <asm/cache.h> | 25 | #include <asm/cache.h> |
| 26 | #include <asm/cputable.h> | 26 | #include <asm/cputable.h> |
| 27 | #include <asm/emulated_ops.h> | 27 | #include <asm/emulated_ops.h> |
| 28 | 28 | ||
| 29 | struct aligninfo { | 29 | struct aligninfo { |
| 30 | unsigned char len; | 30 | unsigned char len; |
| 31 | unsigned char flags; | 31 | unsigned char flags; |
| 32 | }; | 32 | }; |
| 33 | 33 | ||
| 34 | #define IS_XFORM(inst) (((inst) >> 26) == 31) | 34 | #define IS_XFORM(inst) (((inst) >> 26) == 31) |
| 35 | #define IS_DSFORM(inst) (((inst) >> 26) >= 56) | 35 | #define IS_DSFORM(inst) (((inst) >> 26) >= 56) |
| 36 | 36 | ||
| 37 | #define INVALID { 0, 0 } | 37 | #define INVALID { 0, 0 } |
| 38 | 38 | ||
| 39 | /* Bits in the flags field */ | 39 | /* Bits in the flags field */ |
| 40 | #define LD 0 /* load */ | 40 | #define LD 0 /* load */ |
| 41 | #define ST 1 /* store */ | 41 | #define ST 1 /* store */ |
| 42 | #define SE 2 /* sign-extend value, or FP ld/st as word */ | 42 | #define SE 2 /* sign-extend value, or FP ld/st as word */ |
| 43 | #define F 4 /* to/from fp regs */ | 43 | #define F 4 /* to/from fp regs */ |
| 44 | #define U 8 /* update index register */ | 44 | #define U 8 /* update index register */ |
| 45 | #define M 0x10 /* multiple load/store */ | 45 | #define M 0x10 /* multiple load/store */ |
| 46 | #define SW 0x20 /* byte swap */ | 46 | #define SW 0x20 /* byte swap */ |
| 47 | #define S 0x40 /* single-precision fp or... */ | 47 | #define S 0x40 /* single-precision fp or... */ |
| 48 | #define SX 0x40 /* ... byte count in XER */ | 48 | #define SX 0x40 /* ... byte count in XER */ |
| 49 | #define HARD 0x80 /* string, stwcx. */ | 49 | #define HARD 0x80 /* string, stwcx. */ |
| 50 | #define E4 0x40 /* SPE endianness is word */ | 50 | #define E4 0x40 /* SPE endianness is word */ |
| 51 | #define E8 0x80 /* SPE endianness is double word */ | 51 | #define E8 0x80 /* SPE endianness is double word */ |
| 52 | #define SPLT 0x80 /* VSX SPLAT load */ | 52 | #define SPLT 0x80 /* VSX SPLAT load */ |
| 53 | 53 | ||
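These flag bits are OR-ed together per entry in the aligninfo table below; a sketch of how one entry decomposes (hypothetical variable names):

    /* lhau is encoded below as { 2, LD+SE+U }: a 2-byte load that
     * sign-extends its result and updates the index register. */
    unsigned char flags  = LD + SE + U;        /* == 0x0a */
    int           store  = (flags & ST) != 0;  /* 0: this is a load      */
    int           sext   = (flags & SE) != 0;  /* 1: sign-extend result  */
    int           update = (flags & U)  != 0;  /* 1: write EA back to rA */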
| 54 | /* DSISR bits reported for a DCBZ instruction: */ | 54 | /* DSISR bits reported for a DCBZ instruction: */ |
| 55 | #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ | 55 | #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ |
| 56 | 56 | ||
| 57 | #define SWAP(a, b) (t = (a), (a) = (b), (b) = t) | 57 | #define SWAP(a, b) (t = (a), (a) = (b), (b) = t) |
| 58 | 58 | ||
| 59 | /* | 59 | /* |
| 60 | * The PowerPC stores certain bits of the instruction that caused the | 60 | * The PowerPC stores certain bits of the instruction that caused the |
| 61 | * alignment exception in the DSISR register. This array maps those | 61 | * alignment exception in the DSISR register. This array maps those |
| 62 | * bits to information about the operand length and what the | 62 | * bits to information about the operand length and what the |
| 63 | * instruction would do. | 63 | * instruction would do. |
| 64 | */ | 64 | */ |
| 65 | static struct aligninfo aligninfo[128] = { | 65 | static struct aligninfo aligninfo[128] = { |
| 66 | { 4, LD }, /* 00 0 0000: lwz / lwarx */ | 66 | { 4, LD }, /* 00 0 0000: lwz / lwarx */ |
| 67 | INVALID, /* 00 0 0001 */ | 67 | INVALID, /* 00 0 0001 */ |
| 68 | { 4, ST }, /* 00 0 0010: stw */ | 68 | { 4, ST }, /* 00 0 0010: stw */ |
| 69 | INVALID, /* 00 0 0011 */ | 69 | INVALID, /* 00 0 0011 */ |
| 70 | { 2, LD }, /* 00 0 0100: lhz */ | 70 | { 2, LD }, /* 00 0 0100: lhz */ |
| 71 | { 2, LD+SE }, /* 00 0 0101: lha */ | 71 | { 2, LD+SE }, /* 00 0 0101: lha */ |
| 72 | { 2, ST }, /* 00 0 0110: sth */ | 72 | { 2, ST }, /* 00 0 0110: sth */ |
| 73 | { 4, LD+M }, /* 00 0 0111: lmw */ | 73 | { 4, LD+M }, /* 00 0 0111: lmw */ |
| 74 | { 4, LD+F+S }, /* 00 0 1000: lfs */ | 74 | { 4, LD+F+S }, /* 00 0 1000: lfs */ |
| 75 | { 8, LD+F }, /* 00 0 1001: lfd */ | 75 | { 8, LD+F }, /* 00 0 1001: lfd */ |
| 76 | { 4, ST+F+S }, /* 00 0 1010: stfs */ | 76 | { 4, ST+F+S }, /* 00 0 1010: stfs */ |
| 77 | { 8, ST+F }, /* 00 0 1011: stfd */ | 77 | { 8, ST+F }, /* 00 0 1011: stfd */ |
| 78 | INVALID, /* 00 0 1100 */ | 78 | INVALID, /* 00 0 1100 */ |
| 79 | { 8, LD }, /* 00 0 1101: ld/ldu/lwa */ | 79 | { 8, LD }, /* 00 0 1101: ld/ldu/lwa */ |
| 80 | INVALID, /* 00 0 1110 */ | 80 | INVALID, /* 00 0 1110 */ |
| 81 | { 8, ST }, /* 00 0 1111: std/stdu */ | 81 | { 8, ST }, /* 00 0 1111: std/stdu */ |
| 82 | { 4, LD+U }, /* 00 1 0000: lwzu */ | 82 | { 4, LD+U }, /* 00 1 0000: lwzu */ |
| 83 | INVALID, /* 00 1 0001 */ | 83 | INVALID, /* 00 1 0001 */ |
| 84 | { 4, ST+U }, /* 00 1 0010: stwu */ | 84 | { 4, ST+U }, /* 00 1 0010: stwu */ |
| 85 | INVALID, /* 00 1 0011 */ | 85 | INVALID, /* 00 1 0011 */ |
| 86 | { 2, LD+U }, /* 00 1 0100: lhzu */ | 86 | { 2, LD+U }, /* 00 1 0100: lhzu */ |
| 87 | { 2, LD+SE+U }, /* 00 1 0101: lhau */ | 87 | { 2, LD+SE+U }, /* 00 1 0101: lhau */ |
| 88 | { 2, ST+U }, /* 00 1 0110: sthu */ | 88 | { 2, ST+U }, /* 00 1 0110: sthu */ |
| 89 | { 4, ST+M }, /* 00 1 0111: stmw */ | 89 | { 4, ST+M }, /* 00 1 0111: stmw */ |
| 90 | { 4, LD+F+S+U }, /* 00 1 1000: lfsu */ | 90 | { 4, LD+F+S+U }, /* 00 1 1000: lfsu */ |
| 91 | { 8, LD+F+U }, /* 00 1 1001: lfdu */ | 91 | { 8, LD+F+U }, /* 00 1 1001: lfdu */ |
| 92 | { 4, ST+F+S+U }, /* 00 1 1010: stfsu */ | 92 | { 4, ST+F+S+U }, /* 00 1 1010: stfsu */ |
| 93 | { 8, ST+F+U }, /* 00 1 1011: stfdu */ | 93 | { 8, ST+F+U }, /* 00 1 1011: stfdu */ |
| 94 | { 16, LD+F }, /* 00 1 1100: lfdp */ | 94 | { 16, LD+F }, /* 00 1 1100: lfdp */ |
| 95 | INVALID, /* 00 1 1101 */ | 95 | INVALID, /* 00 1 1101 */ |
| 96 | { 16, ST+F }, /* 00 1 1110: stfdp */ | 96 | { 16, ST+F }, /* 00 1 1110: stfdp */ |
| 97 | INVALID, /* 00 1 1111 */ | 97 | INVALID, /* 00 1 1111 */ |
| 98 | { 8, LD }, /* 01 0 0000: ldx */ | 98 | { 8, LD }, /* 01 0 0000: ldx */ |
| 99 | INVALID, /* 01 0 0001 */ | 99 | INVALID, /* 01 0 0001 */ |
| 100 | { 8, ST }, /* 01 0 0010: stdx */ | 100 | { 8, ST }, /* 01 0 0010: stdx */ |
| 101 | INVALID, /* 01 0 0011 */ | 101 | INVALID, /* 01 0 0011 */ |
| 102 | INVALID, /* 01 0 0100 */ | 102 | INVALID, /* 01 0 0100 */ |
| 103 | { 4, LD+SE }, /* 01 0 0101: lwax */ | 103 | { 4, LD+SE }, /* 01 0 0101: lwax */ |
| 104 | INVALID, /* 01 0 0110 */ | 104 | INVALID, /* 01 0 0110 */ |
| 105 | INVALID, /* 01 0 0111 */ | 105 | INVALID, /* 01 0 0111 */ |
| 106 | { 4, LD+M+HARD+SX }, /* 01 0 1000: lswx */ | 106 | { 4, LD+M+HARD+SX }, /* 01 0 1000: lswx */ |
| 107 | { 4, LD+M+HARD }, /* 01 0 1001: lswi */ | 107 | { 4, LD+M+HARD }, /* 01 0 1001: lswi */ |
| 108 | { 4, ST+M+HARD+SX }, /* 01 0 1010: stswx */ | 108 | { 4, ST+M+HARD+SX }, /* 01 0 1010: stswx */ |
| 109 | { 4, ST+M+HARD }, /* 01 0 1011: stswi */ | 109 | { 4, ST+M+HARD }, /* 01 0 1011: stswi */ |
| 110 | INVALID, /* 01 0 1100 */ | 110 | INVALID, /* 01 0 1100 */ |
| 111 | { 8, LD+U }, /* 01 0 1101: ldu */ | 111 | { 8, LD+U }, /* 01 0 1101: ldu */ |
| 112 | INVALID, /* 01 0 1110 */ | 112 | INVALID, /* 01 0 1110 */ |
| 113 | { 8, ST+U }, /* 01 0 1111: stdu */ | 113 | { 8, ST+U }, /* 01 0 1111: stdu */ |
| 114 | { 8, LD+U }, /* 01 1 0000: ldux */ | 114 | { 8, LD+U }, /* 01 1 0000: ldux */ |
| 115 | INVALID, /* 01 1 0001 */ | 115 | INVALID, /* 01 1 0001 */ |
| 116 | { 8, ST+U }, /* 01 1 0010: stdux */ | 116 | { 8, ST+U }, /* 01 1 0010: stdux */ |
| 117 | INVALID, /* 01 1 0011 */ | 117 | INVALID, /* 01 1 0011 */ |
| 118 | INVALID, /* 01 1 0100 */ | 118 | INVALID, /* 01 1 0100 */ |
| 119 | { 4, LD+SE+U }, /* 01 1 0101: lwaux */ | 119 | { 4, LD+SE+U }, /* 01 1 0101: lwaux */ |
| 120 | INVALID, /* 01 1 0110 */ | 120 | INVALID, /* 01 1 0110 */ |
| 121 | INVALID, /* 01 1 0111 */ | 121 | INVALID, /* 01 1 0111 */ |
| 122 | INVALID, /* 01 1 1000 */ | 122 | INVALID, /* 01 1 1000 */ |
| 123 | INVALID, /* 01 1 1001 */ | 123 | INVALID, /* 01 1 1001 */ |
| 124 | INVALID, /* 01 1 1010 */ | 124 | INVALID, /* 01 1 1010 */ |
| 125 | INVALID, /* 01 1 1011 */ | 125 | INVALID, /* 01 1 1011 */ |
| 126 | INVALID, /* 01 1 1100 */ | 126 | INVALID, /* 01 1 1100 */ |
| 127 | INVALID, /* 01 1 1101 */ | 127 | INVALID, /* 01 1 1101 */ |
| 128 | INVALID, /* 01 1 1110 */ | 128 | INVALID, /* 01 1 1110 */ |
| 129 | INVALID, /* 01 1 1111 */ | 129 | INVALID, /* 01 1 1111 */ |
| 130 | INVALID, /* 10 0 0000 */ | 130 | INVALID, /* 10 0 0000 */ |
| 131 | INVALID, /* 10 0 0001 */ | 131 | INVALID, /* 10 0 0001 */ |
| 132 | INVALID, /* 10 0 0010: stwcx. */ | 132 | INVALID, /* 10 0 0010: stwcx. */ |
| 133 | INVALID, /* 10 0 0011 */ | 133 | INVALID, /* 10 0 0011 */ |
| 134 | INVALID, /* 10 0 0100 */ | 134 | INVALID, /* 10 0 0100 */ |
| 135 | INVALID, /* 10 0 0101 */ | 135 | INVALID, /* 10 0 0101 */ |
| 136 | INVALID, /* 10 0 0110 */ | 136 | INVALID, /* 10 0 0110 */ |
| 137 | INVALID, /* 10 0 0111 */ | 137 | INVALID, /* 10 0 0111 */ |
| 138 | { 4, LD+SW }, /* 10 0 1000: lwbrx */ | 138 | { 4, LD+SW }, /* 10 0 1000: lwbrx */ |
| 139 | INVALID, /* 10 0 1001 */ | 139 | INVALID, /* 10 0 1001 */ |
| 140 | { 4, ST+SW }, /* 10 0 1010: stwbrx */ | 140 | { 4, ST+SW }, /* 10 0 1010: stwbrx */ |
| 141 | INVALID, /* 10 0 1011 */ | 141 | INVALID, /* 10 0 1011 */ |
| 142 | { 2, LD+SW }, /* 10 0 1100: lhbrx */ | 142 | { 2, LD+SW }, /* 10 0 1100: lhbrx */ |
| 143 | { 4, LD+SE }, /* 10 0 1101: lwa */ | 143 | { 4, LD+SE }, /* 10 0 1101: lwa */ |
| 144 | { 2, ST+SW }, /* 10 0 1110: sthbrx */ | 144 | { 2, ST+SW }, /* 10 0 1110: sthbrx */ |
| 145 | INVALID, /* 10 0 1111 */ | 145 | INVALID, /* 10 0 1111 */ |
| 146 | INVALID, /* 10 1 0000 */ | 146 | INVALID, /* 10 1 0000 */ |
| 147 | INVALID, /* 10 1 0001 */ | 147 | INVALID, /* 10 1 0001 */ |
| 148 | INVALID, /* 10 1 0010 */ | 148 | INVALID, /* 10 1 0010 */ |
| 149 | INVALID, /* 10 1 0011 */ | 149 | INVALID, /* 10 1 0011 */ |
| 150 | INVALID, /* 10 1 0100 */ | 150 | INVALID, /* 10 1 0100 */ |
| 151 | INVALID, /* 10 1 0101 */ | 151 | INVALID, /* 10 1 0101 */ |
| 152 | INVALID, /* 10 1 0110 */ | 152 | INVALID, /* 10 1 0110 */ |
| 153 | INVALID, /* 10 1 0111 */ | 153 | INVALID, /* 10 1 0111 */ |
| 154 | INVALID, /* 10 1 1000 */ | 154 | INVALID, /* 10 1 1000 */ |
| 155 | INVALID, /* 10 1 1001 */ | 155 | INVALID, /* 10 1 1001 */ |
| 156 | INVALID, /* 10 1 1010 */ | 156 | INVALID, /* 10 1 1010 */ |
| 157 | INVALID, /* 10 1 1011 */ | 157 | INVALID, /* 10 1 1011 */ |
| 158 | INVALID, /* 10 1 1100 */ | 158 | INVALID, /* 10 1 1100 */ |
| 159 | INVALID, /* 10 1 1101 */ | 159 | INVALID, /* 10 1 1101 */ |
| 160 | INVALID, /* 10 1 1110 */ | 160 | INVALID, /* 10 1 1110 */ |
| 161 | { 0, ST+HARD }, /* 10 1 1111: dcbz */ | 161 | { 0, ST+HARD }, /* 10 1 1111: dcbz */ |
| 162 | { 4, LD }, /* 11 0 0000: lwzx */ | 162 | { 4, LD }, /* 11 0 0000: lwzx */ |
| 163 | INVALID, /* 11 0 0001 */ | 163 | INVALID, /* 11 0 0001 */ |
| 164 | { 4, ST }, /* 11 0 0010: stwx */ | 164 | { 4, ST }, /* 11 0 0010: stwx */ |
| 165 | INVALID, /* 11 0 0011 */ | 165 | INVALID, /* 11 0 0011 */ |
| 166 | { 2, LD }, /* 11 0 0100: lhzx */ | 166 | { 2, LD }, /* 11 0 0100: lhzx */ |
| 167 | { 2, LD+SE }, /* 11 0 0101: lhax */ | 167 | { 2, LD+SE }, /* 11 0 0101: lhax */ |
| 168 | { 2, ST }, /* 11 0 0110: sthx */ | 168 | { 2, ST }, /* 11 0 0110: sthx */ |
| 169 | INVALID, /* 11 0 0111 */ | 169 | INVALID, /* 11 0 0111 */ |
| 170 | { 4, LD+F+S }, /* 11 0 1000: lfsx */ | 170 | { 4, LD+F+S }, /* 11 0 1000: lfsx */ |
| 171 | { 8, LD+F }, /* 11 0 1001: lfdx */ | 171 | { 8, LD+F }, /* 11 0 1001: lfdx */ |
| 172 | { 4, ST+F+S }, /* 11 0 1010: stfsx */ | 172 | { 4, ST+F+S }, /* 11 0 1010: stfsx */ |
| 173 | { 8, ST+F }, /* 11 0 1011: stfdx */ | 173 | { 8, ST+F }, /* 11 0 1011: stfdx */ |
| 174 | { 16, LD+F }, /* 11 0 1100: lfdpx */ | 174 | { 16, LD+F }, /* 11 0 1100: lfdpx */ |
| 175 | { 4, LD+F+SE }, /* 11 0 1101: lfiwax */ | 175 | { 4, LD+F+SE }, /* 11 0 1101: lfiwax */ |
| 176 | { 16, ST+F }, /* 11 0 1110: stfdpx */ | 176 | { 16, ST+F }, /* 11 0 1110: stfdpx */ |
| 177 | { 4, ST+F }, /* 11 0 1111: stfiwx */ | 177 | { 4, ST+F }, /* 11 0 1111: stfiwx */ |
| 178 | { 4, LD+U }, /* 11 1 0000: lwzux */ | 178 | { 4, LD+U }, /* 11 1 0000: lwzux */ |
| 179 | INVALID, /* 11 1 0001 */ | 179 | INVALID, /* 11 1 0001 */ |
| 180 | { 4, ST+U }, /* 11 1 0010: stwux */ | 180 | { 4, ST+U }, /* 11 1 0010: stwux */ |
| 181 | INVALID, /* 11 1 0011 */ | 181 | INVALID, /* 11 1 0011 */ |
| 182 | { 2, LD+U }, /* 11 1 0100: lhzux */ | 182 | { 2, LD+U }, /* 11 1 0100: lhzux */ |
| 183 | { 2, LD+SE+U }, /* 11 1 0101: lhaux */ | 183 | { 2, LD+SE+U }, /* 11 1 0101: lhaux */ |
| 184 | { 2, ST+U }, /* 11 1 0110: sthux */ | 184 | { 2, ST+U }, /* 11 1 0110: sthux */ |
| 185 | INVALID, /* 11 1 0111 */ | 185 | INVALID, /* 11 1 0111 */ |
| 186 | { 4, LD+F+S+U }, /* 11 1 1000: lfsux */ | 186 | { 4, LD+F+S+U }, /* 11 1 1000: lfsux */ |
| 187 | { 8, LD+F+U }, /* 11 1 1001: lfdux */ | 187 | { 8, LD+F+U }, /* 11 1 1001: lfdux */ |
| 188 | { 4, ST+F+S+U }, /* 11 1 1010: stfsux */ | 188 | { 4, ST+F+S+U }, /* 11 1 1010: stfsux */ |
| 189 | { 8, ST+F+U }, /* 11 1 1011: stfdux */ | 189 | { 8, ST+F+U }, /* 11 1 1011: stfdux */ |
| 190 | INVALID, /* 11 1 1100 */ | 190 | INVALID, /* 11 1 1100 */ |
| 191 | { 4, LD+F }, /* 11 1 1101: lfiwzx */ | 191 | { 4, LD+F }, /* 11 1 1101: lfiwzx */ |
| 192 | INVALID, /* 11 1 1110 */ | 192 | INVALID, /* 11 1 1110 */ |
| 193 | INVALID, /* 11 1 1111 */ | 193 | INVALID, /* 11 1 1111 */ |
| 194 | }; | 194 | }; |
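Each entry pairs an operand length in bytes with the flag bits defined above, so { 2, LD+SE+U } (lhau) means a 2-byte load that sign-extends its value and updates RA. A minimal sketch of how fix_alignment() below consumes an entry (the struct layout is assumed from the aligninfo definition earlier in this file):

    /* Sketch only: field names assumed from the definition of
     * struct aligninfo earlier in align.c. */
    struct aligninfo { unsigned char len; unsigned char flags; };

    unsigned int nb    = aligninfo[instr].len;    /* operand size */
    unsigned int flags = aligninfo[instr].flags;  /* LD/ST, SE, U, ... */
    if (flags & U)                                /* update form: RA <- EA */
            regs->gpr[areg] = regs->dar;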
| 195 | 195 | ||
| 196 | /* | 196 | /* |
| 197 | * Create a DSISR value from the instruction | 197 | * Create a DSISR value from the instruction |
| 198 | */ | 198 | */ |
| 199 | static inline unsigned make_dsisr(unsigned instr) | 199 | static inline unsigned make_dsisr(unsigned instr) |
| 200 | { | 200 | { |
| 201 | unsigned dsisr; | 201 | unsigned dsisr; |
| 202 | 202 | ||
| 203 | 203 | ||
| 204 | /* bits 6:15 --> 22:31 */ | 204 | /* bits 6:15 --> 22:31 */ |
| 205 | dsisr = (instr & 0x03ff0000) >> 16; | 205 | dsisr = (instr & 0x03ff0000) >> 16; |
| 206 | 206 | ||
| 207 | if (IS_XFORM(instr)) { | 207 | if (IS_XFORM(instr)) { |
| 208 | /* bits 29:30 --> 15:16 */ | 208 | /* bits 29:30 --> 15:16 */ |
| 209 | dsisr |= (instr & 0x00000006) << 14; | 209 | dsisr |= (instr & 0x00000006) << 14; |
| 210 | /* bit 25 --> 17 */ | 210 | /* bit 25 --> 17 */ |
| 211 | dsisr |= (instr & 0x00000040) << 8; | 211 | dsisr |= (instr & 0x00000040) << 8; |
| 212 | /* bits 21:24 --> 18:21 */ | 212 | /* bits 21:24 --> 18:21 */ |
| 213 | dsisr |= (instr & 0x00000780) << 3; | 213 | dsisr |= (instr & 0x00000780) << 3; |
| 214 | } else { | 214 | } else { |
| 215 | /* bit 5 --> 17 */ | 215 | /* bit 5 --> 17 */ |
| 216 | dsisr |= (instr & 0x04000000) >> 12; | 216 | dsisr |= (instr & 0x04000000) >> 12; |
| 217 | /* bits 1:4 --> 18:21 */ | 217 | /* bits 1:4 --> 18:21 */ |
| 218 | dsisr |= (instr & 0x78000000) >> 17; | 218 | dsisr |= (instr & 0x78000000) >> 17; |
| 219 | /* bits 30:31 --> 12:13 */ | 219 | /* bits 30:31 --> 12:13 */ |
| 220 | if (IS_DSFORM(instr)) | 220 | if (IS_DSFORM(instr)) |
| 221 | dsisr |= (instr & 0x00000003) << 18; | 221 | dsisr |= (instr & 0x00000003) << 18; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | return dsisr; | 224 | return dsisr; |
| 225 | } | 225 | } |
| 226 | 226 | ||
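A worked example (a sketch, not part of the patch): lwzx r3,r4,r5 encodes as 0x7c64282e, and feeding that to make_dsisr() reproduces the fields fix_alignment() below extracts:

    /* Worked example (sketch): lwzx r3,r4,r5 == 0x7c64282e */
    dsisr  = make_dsisr(0x7c64282e);    /* yields 0x00018064 */
    reg    = (dsisr >> 5) & 0x1f;       /* 3: RT */
    areg   = dsisr & 0x1f;              /* 4: RA */
    instr  = (dsisr >> 10) & 0x7f;
    instr |= (dsisr >> 13) & 0x60;      /* 0x60: "11 0 0000", the lwzx entry */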
| 227 | /* | 227 | /* |
| 228 | * The dcbz (data cache block zero) instruction | 228 | * The dcbz (data cache block zero) instruction |
| 229 | * gives an alignment fault if used on non-cacheable | 229 | * gives an alignment fault if used on non-cacheable |
| 230 | * memory. We handle the fault mainly for the | 230 | * memory. We handle the fault mainly for the |
| 231 | * case when we are running with the cache disabled | 231 | * case when we are running with the cache disabled |
| 232 | * for debugging. | 232 | * for debugging. |
| 233 | */ | 233 | */ |
| 234 | static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) | 234 | static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) |
| 235 | { | 235 | { |
| 236 | long __user *p; | 236 | long __user *p; |
| 237 | int i, size; | 237 | int i, size; |
| 238 | 238 | ||
| 239 | #ifdef __powerpc64__ | 239 | #ifdef __powerpc64__ |
| 240 | size = ppc64_caches.dline_size; | 240 | size = ppc64_caches.dline_size; |
| 241 | #else | 241 | #else |
| 242 | size = L1_CACHE_BYTES; | 242 | size = L1_CACHE_BYTES; |
| 243 | #endif | 243 | #endif |
| 244 | p = (long __user *) (regs->dar & -size); | 244 | p = (long __user *) (regs->dar & -size); |
| 245 | if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size)) | 245 | if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size)) |
| 246 | return -EFAULT; | 246 | return -EFAULT; |
| 247 | for (i = 0; i < size / sizeof(long); ++i) | 247 | for (i = 0; i < size / sizeof(long); ++i) |
| 248 | if (__put_user_inatomic(0, p+i)) | 248 | if (__put_user_inatomic(0, p+i)) |
| 249 | return -EFAULT; | 249 | return -EFAULT; |
| 250 | return 1; | 250 | return 1; |
| 251 | } | 251 | } |
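Note the rounding idiom: (regs->dar & -size) clears the low bits of the faulting address, giving the start of its cache line; negating works because size is a power of two. A standalone sketch:

    /* Sketch, assuming a 128-byte cache line. */
    unsigned long dar  = 0x10000234UL;  /* hypothetical faulting address */
    long size          = 128;
    unsigned long line = dar & -size;   /* 0x10000200: line start */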
| 252 | 252 | ||
| 253 | /* | 253 | /* |
| 254 | * Emulate load & store multiple instructions | 254 | * Emulate load & store multiple instructions |
| 255 | * On 64-bit machines, these instructions only affect/use the | 255 | * On 64-bit machines, these instructions only affect/use the |
| 256 | * bottom 4 bytes of each register, and the loads clear the | 256 | * bottom 4 bytes of each register, and the loads clear the |
| 257 | * top 4 bytes of the affected register. | 257 | * top 4 bytes of the affected register. |
| 258 | */ | 258 | */ |
| 259 | #ifdef CONFIG_PPC64 | 259 | #ifdef CONFIG_PPC64 |
| 260 | #define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4) | 260 | #define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4) |
| 261 | #else | 261 | #else |
| 262 | #define REG_BYTE(rp, i) *((u8 *)(rp) + (i)) | 262 | #define REG_BYTE(rp, i) *((u8 *)(rp) + (i)) |
| 263 | #endif | 263 | #endif |
| 264 | 264 | ||
| 265 | #define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz)) | 265 | #define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz)) |
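REG_BYTE() indexes a packed image of 32-bit register values: on 64-bit (big-endian) kernels the live low word of each 8-byte GPR occupies bytes 4..7, hence the "+ 4". For example, a sketch of the expansion:

    /* REG_BYTE(rp, 5) with CONFIG_PPC64 expands to
     *   *((u8 *)(rp + (5 >> 2)) + (5 & 3) + 4)
     * = *((u8 *)(rp + 1) + 5)
     * i.e. byte 1 of the low 32-bit word of the second register. */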
| 266 | 266 | ||
| 267 | static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, | 267 | static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, |
| 268 | unsigned int reg, unsigned int nb, | 268 | unsigned int reg, unsigned int nb, |
| 269 | unsigned int flags, unsigned int instr, | 269 | unsigned int flags, unsigned int instr, |
| 270 | unsigned long swiz) | 270 | unsigned long swiz) |
| 271 | { | 271 | { |
| 272 | unsigned long *rptr; | 272 | unsigned long *rptr; |
| 273 | unsigned int nb0, i, bswiz; | 273 | unsigned int nb0, i, bswiz; |
| 274 | unsigned long p; | 274 | unsigned long p; |
| 275 | 275 | ||
| 276 | /* | 276 | /* |
| 277 | * We do not try to emulate 8-byte multiples as they aren't really | 277 | * We do not try to emulate 8-byte multiples as they aren't really |
| 278 | * available in our operating environments, and we don't try to | 278 | * available in our operating environments, and we don't try to |
| 279 | * emulate multiple operations in kernel land as they should never | 279 | * emulate multiple operations in kernel land as they should never |
| 280 | * be used/generated there, at least not on unaligned boundaries | 280 | * be used/generated there, at least not on unaligned boundaries |
| 281 | */ | 281 | */ |
| 282 | if (unlikely((nb > 4) || !user_mode(regs))) | 282 | if (unlikely((nb > 4) || !user_mode(regs))) |
| 283 | return 0; | 283 | return 0; |
| 284 | 284 | ||
| 285 | /* lmw, stmw, lswi/x, stswi/x */ | 285 | /* lmw, stmw, lswi/x, stswi/x */ |
| 286 | nb0 = 0; | 286 | nb0 = 0; |
| 287 | if (flags & HARD) { | 287 | if (flags & HARD) { |
| 288 | if (flags & SX) { | 288 | if (flags & SX) { |
| 289 | nb = regs->xer & 127; | 289 | nb = regs->xer & 127; |
| 290 | if (nb == 0) | 290 | if (nb == 0) |
| 291 | return 1; | 291 | return 1; |
| 292 | } else { | 292 | } else { |
| 293 | unsigned long pc = regs->nip ^ (swiz & 4); | 293 | unsigned long pc = regs->nip ^ (swiz & 4); |
| 294 | 294 | ||
| 295 | if (__get_user_inatomic(instr, | 295 | if (__get_user_inatomic(instr, |
| 296 | (unsigned int __user *)pc)) | 296 | (unsigned int __user *)pc)) |
| 297 | return -EFAULT; | 297 | return -EFAULT; |
| 298 | if (swiz == 0 && (flags & SW)) | 298 | if (swiz == 0 && (flags & SW)) |
| 299 | instr = cpu_to_le32(instr); | 299 | instr = cpu_to_le32(instr); |
| 300 | nb = (instr >> 11) & 0x1f; | 300 | nb = (instr >> 11) & 0x1f; |
| 301 | if (nb == 0) | 301 | if (nb == 0) |
| 302 | nb = 32; | 302 | nb = 32; |
| 303 | } | 303 | } |
| 304 | if (nb + reg * 4 > 128) { | 304 | if (nb + reg * 4 > 128) { |
| 305 | nb0 = nb + reg * 4 - 128; | 305 | nb0 = nb + reg * 4 - 128; |
| 306 | nb = 128 - reg * 4; | 306 | nb = 128 - reg * 4; |
| 307 | } | 307 | } |
| 308 | } else { | 308 | } else { |
| 309 | /* lmw, stmw */ | 309 | /* lmw, stmw */ |
| 310 | nb = (32 - reg) * 4; | 310 | nb = (32 - reg) * 4; |
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | if (!access_ok((flags & ST ? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0)) | 313 | if (!access_ok((flags & ST ? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0)) |
| 314 | return -EFAULT; /* bad address */ | 314 | return -EFAULT; /* bad address */ |
| 315 | 315 | ||
| 316 | rptr = ®s->gpr[reg]; | 316 | rptr = ®s->gpr[reg]; |
| 317 | p = (unsigned long) addr; | 317 | p = (unsigned long) addr; |
| 318 | bswiz = (flags & SW)? 3: 0; | 318 | bswiz = (flags & SW)? 3: 0; |
| 319 | 319 | ||
| 320 | if (!(flags & ST)) { | 320 | if (!(flags & ST)) { |
| 321 | /* | 321 | /* |
| 322 | * This zeroes the top 4 bytes of the affected registers | 322 | * This zeroes the top 4 bytes of the affected registers |
| 323 | * in 64-bit mode, and also zeroes out any remaining | 323 | * in 64-bit mode, and also zeroes out any remaining |
| 324 | * bytes of the last register for lsw*. | 324 | * bytes of the last register for lsw*. |
| 325 | */ | 325 | */ |
| 326 | memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long)); | 326 | memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long)); |
| 327 | if (nb0 > 0) | 327 | if (nb0 > 0) |
| 328 | memset(®s->gpr[0], 0, | 328 | memset(®s->gpr[0], 0, |
| 329 | ((nb0 + 3) / 4) * sizeof(unsigned long)); | 329 | ((nb0 + 3) / 4) * sizeof(unsigned long)); |
| 330 | 330 | ||
| 331 | for (i = 0; i < nb; ++i, ++p) | 331 | for (i = 0; i < nb; ++i, ++p) |
| 332 | if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz), | 332 | if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz), |
| 333 | SWIZ_PTR(p))) | 333 | SWIZ_PTR(p))) |
| 334 | return -EFAULT; | 334 | return -EFAULT; |
| 335 | if (nb0 > 0) { | 335 | if (nb0 > 0) { |
| 336 | rptr = ®s->gpr[0]; | 336 | rptr = ®s->gpr[0]; |
| 337 | addr += nb; | 337 | addr += nb; |
| 338 | for (i = 0; i < nb0; ++i, ++p) | 338 | for (i = 0; i < nb0; ++i, ++p) |
| 339 | if (__get_user_inatomic(REG_BYTE(rptr, | 339 | if (__get_user_inatomic(REG_BYTE(rptr, |
| 340 | i ^ bswiz), | 340 | i ^ bswiz), |
| 341 | SWIZ_PTR(p))) | 341 | SWIZ_PTR(p))) |
| 342 | return -EFAULT; | 342 | return -EFAULT; |
| 343 | } | 343 | } |
| 344 | 344 | ||
| 345 | } else { | 345 | } else { |
| 346 | for (i = 0; i < nb; ++i, ++p) | 346 | for (i = 0; i < nb; ++i, ++p) |
| 347 | if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz), | 347 | if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz), |
| 348 | SWIZ_PTR(p))) | 348 | SWIZ_PTR(p))) |
| 349 | return -EFAULT; | 349 | return -EFAULT; |
| 350 | if (nb0 > 0) { | 350 | if (nb0 > 0) { |
| 351 | rptr = ®s->gpr[0]; | 351 | rptr = ®s->gpr[0]; |
| 352 | addr += nb; | 352 | addr += nb; |
| 353 | for (i = 0; i < nb0; ++i, ++p) | 353 | for (i = 0; i < nb0; ++i, ++p) |
| 354 | if (__put_user_inatomic(REG_BYTE(rptr, | 354 | if (__put_user_inatomic(REG_BYTE(rptr, |
| 355 | i ^ bswiz), | 355 | i ^ bswiz), |
| 356 | SWIZ_PTR(p))) | 356 | SWIZ_PTR(p))) |
| 357 | return -EFAULT; | 357 | return -EFAULT; |
| 358 | } | 358 | } |
| 359 | } | 359 | } |
| 360 | return 1; | 360 | return 1; |
| 361 | } | 361 | } |
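For the string forms the byte count comes either from the low seven bits of XER (lswx/stswx) or from the instruction's NB field, where 0 encodes 32 bytes (lswi/stswi); when the bytes run past r31 the transfer wraps into r0, which is what the nb0 arithmetic handles. A sketch of the NB decode used above:

    /* Sketch: lswi/stswi byte count; 0 in the NB field means 32. */
    static unsigned int string_count(unsigned int instr)
    {
            unsigned int nb = (instr >> 11) & 0x1f;
            return nb ? nb : 32;
    }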
| 362 | 362 | ||
| 363 | /* | 363 | /* |
| 364 | * Emulate floating-point pair loads and stores. | 364 | * Emulate floating-point pair loads and stores. |
| 365 | * Only POWER6 has these instructions, and it does true little-endian, | 365 | * Only POWER6 has these instructions, and it does true little-endian, |
| 366 | * so we don't need the address swizzling. | 366 | * so we don't need the address swizzling. |
| 367 | */ | 367 | */ |
| 368 | static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg, | 368 | static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg, |
| 369 | unsigned int flags) | 369 | unsigned int flags) |
| 370 | { | 370 | { |
| 371 | char *ptr0 = (char *) ¤t->thread.TS_FPR(reg); | 371 | char *ptr0 = (char *) ¤t->thread.TS_FPR(reg); |
| 372 | char *ptr1 = (char *) ¤t->thread.TS_FPR(reg+1); | 372 | char *ptr1 = (char *) ¤t->thread.TS_FPR(reg+1); |
| 373 | int i, ret, sw = 0; | 373 | int i, ret, sw = 0; |
| 374 | 374 | ||
| 375 | if (!(flags & F)) | 375 | if (!(flags & F)) |
| 376 | return 0; | 376 | return 0; |
| 377 | if (reg & 1) | 377 | if (reg & 1) |
| 378 | return 0; /* invalid form: FRS/FRT must be even */ | 378 | return 0; /* invalid form: FRS/FRT must be even */ |
| 379 | if (flags & SW) | 379 | if (flags & SW) |
| 380 | sw = 7; | 380 | sw = 7; |
| 381 | ret = 0; | 381 | ret = 0; |
| 382 | for (i = 0; i < 8; ++i) { | 382 | for (i = 0; i < 8; ++i) { |
| 383 | if (!(flags & ST)) { | 383 | if (!(flags & ST)) { |
| 384 | ret |= __get_user(ptr0[i^sw], addr + i); | 384 | ret |= __get_user(ptr0[i^sw], addr + i); |
| 385 | ret |= __get_user(ptr1[i^sw], addr + i + 8); | 385 | ret |= __get_user(ptr1[i^sw], addr + i + 8); |
| 386 | } else { | 386 | } else { |
| 387 | ret |= __put_user(ptr0[i^sw], addr + i); | 387 | ret |= __put_user(ptr0[i^sw], addr + i); |
| 388 | ret |= __put_user(ptr1[i^sw], addr + i + 8); | 388 | ret |= __put_user(ptr1[i^sw], addr + i + 8); |
| 389 | } | 389 | } |
| 390 | } | 390 | } |
| 391 | if (ret) | 391 | if (ret) |
| 392 | return -EFAULT; | 392 | return -EFAULT; |
| 393 | return 1; /* exception handled and fixed up */ | 393 | return 1; /* exception handled and fixed up */ |
| 394 | } | 394 | } |
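When SW is set, sw == 7 makes the i^sw subscript visit the eight bytes in mirror order, so no separate swap pass is needed. The idiom in isolation (a sketch):

    /* Sketch: XOR-indexing reverses an 8-byte sequence. */
    for (i = 0; i < 8; ++i)
            dst[i] = src[i ^ 7];    /* dst is src byte-reversed */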
| 395 | 395 | ||
| 396 | #ifdef CONFIG_SPE | 396 | #ifdef CONFIG_SPE |
| 397 | 397 | ||
| 398 | static struct aligninfo spe_aligninfo[32] = { | 398 | static struct aligninfo spe_aligninfo[32] = { |
| 399 | { 8, LD+E8 }, /* 0 00 00: evldd[x] */ | 399 | { 8, LD+E8 }, /* 0 00 00: evldd[x] */ |
| 400 | { 8, LD+E4 }, /* 0 00 01: evldw[x] */ | 400 | { 8, LD+E4 }, /* 0 00 01: evldw[x] */ |
| 401 | { 8, LD }, /* 0 00 10: evldh[x] */ | 401 | { 8, LD }, /* 0 00 10: evldh[x] */ |
| 402 | INVALID, /* 0 00 11 */ | 402 | INVALID, /* 0 00 11 */ |
| 403 | { 2, LD }, /* 0 01 00: evlhhesplat[x] */ | 403 | { 2, LD }, /* 0 01 00: evlhhesplat[x] */ |
| 404 | INVALID, /* 0 01 01 */ | 404 | INVALID, /* 0 01 01 */ |
| 405 | { 2, LD }, /* 0 01 10: evlhhousplat[x] */ | 405 | { 2, LD }, /* 0 01 10: evlhhousplat[x] */ |
| 406 | { 2, LD+SE }, /* 0 01 11: evlhhossplat[x] */ | 406 | { 2, LD+SE }, /* 0 01 11: evlhhossplat[x] */ |
| 407 | { 4, LD }, /* 0 10 00: evlwhe[x] */ | 407 | { 4, LD }, /* 0 10 00: evlwhe[x] */ |
| 408 | INVALID, /* 0 10 01 */ | 408 | INVALID, /* 0 10 01 */ |
| 409 | { 4, LD }, /* 0 10 10: evlwhou[x] */ | 409 | { 4, LD }, /* 0 10 10: evlwhou[x] */ |
| 410 | { 4, LD+SE }, /* 0 10 11: evlwhos[x] */ | 410 | { 4, LD+SE }, /* 0 10 11: evlwhos[x] */ |
| 411 | { 4, LD+E4 }, /* 0 11 00: evlwwsplat[x] */ | 411 | { 4, LD+E4 }, /* 0 11 00: evlwwsplat[x] */ |
| 412 | INVALID, /* 0 11 01 */ | 412 | INVALID, /* 0 11 01 */ |
| 413 | { 4, LD }, /* 0 11 10: evlwhsplat[x] */ | 413 | { 4, LD }, /* 0 11 10: evlwhsplat[x] */ |
| 414 | INVALID, /* 0 11 11 */ | 414 | INVALID, /* 0 11 11 */ |
| 415 | 415 | ||
| 416 | { 8, ST+E8 }, /* 1 00 00: evstdd[x] */ | 416 | { 8, ST+E8 }, /* 1 00 00: evstdd[x] */ |
| 417 | { 8, ST+E4 }, /* 1 00 01: evstdw[x] */ | 417 | { 8, ST+E4 }, /* 1 00 01: evstdw[x] */ |
| 418 | { 8, ST }, /* 1 00 10: evstdh[x] */ | 418 | { 8, ST }, /* 1 00 10: evstdh[x] */ |
| 419 | INVALID, /* 1 00 11 */ | 419 | INVALID, /* 1 00 11 */ |
| 420 | INVALID, /* 1 01 00 */ | 420 | INVALID, /* 1 01 00 */ |
| 421 | INVALID, /* 1 01 01 */ | 421 | INVALID, /* 1 01 01 */ |
| 422 | INVALID, /* 1 01 10 */ | 422 | INVALID, /* 1 01 10 */ |
| 423 | INVALID, /* 1 01 11 */ | 423 | INVALID, /* 1 01 11 */ |
| 424 | { 4, ST }, /* 1 10 00: evstwhe[x] */ | 424 | { 4, ST }, /* 1 10 00: evstwhe[x] */ |
| 425 | INVALID, /* 1 10 01 */ | 425 | INVALID, /* 1 10 01 */ |
| 426 | { 4, ST }, /* 1 10 10: evstwho[x] */ | 426 | { 4, ST }, /* 1 10 10: evstwho[x] */ |
| 427 | INVALID, /* 1 10 11 */ | 427 | INVALID, /* 1 10 11 */ |
| 428 | { 4, ST+E4 }, /* 1 11 00: evstwwe[x] */ | 428 | { 4, ST+E4 }, /* 1 11 00: evstwwe[x] */ |
| 429 | INVALID, /* 1 11 01 */ | 429 | INVALID, /* 1 11 01 */ |
| 430 | { 4, ST+E4 }, /* 1 11 10: evstwwo[x] */ | 430 | { 4, ST+E4 }, /* 1 11 10: evstwwo[x] */ |
| 431 | INVALID, /* 1 11 11 */ | 431 | INVALID, /* 1 11 11 */ |
| 432 | }; | 432 | }; |
| 433 | 433 | ||
| 434 | #define EVLDD 0x00 | 434 | #define EVLDD 0x00 |
| 435 | #define EVLDW 0x01 | 435 | #define EVLDW 0x01 |
| 436 | #define EVLDH 0x02 | 436 | #define EVLDH 0x02 |
| 437 | #define EVLHHESPLAT 0x04 | 437 | #define EVLHHESPLAT 0x04 |
| 438 | #define EVLHHOUSPLAT 0x06 | 438 | #define EVLHHOUSPLAT 0x06 |
| 439 | #define EVLHHOSSPLAT 0x07 | 439 | #define EVLHHOSSPLAT 0x07 |
| 440 | #define EVLWHE 0x08 | 440 | #define EVLWHE 0x08 |
| 441 | #define EVLWHOU 0x0A | 441 | #define EVLWHOU 0x0A |
| 442 | #define EVLWHOS 0x0B | 442 | #define EVLWHOS 0x0B |
| 443 | #define EVLWWSPLAT 0x0C | 443 | #define EVLWWSPLAT 0x0C |
| 444 | #define EVLWHSPLAT 0x0E | 444 | #define EVLWHSPLAT 0x0E |
| 445 | #define EVSTDD 0x10 | 445 | #define EVSTDD 0x10 |
| 446 | #define EVSTDW 0x11 | 446 | #define EVSTDW 0x11 |
| 447 | #define EVSTDH 0x12 | 447 | #define EVSTDH 0x12 |
| 448 | #define EVSTWHE 0x18 | 448 | #define EVSTWHE 0x18 |
| 449 | #define EVSTWHO 0x1A | 449 | #define EVSTWHO 0x1A |
| 450 | #define EVSTWWE 0x1C | 450 | #define EVSTWWE 0x1C |
| 451 | #define EVSTWWO 0x1E | 451 | #define EVSTWWO 0x1E |
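These constants are simply the five extended-opcode bits that emulate_spe() extracts with (instr >> 1) & 0x1f, so the same value indexes both spe_aligninfo[] above and this list. A sketch:

    /* Sketch: the shared 5-bit selector. */
    static unsigned int spe_index(unsigned int instr)
    {
            return (instr >> 1) & 0x1f;     /* e.g. evldd[x] -> EVLDD */
    }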
| 452 | 452 | ||
| 453 | /* | 453 | /* |
| 454 | * Emulate SPE loads and stores. | 454 | * Emulate SPE loads and stores. |
| 455 | * Only Book-E has these instructions, and it does true little-endian, | 455 | * Only Book-E has these instructions, and it does true little-endian, |
| 456 | * so we don't need the address swizzling. | 456 | * so we don't need the address swizzling. |
| 457 | */ | 457 | */ |
| 458 | static int emulate_spe(struct pt_regs *regs, unsigned int reg, | 458 | static int emulate_spe(struct pt_regs *regs, unsigned int reg, |
| 459 | unsigned int instr) | 459 | unsigned int instr) |
| 460 | { | 460 | { |
| 461 | int t, ret; | 461 | int t, ret; |
| 462 | union { | 462 | union { |
| 463 | u64 ll; | 463 | u64 ll; |
| 464 | u32 w[2]; | 464 | u32 w[2]; |
| 465 | u16 h[4]; | 465 | u16 h[4]; |
| 466 | u8 v[8]; | 466 | u8 v[8]; |
| 467 | } data, temp; | 467 | } data, temp; |
| 468 | unsigned char __user *p, *addr; | 468 | unsigned char __user *p, *addr; |
| 469 | unsigned long *evr = ¤t->thread.evr[reg]; | 469 | unsigned long *evr = ¤t->thread.evr[reg]; |
| 470 | unsigned int nb, flags; | 470 | unsigned int nb, flags; |
| 471 | 471 | ||
| 472 | instr = (instr >> 1) & 0x1f; | 472 | instr = (instr >> 1) & 0x1f; |
| 473 | 473 | ||
| 474 | /* DAR has the operand effective address */ | 474 | /* DAR has the operand effective address */ |
| 475 | addr = (unsigned char __user *)regs->dar; | 475 | addr = (unsigned char __user *)regs->dar; |
| 476 | 476 | ||
| 477 | nb = spe_aligninfo[instr].len; | 477 | nb = spe_aligninfo[instr].len; |
| 478 | flags = spe_aligninfo[instr].flags; | 478 | flags = spe_aligninfo[instr].flags; |
| 479 | 479 | ||
| 480 | /* Verify the address of the operand */ | 480 | /* Verify the address of the operand */ |
| 481 | if (unlikely(user_mode(regs) && | 481 | if (unlikely(user_mode(regs) && |
| 482 | !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), | 482 | !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), |
| 483 | addr, nb))) | 483 | addr, nb))) |
| 484 | return -EFAULT; | 484 | return -EFAULT; |
| 485 | 485 | ||
| 486 | /* userland only */ | 486 | /* userland only */ |
| 487 | if (unlikely(!user_mode(regs))) | 487 | if (unlikely(!user_mode(regs))) |
| 488 | return 0; | 488 | return 0; |
| 489 | 489 | ||
| 490 | flush_spe_to_thread(current); | 490 | flush_spe_to_thread(current); |
| 491 | 491 | ||
| 492 | /* If we are loading, get the data from user space, else | 492 | /* If we are loading, get the data from user space, else |
| 493 | * get it from register values | 493 | * get it from register values |
| 494 | */ | 494 | */ |
| 495 | if (flags & ST) { | 495 | if (flags & ST) { |
| 496 | data.ll = 0; | 496 | data.ll = 0; |
| 497 | switch (instr) { | 497 | switch (instr) { |
| 498 | case EVSTDD: | 498 | case EVSTDD: |
| 499 | case EVSTDW: | 499 | case EVSTDW: |
| 500 | case EVSTDH: | 500 | case EVSTDH: |
| 501 | data.w[0] = *evr; | 501 | data.w[0] = *evr; |
| 502 | data.w[1] = regs->gpr[reg]; | 502 | data.w[1] = regs->gpr[reg]; |
| 503 | break; | 503 | break; |
| 504 | case EVSTWHE: | 504 | case EVSTWHE: |
| 505 | data.h[2] = *evr >> 16; | 505 | data.h[2] = *evr >> 16; |
| 506 | data.h[3] = regs->gpr[reg] >> 16; | 506 | data.h[3] = regs->gpr[reg] >> 16; |
| 507 | break; | 507 | break; |
| 508 | case EVSTWHO: | 508 | case EVSTWHO: |
| 509 | data.h[2] = *evr & 0xffff; | 509 | data.h[2] = *evr & 0xffff; |
| 510 | data.h[3] = regs->gpr[reg] & 0xffff; | 510 | data.h[3] = regs->gpr[reg] & 0xffff; |
| 511 | break; | 511 | break; |
| 512 | case EVSTWWE: | 512 | case EVSTWWE: |
| 513 | data.w[1] = *evr; | 513 | data.w[1] = *evr; |
| 514 | break; | 514 | break; |
| 515 | case EVSTWWO: | 515 | case EVSTWWO: |
| 516 | data.w[1] = regs->gpr[reg]; | 516 | data.w[1] = regs->gpr[reg]; |
| 517 | break; | 517 | break; |
| 518 | default: | 518 | default: |
| 519 | return -EINVAL; | 519 | return -EINVAL; |
| 520 | } | 520 | } |
| 521 | } else { | 521 | } else { |
| 522 | temp.ll = data.ll = 0; | 522 | temp.ll = data.ll = 0; |
| 523 | ret = 0; | 523 | ret = 0; |
| 524 | p = addr; | 524 | p = addr; |
| 525 | 525 | ||
| 526 | switch (nb) { | 526 | switch (nb) { |
| 527 | case 8: | 527 | case 8: |
| 528 | ret |= __get_user_inatomic(temp.v[0], p++); | 528 | ret |= __get_user_inatomic(temp.v[0], p++); |
| 529 | ret |= __get_user_inatomic(temp.v[1], p++); | 529 | ret |= __get_user_inatomic(temp.v[1], p++); |
| 530 | ret |= __get_user_inatomic(temp.v[2], p++); | 530 | ret |= __get_user_inatomic(temp.v[2], p++); |
| 531 | ret |= __get_user_inatomic(temp.v[3], p++); | 531 | ret |= __get_user_inatomic(temp.v[3], p++); |
| 532 | case 4: | 532 | case 4: |
| 533 | ret |= __get_user_inatomic(temp.v[4], p++); | 533 | ret |= __get_user_inatomic(temp.v[4], p++); |
| 534 | ret |= __get_user_inatomic(temp.v[5], p++); | 534 | ret |= __get_user_inatomic(temp.v[5], p++); |
| 535 | case 2: | 535 | case 2: |
| 536 | ret |= __get_user_inatomic(temp.v[6], p++); | 536 | ret |= __get_user_inatomic(temp.v[6], p++); |
| 537 | ret |= __get_user_inatomic(temp.v[7], p++); | 537 | ret |= __get_user_inatomic(temp.v[7], p++); |
| 538 | if (unlikely(ret)) | 538 | if (unlikely(ret)) |
| 539 | return -EFAULT; | 539 | return -EFAULT; |
| 540 | } | 540 | } |
| 541 | 541 | ||
| 542 | switch (instr) { | 542 | switch (instr) { |
| 543 | case EVLDD: | 543 | case EVLDD: |
| 544 | case EVLDW: | 544 | case EVLDW: |
| 545 | case EVLDH: | 545 | case EVLDH: |
| 546 | data.ll = temp.ll; | 546 | data.ll = temp.ll; |
| 547 | break; | 547 | break; |
| 548 | case EVLHHESPLAT: | 548 | case EVLHHESPLAT: |
| 549 | data.h[0] = temp.h[3]; | 549 | data.h[0] = temp.h[3]; |
| 550 | data.h[2] = temp.h[3]; | 550 | data.h[2] = temp.h[3]; |
| 551 | break; | 551 | break; |
| 552 | case EVLHHOUSPLAT: | 552 | case EVLHHOUSPLAT: |
| 553 | case EVLHHOSSPLAT: | 553 | case EVLHHOSSPLAT: |
| 554 | data.h[1] = temp.h[3]; | 554 | data.h[1] = temp.h[3]; |
| 555 | data.h[3] = temp.h[3]; | 555 | data.h[3] = temp.h[3]; |
| 556 | break; | 556 | break; |
| 557 | case EVLWHE: | 557 | case EVLWHE: |
| 558 | data.h[0] = temp.h[2]; | 558 | data.h[0] = temp.h[2]; |
| 559 | data.h[2] = temp.h[3]; | 559 | data.h[2] = temp.h[3]; |
| 560 | break; | 560 | break; |
| 561 | case EVLWHOU: | 561 | case EVLWHOU: |
| 562 | case EVLWHOS: | 562 | case EVLWHOS: |
| 563 | data.h[1] = temp.h[2]; | 563 | data.h[1] = temp.h[2]; |
| 564 | data.h[3] = temp.h[3]; | 564 | data.h[3] = temp.h[3]; |
| 565 | break; | 565 | break; |
| 566 | case EVLWWSPLAT: | 566 | case EVLWWSPLAT: |
| 567 | data.w[0] = temp.w[1]; | 567 | data.w[0] = temp.w[1]; |
| 568 | data.w[1] = temp.w[1]; | 568 | data.w[1] = temp.w[1]; |
| 569 | break; | 569 | break; |
| 570 | case EVLWHSPLAT: | 570 | case EVLWHSPLAT: |
| 571 | data.h[0] = temp.h[2]; | 571 | data.h[0] = temp.h[2]; |
| 572 | data.h[1] = temp.h[2]; | 572 | data.h[1] = temp.h[2]; |
| 573 | data.h[2] = temp.h[3]; | 573 | data.h[2] = temp.h[3]; |
| 574 | data.h[3] = temp.h[3]; | 574 | data.h[3] = temp.h[3]; |
| 575 | break; | 575 | break; |
| 576 | default: | 576 | default: |
| 577 | return -EINVAL; | 577 | return -EINVAL; |
| 578 | } | 578 | } |
| 579 | } | 579 | } |
| 580 | 580 | ||
| 581 | if (flags & SW) { | 581 | if (flags & SW) { |
| 582 | switch (flags & 0xf0) { | 582 | switch (flags & 0xf0) { |
| 583 | case E8: | 583 | case E8: |
| 584 | SWAP(data.v[0], data.v[7]); | 584 | SWAP(data.v[0], data.v[7]); |
| 585 | SWAP(data.v[1], data.v[6]); | 585 | SWAP(data.v[1], data.v[6]); |
| 586 | SWAP(data.v[2], data.v[5]); | 586 | SWAP(data.v[2], data.v[5]); |
| 587 | SWAP(data.v[3], data.v[4]); | 587 | SWAP(data.v[3], data.v[4]); |
| 588 | break; | 588 | break; |
| 589 | case E4: | 589 | case E4: |
| 590 | 590 | ||
| 591 | SWAP(data.v[0], data.v[3]); | 591 | SWAP(data.v[0], data.v[3]); |
| 592 | SWAP(data.v[1], data.v[2]); | 592 | SWAP(data.v[1], data.v[2]); |
| 593 | SWAP(data.v[4], data.v[7]); | 593 | SWAP(data.v[4], data.v[7]); |
| 594 | SWAP(data.v[5], data.v[6]); | 594 | SWAP(data.v[5], data.v[6]); |
| 595 | break; | 595 | break; |
| 596 | /* It's half-word endian */ | 596 | /* It's half-word endian */ |
| 597 | default: | 597 | default: |
| 598 | SWAP(data.v[0], data.v[1]); | 598 | SWAP(data.v[0], data.v[1]); |
| 599 | SWAP(data.v[2], data.v[3]); | 599 | SWAP(data.v[2], data.v[3]); |
| 600 | SWAP(data.v[4], data.v[5]); | 600 | SWAP(data.v[4], data.v[5]); |
| 601 | SWAP(data.v[6], data.v[7]); | 601 | SWAP(data.v[6], data.v[7]); |
| 602 | break; | 602 | break; |
| 603 | } | 603 | } |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | if (flags & SE) { | 606 | if (flags & SE) { |
| 607 | data.w[0] = (s16)data.h[1]; | 607 | data.w[0] = (s16)data.h[1]; |
| 608 | data.w[1] = (s16)data.h[3]; | 608 | data.w[1] = (s16)data.h[3]; |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | /* Store result to memory or update registers */ | 611 | /* Store result to memory or update registers */ |
| 612 | if (flags & ST) { | 612 | if (flags & ST) { |
| 613 | ret = 0; | 613 | ret = 0; |
| 614 | p = addr; | 614 | p = addr; |
| 615 | switch (nb) { | 615 | switch (nb) { |
| 616 | case 8: | 616 | case 8: |
| 617 | ret |= __put_user_inatomic(data.v[0], p++); | 617 | ret |= __put_user_inatomic(data.v[0], p++); |
| 618 | ret |= __put_user_inatomic(data.v[1], p++); | 618 | ret |= __put_user_inatomic(data.v[1], p++); |
| 619 | ret |= __put_user_inatomic(data.v[2], p++); | 619 | ret |= __put_user_inatomic(data.v[2], p++); |
| 620 | ret |= __put_user_inatomic(data.v[3], p++); | 620 | ret |= __put_user_inatomic(data.v[3], p++); |
| 621 | case 4: | 621 | case 4: |
| 622 | ret |= __put_user_inatomic(data.v[4], p++); | 622 | ret |= __put_user_inatomic(data.v[4], p++); |
| 623 | ret |= __put_user_inatomic(data.v[5], p++); | 623 | ret |= __put_user_inatomic(data.v[5], p++); |
| 624 | case 2: | 624 | case 2: |
| 625 | ret |= __put_user_inatomic(data.v[6], p++); | 625 | ret |= __put_user_inatomic(data.v[6], p++); |
| 626 | ret |= __put_user_inatomic(data.v[7], p++); | 626 | ret |= __put_user_inatomic(data.v[7], p++); |
| 627 | } | 627 | } |
| 628 | if (unlikely(ret)) | 628 | if (unlikely(ret)) |
| 629 | return -EFAULT; | 629 | return -EFAULT; |
| 630 | } else { | 630 | } else { |
| 631 | *evr = data.w[0]; | 631 | *evr = data.w[0]; |
| 632 | regs->gpr[reg] = data.w[1]; | 632 | regs->gpr[reg] = data.w[1]; |
| 633 | } | 633 | } |
| 634 | 634 | ||
| 635 | return 1; | 635 | return 1; |
| 636 | } | 636 | } |
| 637 | #endif /* CONFIG_SPE */ | 637 | #endif /* CONFIG_SPE */ |
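The data/temp union in emulate_spe() leans on the big-endian layout of these parts: w[0] aliases the high word of ll, h[3] the lowest halfword, v[7] the lowest byte. Sketched:

    /* Big-endian aliasing assumed by emulate_spe() (sketch):
     *
     *   ll: |<-------------------- 64 bits -------------------->|
     *   w:  |         w[0]         |         w[1]         |
     *   h:  |   h[0]   |   h[1]   |   h[2]   |   h[3]   |
     *   v:  | v[0]| v[1]| v[2]| v[3]| v[4]| v[5]| v[6]| v[7]|
     */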
| 638 | 638 | ||
| 639 | #ifdef CONFIG_VSX | 639 | #ifdef CONFIG_VSX |
| 640 | /* | 640 | /* |
| 641 | * Emulate VSX instructions... | 641 | * Emulate VSX instructions... |
| 642 | */ | 642 | */ |
| 643 | static int emulate_vsx(unsigned char __user *addr, unsigned int reg, | 643 | static int emulate_vsx(unsigned char __user *addr, unsigned int reg, |
| 644 | unsigned int areg, struct pt_regs *regs, | 644 | unsigned int areg, struct pt_regs *regs, |
| 645 | unsigned int flags, unsigned int length) | 645 | unsigned int flags, unsigned int length) |
| 646 | { | 646 | { |
| 647 | char *ptr; | 647 | char *ptr; |
| 648 | int ret = 0; | 648 | int ret = 0; |
| 649 | 649 | ||
| 650 | flush_vsx_to_thread(current); | 650 | flush_vsx_to_thread(current); |
| 651 | 651 | ||
| 652 | if (reg < 32) | 652 | if (reg < 32) |
| 653 | ptr = (char *) ¤t->thread.TS_FPR(reg); | 653 | ptr = (char *) ¤t->thread.TS_FPR(reg); |
| 654 | else | 654 | else |
| 655 | ptr = (char *) ¤t->thread.vr[reg - 32]; | 655 | ptr = (char *) ¤t->thread.vr[reg - 32]; |
| 656 | 656 | ||
| 657 | if (flags & ST) | 657 | if (flags & ST) |
| 658 | ret = __copy_to_user(addr, ptr, length); | 658 | ret = __copy_to_user(addr, ptr, length); |
| 659 | else { | 659 | else { |
| 660 | if (flags & SPLT){ | 660 | if (flags & SPLT){ |
| 661 | ret = __copy_from_user(ptr, addr, length); | 661 | ret = __copy_from_user(ptr, addr, length); |
| 662 | ptr += length; | 662 | ptr += length; |
| 663 | } | 663 | } |
| 664 | ret |= __copy_from_user(ptr, addr, length); | 664 | ret |= __copy_from_user(ptr, addr, length); |
| 665 | } | 665 | } |
| 666 | if (flags & U) | 666 | if (flags & U) |
| 667 | regs->gpr[areg] = regs->dar; | 667 | regs->gpr[areg] = regs->dar; |
| 668 | if (ret) | 668 | if (ret) |
| 669 | return -EFAULT; | 669 | return -EFAULT; |
| 670 | return 1; | 670 | return 1; |
| 671 | } | 671 | } |
| 672 | #endif | 672 | #endif |
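In the splat path the same doubleword lands in both halves of the target register: the first __copy_from_user() fills the low half, ptr advances by length (8 here), and the second copy repeats the bytes. An equivalent sketch (names are illustrative):

    /* Sketch of the SPLT case: duplicate the 8 source bytes into
     * both halves of the 16-byte register image. */
    memcpy(vsr_image,     user_bytes, 8);
    memcpy(vsr_image + 8, user_bytes, 8);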
| 673 | 673 | ||
| 674 | /* | 674 | /* |
| 675 | * Called on alignment exception. Attempts to fix it up. | 675 | * Called on alignment exception. Attempts to fix it up. |
| 676 | * | 676 | * |
| 677 | * Return 1 on success | 677 | * Return 1 on success |
| 678 | * Return 0 if unable to handle the interrupt | 678 | * Return 0 if unable to handle the interrupt |
| 679 | * Return -EFAULT if data address is bad | 679 | * Return -EFAULT if data address is bad |
| 680 | */ | 680 | */ |
| 681 | 681 | ||
| 682 | int fix_alignment(struct pt_regs *regs) | 682 | int fix_alignment(struct pt_regs *regs) |
| 683 | { | 683 | { |
| 684 | unsigned int instr, nb, flags, instruction = 0; | 684 | unsigned int instr, nb, flags, instruction = 0; |
| 685 | unsigned int reg, areg; | 685 | unsigned int reg, areg; |
| 686 | unsigned int dsisr; | 686 | unsigned int dsisr; |
| 687 | unsigned char __user *addr; | 687 | unsigned char __user *addr; |
| 688 | unsigned long p, swiz; | 688 | unsigned long p, swiz; |
| 689 | int ret, t; | 689 | int ret, t; |
| 690 | union { | 690 | union { |
| 691 | u64 ll; | 691 | u64 ll; |
| 692 | double dd; | 692 | double dd; |
| 693 | unsigned char v[8]; | 693 | unsigned char v[8]; |
| 694 | struct { | 694 | struct { |
| 695 | unsigned hi32; | 695 | unsigned hi32; |
| 696 | int low32; | 696 | int low32; |
| 697 | } x32; | 697 | } x32; |
| 698 | struct { | 698 | struct { |
| 699 | unsigned char hi48[6]; | 699 | unsigned char hi48[6]; |
| 700 | short low16; | 700 | short low16; |
| 701 | } x16; | 701 | } x16; |
| 702 | } data; | 702 | } data; |
| 703 | 703 | ||
| 704 | /* | 704 | /* |
| 705 | * We require a complete register set; if not, our assembly | 705 | * We require a complete register set; if not, our assembly |
| 706 | * is broken | 706 | * is broken |
| 707 | */ | 707 | */ |
| 708 | CHECK_FULL_REGS(regs); | 708 | CHECK_FULL_REGS(regs); |
| 709 | 709 | ||
| 710 | dsisr = regs->dsisr; | 710 | dsisr = regs->dsisr; |
| 711 | 711 | ||
| 712 | /* Some processors don't provide us with a DSISR we can use here, | 712 | /* Some processors don't provide us with a DSISR we can use here, |
| 713 | * so let's make one up from the instruction | 713 | * so let's make one up from the instruction |
| 714 | */ | 714 | */ |
| 715 | if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) { | 715 | if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) { |
| 716 | unsigned long pc = regs->nip; | 716 | unsigned long pc = regs->nip; |
| 717 | 717 | ||
| 718 | if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE)) | 718 | if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE)) |
| 719 | pc ^= 4; | 719 | pc ^= 4; |
| 720 | if (unlikely(__get_user_inatomic(instr, | 720 | if (unlikely(__get_user_inatomic(instr, |
| 721 | (unsigned int __user *)pc))) | 721 | (unsigned int __user *)pc))) |
| 722 | return -EFAULT; | 722 | return -EFAULT; |
| 723 | if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE)) | 723 | if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE)) |
| 724 | instr = cpu_to_le32(instr); | 724 | instr = cpu_to_le32(instr); |
| 725 | dsisr = make_dsisr(instr); | 725 | dsisr = make_dsisr(instr); |
| 726 | instruction = instr; | 726 | instruction = instr; |
| 727 | } | 727 | } |
| 728 | 728 | ||
| 729 | /* extract the operation and registers from the dsisr */ | 729 | /* extract the operation and registers from the dsisr */ |
| 730 | reg = (dsisr >> 5) & 0x1f; /* source/dest register */ | 730 | reg = (dsisr >> 5) & 0x1f; /* source/dest register */ |
| 731 | areg = dsisr & 0x1f; /* register to update */ | 731 | areg = dsisr & 0x1f; /* register to update */ |
| 732 | 732 | ||
| 733 | #ifdef CONFIG_SPE | 733 | #ifdef CONFIG_SPE |
| 734 | if ((instr >> 26) == 0x4) { | 734 | if ((instr >> 26) == 0x4) { |
| 735 | PPC_WARN_EMULATED(spe); | 735 | PPC_WARN_ALIGNMENT(spe, regs); |
| 736 | return emulate_spe(regs, reg, instr); | 736 | return emulate_spe(regs, reg, instr); |
| 737 | } | 737 | } |
| 738 | #endif | 738 | #endif |
| 739 | 739 | ||
| 740 | instr = (dsisr >> 10) & 0x7f; | 740 | instr = (dsisr >> 10) & 0x7f; |
| 741 | instr |= (dsisr >> 13) & 0x60; | 741 | instr |= (dsisr >> 13) & 0x60; |
| 742 | 742 | ||
| 743 | /* Lookup the operation in our table */ | 743 | /* Lookup the operation in our table */ |
| 744 | nb = aligninfo[instr].len; | 744 | nb = aligninfo[instr].len; |
| 745 | flags = aligninfo[instr].flags; | 745 | flags = aligninfo[instr].flags; |
| 746 | 746 | ||
| 747 | /* Byteswap little endian loads and stores */ | 747 | /* Byteswap little endian loads and stores */ |
| 748 | swiz = 0; | 748 | swiz = 0; |
| 749 | if (regs->msr & MSR_LE) { | 749 | if (regs->msr & MSR_LE) { |
| 750 | flags ^= SW; | 750 | flags ^= SW; |
| 751 | /* | 751 | /* |
| 752 | * So-called "PowerPC little endian" mode works by | 752 | * So-called "PowerPC little endian" mode works by |
| 753 | * swizzling addresses rather than by actually doing | 753 | * swizzling addresses rather than by actually doing |
| 754 | * any byte-swapping. To emulate this, we XOR each | 754 | * any byte-swapping. To emulate this, we XOR each |
| 755 | * byte address with 7. We also byte-swap, because | 755 | * byte address with 7. We also byte-swap, because |
| 756 | * the processor's address swizzling depends on the | 756 | * the processor's address swizzling depends on the |
| 757 | * operand size (it xors the address with 7 for bytes, | 757 | * operand size (it xors the address with 7 for bytes, |
| 758 | * 6 for halfwords, 4 for words, 0 for doublewords) but | 758 | * 6 for halfwords, 4 for words, 0 for doublewords) but |
| 759 | * we will xor with 7 and load/store each byte separately. | 759 | * we will xor with 7 and load/store each byte separately. |
| 760 | */ | 760 | */ |
| 761 | if (cpu_has_feature(CPU_FTR_PPC_LE)) | 761 | if (cpu_has_feature(CPU_FTR_PPC_LE)) |
| 762 | swiz = 7; | 762 | swiz = 7; |
| 763 | } | 763 | } |
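	/* Worked example (sketch, not in the source): with swiz == 7,
	 * SWIZ_PTR() turns byte accesses at 0x1000, 0x1001, ... into
	 * accesses at 0x1007, 0x1006, ..., mimicking the CPU's own
	 * address munging, while the toggled SW flag byte-swaps the
	 * assembled value. */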
| 764 | 764 | ||
| 765 | /* DAR has the operand effective address */ | 765 | /* DAR has the operand effective address */ |
| 766 | addr = (unsigned char __user *)regs->dar; | 766 | addr = (unsigned char __user *)regs->dar; |
| 767 | 767 | ||
| 768 | #ifdef CONFIG_VSX | 768 | #ifdef CONFIG_VSX |
| 769 | if ((instruction & 0xfc00003e) == 0x7c000018) { | 769 | if ((instruction & 0xfc00003e) == 0x7c000018) { |
| 770 | /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */ | 770 | /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */ |
| 771 | reg |= (instruction & 0x1) << 5; | 771 | reg |= (instruction & 0x1) << 5; |
| 772 | /* Simple inline decoder instead of a table */ | 772 | /* Simple inline decoder instead of a table */ |
| 773 | if (instruction & 0x200) | 773 | if (instruction & 0x200) |
| 774 | nb = 16; | 774 | nb = 16; |
| 775 | else if (instruction & 0x080) | 775 | else if (instruction & 0x080) |
| 776 | nb = 8; | 776 | nb = 8; |
| 777 | else | 777 | else |
| 778 | nb = 4; | 778 | nb = 4; |
| 779 | flags = 0; | 779 | flags = 0; |
| 780 | if (instruction & 0x100) | 780 | if (instruction & 0x100) |
| 781 | flags |= ST; | 781 | flags |= ST; |
| 782 | if (instruction & 0x040) | 782 | if (instruction & 0x040) |
| 783 | flags |= U; | 783 | flags |= U; |
| 784 | /* splat load needs a special decoder */ | 784 | /* splat load needs a special decoder */ |
| 785 | if ((instruction & 0x400) == 0){ | 785 | if ((instruction & 0x400) == 0){ |
| 786 | flags |= SPLT; | 786 | flags |= SPLT; |
| 787 | nb = 8; | 787 | nb = 8; |
| 788 | } | 788 | } |
| 789 | PPC_WARN_EMULATED(vsx); | 789 | PPC_WARN_ALIGNMENT(vsx, regs); |
| 790 | return emulate_vsx(addr, reg, areg, regs, flags, nb); | 790 | return emulate_vsx(addr, reg, areg, regs, flags, nb); |
| 791 | } | 791 | } |
| 792 | #endif | 792 | #endif |
| 793 | /* A size of 0 indicates an instruction we don't support, with | 793 | /* A size of 0 indicates an instruction we don't support, with |
| 794 | * the exception of DCBZ, which is handled as a special case here | 794 | * the exception of DCBZ, which is handled as a special case here |
| 795 | */ | 795 | */ |
| 796 | if (instr == DCBZ) { | 796 | if (instr == DCBZ) { |
| 797 | PPC_WARN_EMULATED(dcbz); | 797 | PPC_WARN_ALIGNMENT(dcbz, regs); |
| 798 | return emulate_dcbz(regs, addr); | 798 | return emulate_dcbz(regs, addr); |
| 799 | } | 799 | } |
| 800 | if (unlikely(nb == 0)) | 800 | if (unlikely(nb == 0)) |
| 801 | return 0; | 801 | return 0; |
| 802 | 802 | ||
| 803 | /* Load/Store Multiple instructions are handled in their own | 803 | /* Load/Store Multiple instructions are handled in their own |
| 804 | * function | 804 | * function |
| 805 | */ | 805 | */ |
| 806 | if (flags & M) { | 806 | if (flags & M) { |
| 807 | PPC_WARN_EMULATED(multiple); | 807 | PPC_WARN_ALIGNMENT(multiple, regs); |
| 808 | return emulate_multiple(regs, addr, reg, nb, | 808 | return emulate_multiple(regs, addr, reg, nb, |
| 809 | flags, instr, swiz); | 809 | flags, instr, swiz); |
| 810 | } | 810 | } |
| 811 | 811 | ||
| 812 | /* Verify the address of the operand */ | 812 | /* Verify the address of the operand */ |
| 813 | if (unlikely(user_mode(regs) && | 813 | if (unlikely(user_mode(regs) && |
| 814 | !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), | 814 | !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), |
| 815 | addr, nb))) | 815 | addr, nb))) |
| 816 | return -EFAULT; | 816 | return -EFAULT; |
| 817 | 817 | ||
| 818 | /* Force the fprs into the save area so we can reference them */ | 818 | /* Force the fprs into the save area so we can reference them */ |
| 819 | if (flags & F) { | 819 | if (flags & F) { |
| 820 | /* userland only */ | 820 | /* userland only */ |
| 821 | if (unlikely(!user_mode(regs))) | 821 | if (unlikely(!user_mode(regs))) |
| 822 | return 0; | 822 | return 0; |
| 823 | flush_fp_to_thread(current); | 823 | flush_fp_to_thread(current); |
| 824 | } | 824 | } |
| 825 | 825 | ||
| 826 | /* Special case for 16-byte FP loads and stores */ | 826 | /* Special case for 16-byte FP loads and stores */ |
| 827 | if (nb == 16) { | 827 | if (nb == 16) { |
| 828 | PPC_WARN_EMULATED(fp_pair); | 828 | PPC_WARN_ALIGNMENT(fp_pair, regs); |
| 829 | return emulate_fp_pair(addr, reg, flags); | 829 | return emulate_fp_pair(addr, reg, flags); |
| 830 | } | 830 | } |
| 831 | 831 | ||
| 832 | PPC_WARN_EMULATED(unaligned); | 832 | PPC_WARN_ALIGNMENT(unaligned, regs); |
| 833 | 833 | ||
| 834 | /* If we are loading, get the data from user space, else | 834 | /* If we are loading, get the data from user space, else |
| 835 | * get it from register values | 835 | * get it from register values |
| 836 | */ | 836 | */ |
| 837 | if (!(flags & ST)) { | 837 | if (!(flags & ST)) { |
| 838 | data.ll = 0; | 838 | data.ll = 0; |
| 839 | ret = 0; | 839 | ret = 0; |
| 840 | p = (unsigned long) addr; | 840 | p = (unsigned long) addr; |
| 841 | switch (nb) { | 841 | switch (nb) { |
| 842 | case 8: | 842 | case 8: |
| 843 | ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++)); | 843 | ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++)); |
| 844 | ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++)); | 844 | ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++)); |
| 845 | ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++)); | 845 | ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++)); |
| 846 | ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++)); | 846 | ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++)); |
| 847 | case 4: | 847 | case 4: |
| 848 | ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++)); | 848 | ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++)); |
| 849 | ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++)); | 849 | ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++)); |
| 850 | case 2: | 850 | case 2: |
| 851 | ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++)); | 851 | ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++)); |
| 852 | ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++)); | 852 | ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++)); |
| 853 | if (unlikely(ret)) | 853 | if (unlikely(ret)) |
| 854 | return -EFAULT; | 854 | return -EFAULT; |
| 855 | } | 855 | } |
| 856 | } else if (flags & F) { | 856 | } else if (flags & F) { |
| 857 | data.dd = current->thread.TS_FPR(reg); | 857 | data.dd = current->thread.TS_FPR(reg); |
| 858 | if (flags & S) { | 858 | if (flags & S) { |
| 859 | /* Single-precision FP store requires conversion... */ | 859 | /* Single-precision FP store requires conversion... */ |
| 860 | #ifdef CONFIG_PPC_FPU | 860 | #ifdef CONFIG_PPC_FPU |
| 861 | preempt_disable(); | 861 | preempt_disable(); |
| 862 | enable_kernel_fp(); | 862 | enable_kernel_fp(); |
| 863 | cvt_df(&data.dd, (float *)&data.v[4], ¤t->thread); | 863 | cvt_df(&data.dd, (float *)&data.v[4], ¤t->thread); |
| 864 | preempt_enable(); | 864 | preempt_enable(); |
| 865 | #else | 865 | #else |
| 866 | return 0; | 866 | return 0; |
| 867 | #endif | 867 | #endif |
| 868 | } | 868 | } |
| 869 | } else | 869 | } else |
| 870 | data.ll = regs->gpr[reg]; | 870 | data.ll = regs->gpr[reg]; |
| 871 | 871 | ||
| 872 | if (flags & SW) { | 872 | if (flags & SW) { |
| 873 | switch (nb) { | 873 | switch (nb) { |
| 874 | case 8: | 874 | case 8: |
| 875 | SWAP(data.v[0], data.v[7]); | 875 | SWAP(data.v[0], data.v[7]); |
| 876 | SWAP(data.v[1], data.v[6]); | 876 | SWAP(data.v[1], data.v[6]); |
| 877 | SWAP(data.v[2], data.v[5]); | 877 | SWAP(data.v[2], data.v[5]); |
| 878 | SWAP(data.v[3], data.v[4]); | 878 | SWAP(data.v[3], data.v[4]); |
| 879 | break; | 879 | break; |
| 880 | case 4: | 880 | case 4: |
| 881 | SWAP(data.v[4], data.v[7]); | 881 | SWAP(data.v[4], data.v[7]); |
| 882 | SWAP(data.v[5], data.v[6]); | 882 | SWAP(data.v[5], data.v[6]); |
| 883 | break; | 883 | break; |
| 884 | case 2: | 884 | case 2: |
| 885 | SWAP(data.v[6], data.v[7]); | 885 | SWAP(data.v[6], data.v[7]); |
| 886 | break; | 886 | break; |
| 887 | } | 887 | } |
| 888 | } | 888 | } |
| 889 | 889 | ||
| 890 | /* Perform other misc operations like sign extension | 890 | /* Perform other misc operations like sign extension |
| 891 | * or floating point single precision conversion | 891 | * or floating point single precision conversion |
| 892 | */ | 892 | */ |
| 893 | switch (flags & ~(U|SW)) { | 893 | switch (flags & ~(U|SW)) { |
| 894 | case LD+SE: /* sign extending integer loads */ | 894 | case LD+SE: /* sign extending integer loads */ |
| 895 | case LD+F+SE: /* sign extend for lfiwax */ | 895 | case LD+F+SE: /* sign extend for lfiwax */ |
| 896 | if ( nb == 2 ) | 896 | if ( nb == 2 ) |
| 897 | data.ll = data.x16.low16; | 897 | data.ll = data.x16.low16; |
| 898 | else /* nb must be 4 */ | 898 | else /* nb must be 4 */ |
| 899 | data.ll = data.x32.low32; | 899 | data.ll = data.x32.low32; |
| 900 | break; | 900 | break; |
| 901 | 901 | ||
| 902 | /* Single-precision FP load requires conversion... */ | 902 | /* Single-precision FP load requires conversion... */ |
| 903 | case LD+F+S: | 903 | case LD+F+S: |
| 904 | #ifdef CONFIG_PPC_FPU | 904 | #ifdef CONFIG_PPC_FPU |
| 905 | preempt_disable(); | 905 | preempt_disable(); |
| 906 | enable_kernel_fp(); | 906 | enable_kernel_fp(); |
| 907 | cvt_fd((float *)&data.v[4], &data.dd, ¤t->thread); | 907 | cvt_fd((float *)&data.v[4], &data.dd, ¤t->thread); |
| 908 | preempt_enable(); | 908 | preempt_enable(); |
| 909 | #else | 909 | #else |
| 910 | return 0; | 910 | return 0; |
| 911 | #endif | 911 | #endif |
| 912 | break; | 912 | break; |
| 913 | } | 913 | } |
| 914 | 914 | ||
| 915 | /* Store result to memory or update registers */ | 915 | /* Store result to memory or update registers */ |
| 916 | if (flags & ST) { | 916 | if (flags & ST) { |
| 917 | ret = 0; | 917 | ret = 0; |
| 918 | p = (unsigned long) addr; | 918 | p = (unsigned long) addr; |
| 919 | switch (nb) { | 919 | switch (nb) { |
| 920 | case 8: | 920 | case 8: |
| 921 | ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++)); | 921 | ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++)); |
| 922 | ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++)); | 922 | ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++)); |
| 923 | ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++)); | 923 | ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++)); |
| 924 | ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++)); | 924 | ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++)); |
| 925 | case 4: | 925 | case 4: |
| 926 | ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++)); | 926 | ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++)); |
| 927 | ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++)); | 927 | ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++)); |
| 928 | case 2: | 928 | case 2: |
| 929 | ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++)); | 929 | ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++)); |
| 930 | ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++)); | 930 | ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++)); |
| 931 | } | 931 | } |
| 932 | if (unlikely(ret)) | 932 | if (unlikely(ret)) |
| 933 | return -EFAULT; | 933 | return -EFAULT; |
| 934 | } else if (flags & F) | 934 | } else if (flags & F) |
| 935 | current->thread.TS_FPR(reg) = data.dd; | 935 | current->thread.TS_FPR(reg) = data.dd; |
| 936 | else | 936 | else |
| 937 | regs->gpr[reg] = data.ll; | 937 | regs->gpr[reg] = data.ll; |
| 938 | 938 | ||
| 939 | /* Update RA as needed */ | 939 | /* Update RA as needed */ |
| 940 | if (flags & U) | 940 | if (flags & U) |
| 941 | regs->gpr[areg] = regs->dar; | 941 | regs->gpr[areg] = regs->dar; |
| 942 | 942 | ||
| 943 | return 1; | 943 | return 1; |
| 944 | } | 944 | } |
| 945 | 945 |
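The return value drives the alignment exception path; a sketch of the caller contract, modelled on alignment_exception() in traps.c (so an assumption about code outside this diff):

    ret = fix_alignment(regs);
    if (ret == 1)
            regs->nip += 4;         /* emulated: step past the instruction */
    else if (ret == -EFAULT)
            ;                       /* bad data address: SIGSEGV */
    else
            ;                       /* unhandled: SIGBUS */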
arch/powerpc/kernel/entry_64.S
| 1 | /* | 1 | /* |
| 2 | * PowerPC version | 2 | * PowerPC version |
| 3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
| 4 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | 4 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP |
| 5 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | 5 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> |
| 6 | * Adapted for Power Macintosh by Paul Mackerras. | 6 | * Adapted for Power Macintosh by Paul Mackerras. |
| 7 | * Low-level exception handlers and MMU support | 7 | * Low-level exception handlers and MMU support |
| 8 | * rewritten by Paul Mackerras. | 8 | * rewritten by Paul Mackerras. |
| 9 | * Copyright (C) 1996 Paul Mackerras. | 9 | * Copyright (C) 1996 Paul Mackerras. |
| 10 | * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | 10 | * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). |
| 11 | * | 11 | * |
| 12 | * This file contains the system call entry code, context switch | 12 | * This file contains the system call entry code, context switch |
| 13 | * code, and exception/interrupt return code for PowerPC. | 13 | * code, and exception/interrupt return code for PowerPC. |
| 14 | * | 14 | * |
| 15 | * This program is free software; you can redistribute it and/or | 15 | * This program is free software; you can redistribute it and/or |
| 16 | * modify it under the terms of the GNU General Public License | 16 | * modify it under the terms of the GNU General Public License |
| 17 | * as published by the Free Software Foundation; either version | 17 | * as published by the Free Software Foundation; either version |
| 18 | * 2 of the License, or (at your option) any later version. | 18 | * 2 of the License, or (at your option) any later version. |
| 19 | */ | 19 | */ |
| 20 | 20 | ||
| 21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
| 22 | #include <asm/unistd.h> | 22 | #include <asm/unistd.h> |
| 23 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
| 24 | #include <asm/page.h> | 24 | #include <asm/page.h> |
| 25 | #include <asm/mmu.h> | 25 | #include <asm/mmu.h> |
| 26 | #include <asm/thread_info.h> | 26 | #include <asm/thread_info.h> |
| 27 | #include <asm/ppc_asm.h> | 27 | #include <asm/ppc_asm.h> |
| 28 | #include <asm/asm-offsets.h> | 28 | #include <asm/asm-offsets.h> |
| 29 | #include <asm/cputable.h> | 29 | #include <asm/cputable.h> |
| 30 | #include <asm/firmware.h> | 30 | #include <asm/firmware.h> |
| 31 | #include <asm/bug.h> | 31 | #include <asm/bug.h> |
| 32 | #include <asm/ptrace.h> | 32 | #include <asm/ptrace.h> |
| 33 | #include <asm/irqflags.h> | 33 | #include <asm/irqflags.h> |
| 34 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
| 35 | 35 | ||
| 36 | /* | 36 | /* |
| 37 | * System calls. | 37 | * System calls. |
| 38 | */ | 38 | */ |
| 39 | .section ".toc","aw" | 39 | .section ".toc","aw" |
| 40 | .SYS_CALL_TABLE: | 40 | .SYS_CALL_TABLE: |
| 41 | .tc .sys_call_table[TC],.sys_call_table | 41 | .tc .sys_call_table[TC],.sys_call_table |
| 42 | 42 | ||
| 43 | /* This value is used to mark exception frames on the stack. */ | 43 | /* This value is used to mark exception frames on the stack. */ |
| 44 | exception_marker: | 44 | exception_marker: |
| 45 | .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER | 45 | .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER |
| 46 | 46 | ||
| 47 | .section ".text" | 47 | .section ".text" |
| 48 | .align 7 | 48 | .align 7 |
| 49 | 49 | ||
| 50 | #undef SHOW_SYSCALLS | 50 | #undef SHOW_SYSCALLS |
| 51 | 51 | ||
| 52 | .globl system_call_common | 52 | .globl system_call_common |
| 53 | system_call_common: | 53 | system_call_common: |
| 54 | andi. r10,r12,MSR_PR | 54 | andi. r10,r12,MSR_PR |
| 55 | mr r10,r1 | 55 | mr r10,r1 |
| 56 | addi r1,r1,-INT_FRAME_SIZE | 56 | addi r1,r1,-INT_FRAME_SIZE |
| 57 | beq- 1f | 57 | beq- 1f |
| 58 | ld r1,PACAKSAVE(r13) | 58 | ld r1,PACAKSAVE(r13) |
| 59 | 1: std r10,0(r1) | 59 | 1: std r10,0(r1) |
| 60 | std r11,_NIP(r1) | 60 | std r11,_NIP(r1) |
| 61 | std r12,_MSR(r1) | 61 | std r12,_MSR(r1) |
| 62 | std r0,GPR0(r1) | 62 | std r0,GPR0(r1) |
| 63 | std r10,GPR1(r1) | 63 | std r10,GPR1(r1) |
| 64 | ACCOUNT_CPU_USER_ENTRY(r10, r11) | 64 | ACCOUNT_CPU_USER_ENTRY(r10, r11) |
| 65 | /* | 65 | /* |
| 66 | * This "crclr so" clears CR0.SO, which is the error indication on | 66 | * This "crclr so" clears CR0.SO, which is the error indication on |
| 67 | * return from this system call. There must be no cmp instruction | 67 | * return from this system call. There must be no cmp instruction |
| 68 | * between it and the "mfcr r9" below, otherwise if XER.SO is set, | 68 | * between it and the "mfcr r9" below, otherwise if XER.SO is set, |
| 69 | * CR0.SO will get set, causing all system calls to appear to fail. | 69 | * CR0.SO will get set, causing all system calls to appear to fail. |
| 70 | */ | 70 | */ |
| 71 | crclr so | 71 | crclr so |
| 72 | std r2,GPR2(r1) | 72 | std r2,GPR2(r1) |
| 73 | std r3,GPR3(r1) | 73 | std r3,GPR3(r1) |
| 74 | std r4,GPR4(r1) | 74 | std r4,GPR4(r1) |
| 75 | std r5,GPR5(r1) | 75 | std r5,GPR5(r1) |
| 76 | std r6,GPR6(r1) | 76 | std r6,GPR6(r1) |
| 77 | std r7,GPR7(r1) | 77 | std r7,GPR7(r1) |
| 78 | std r8,GPR8(r1) | 78 | std r8,GPR8(r1) |
| 79 | li r11,0 | 79 | li r11,0 |
| 80 | std r11,GPR9(r1) | 80 | std r11,GPR9(r1) |
| 81 | std r11,GPR10(r1) | 81 | std r11,GPR10(r1) |
| 82 | std r11,GPR11(r1) | 82 | std r11,GPR11(r1) |
| 83 | std r11,GPR12(r1) | 83 | std r11,GPR12(r1) |
| 84 | std r9,GPR13(r1) | 84 | std r9,GPR13(r1) |
| 85 | mfcr r9 | 85 | mfcr r9 |
| 86 | mflr r10 | 86 | mflr r10 |
| 87 | li r11,0xc01 | 87 | li r11,0xc01 |
| 88 | std r9,_CCR(r1) | 88 | std r9,_CCR(r1) |
| 89 | std r10,_LINK(r1) | 89 | std r10,_LINK(r1) |
| 90 | std r11,_TRAP(r1) | 90 | std r11,_TRAP(r1) |
| 91 | mfxer r9 | 91 | mfxer r9 |
| 92 | mfctr r10 | 92 | mfctr r10 |
| 93 | std r9,_XER(r1) | 93 | std r9,_XER(r1) |
| 94 | std r10,_CTR(r1) | 94 | std r10,_CTR(r1) |
| 95 | std r3,ORIG_GPR3(r1) | 95 | std r3,ORIG_GPR3(r1) |
| 96 | ld r2,PACATOC(r13) | 96 | ld r2,PACATOC(r13) |
| 97 | addi r9,r1,STACK_FRAME_OVERHEAD | 97 | addi r9,r1,STACK_FRAME_OVERHEAD |
| 98 | ld r11,exception_marker@toc(r2) | 98 | ld r11,exception_marker@toc(r2) |
| 99 | std r11,-16(r9) /* "regshere" marker */ | 99 | std r11,-16(r9) /* "regshere" marker */ |
| 100 | #ifdef CONFIG_TRACE_IRQFLAGS | 100 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 101 | bl .trace_hardirqs_on | 101 | bl .trace_hardirqs_on |
| 102 | REST_GPR(0,r1) | 102 | REST_GPR(0,r1) |
| 103 | REST_4GPRS(3,r1) | 103 | REST_4GPRS(3,r1) |
| 104 | REST_2GPRS(7,r1) | 104 | REST_2GPRS(7,r1) |
| 105 | addi r9,r1,STACK_FRAME_OVERHEAD | 105 | addi r9,r1,STACK_FRAME_OVERHEAD |
| 106 | ld r12,_MSR(r1) | 106 | ld r12,_MSR(r1) |
| 107 | #endif /* CONFIG_TRACE_IRQFLAGS */ | 107 | #endif /* CONFIG_TRACE_IRQFLAGS */ |
| 108 | li r10,1 | 108 | li r10,1 |
| 109 | stb r10,PACASOFTIRQEN(r13) | 109 | stb r10,PACASOFTIRQEN(r13) |
| 110 | stb r10,PACAHARDIRQEN(r13) | 110 | stb r10,PACAHARDIRQEN(r13) |
| 111 | std r10,SOFTE(r1) | 111 | std r10,SOFTE(r1) |
| 112 | #ifdef CONFIG_PPC_ISERIES | 112 | #ifdef CONFIG_PPC_ISERIES |
| 113 | BEGIN_FW_FTR_SECTION | 113 | BEGIN_FW_FTR_SECTION |
| 114 | /* Hack for handling interrupts when soft-enabling on iSeries */ | 114 | /* Hack for handling interrupts when soft-enabling on iSeries */ |
| 115 | cmpdi cr1,r0,0x5555 /* syscall 0x5555 */ | 115 | cmpdi cr1,r0,0x5555 /* syscall 0x5555 */ |
| 116 | andi. r10,r12,MSR_PR /* from kernel */ | 116 | andi. r10,r12,MSR_PR /* from kernel */ |
| 117 | crand 4*cr0+eq,4*cr1+eq,4*cr0+eq | 117 | crand 4*cr0+eq,4*cr1+eq,4*cr0+eq |
| 118 | bne 2f | 118 | bne 2f |
| 119 | b hardware_interrupt_entry | 119 | b hardware_interrupt_entry |
| 120 | 2: | 120 | 2: |
| 121 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | 121 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
| 122 | #endif /* CONFIG_PPC_ISERIES */ | 122 | #endif /* CONFIG_PPC_ISERIES */ |
| 123 | 123 | ||
| 124 | /* Hard enable interrupts */ | 124 | /* Hard enable interrupts */ |
| 125 | #ifdef CONFIG_PPC_BOOK3E | 125 | #ifdef CONFIG_PPC_BOOK3E |
| 126 | wrteei 1 | 126 | wrteei 1 |
| 127 | #else | 127 | #else |
| 128 | mfmsr r11 | 128 | mfmsr r11 |
| 129 | ori r11,r11,MSR_EE | 129 | ori r11,r11,MSR_EE |
| 130 | mtmsrd r11,1 | 130 | mtmsrd r11,1 |
| 131 | #endif /* CONFIG_PPC_BOOK3E */ | 131 | #endif /* CONFIG_PPC_BOOK3E */ |
| 132 | 132 | ||
| 133 | #ifdef SHOW_SYSCALLS | 133 | #ifdef SHOW_SYSCALLS |
| 134 | bl .do_show_syscall | 134 | bl .do_show_syscall |
| 135 | REST_GPR(0,r1) | 135 | REST_GPR(0,r1) |
| 136 | REST_4GPRS(3,r1) | 136 | REST_4GPRS(3,r1) |
| 137 | REST_2GPRS(7,r1) | 137 | REST_2GPRS(7,r1) |
| 138 | addi r9,r1,STACK_FRAME_OVERHEAD | 138 | addi r9,r1,STACK_FRAME_OVERHEAD |
| 139 | #endif | 139 | #endif |
| 140 | clrrdi r11,r1,THREAD_SHIFT | 140 | clrrdi r11,r1,THREAD_SHIFT |
| 141 | ld r10,TI_FLAGS(r11) | 141 | ld r10,TI_FLAGS(r11) |
| 142 | andi. r11,r10,_TIF_SYSCALL_T_OR_A | 142 | andi. r11,r10,_TIF_SYSCALL_T_OR_A |
| 143 | bne- syscall_dotrace | 143 | bne- syscall_dotrace |
| 144 | syscall_dotrace_cont: | 144 | syscall_dotrace_cont: |
| 145 | cmpldi 0,r0,NR_syscalls | 145 | cmpldi 0,r0,NR_syscalls |
| 146 | bge- syscall_enosys | 146 | bge- syscall_enosys |
| 147 | 147 | ||
| 148 | system_call: /* label this so stack traces look sane */ | 148 | system_call: /* label this so stack traces look sane */ |
| 149 | /* | 149 | /* |
| 150 | * Need to vector to the 32-bit or default sys_call_table here, | 150 | * Need to vector to the 32-bit or default sys_call_table here, |
| 151 | * based on caller's run-mode / personality. | 151 | * based on caller's run-mode / personality. |
| 152 | */ | 152 | */ |
| 153 | ld r11,.SYS_CALL_TABLE@toc(2) | 153 | ld r11,.SYS_CALL_TABLE@toc(2) |
| 154 | andi. r10,r10,_TIF_32BIT | 154 | andi. r10,r10,_TIF_32BIT |
| 155 | beq 15f | 155 | beq 15f |
| 156 | addi r11,r11,8 /* use 32-bit syscall entries */ | 156 | addi r11,r11,8 /* use 32-bit syscall entries */ |
| 157 | clrldi r3,r3,32 | 157 | clrldi r3,r3,32 |
| 158 | clrldi r4,r4,32 | 158 | clrldi r4,r4,32 |
| 159 | clrldi r5,r5,32 | 159 | clrldi r5,r5,32 |
| 160 | clrldi r6,r6,32 | 160 | clrldi r6,r6,32 |
| 161 | clrldi r7,r7,32 | 161 | clrldi r7,r7,32 |
| 162 | clrldi r8,r8,32 | 162 | clrldi r8,r8,32 |
| 163 | 15: | 163 | 15: |
| 164 | slwi r0,r0,4 | 164 | slwi r0,r0,4 |
| 165 | ldx r10,r11,r0 /* Fetch system call handler [ptr] */ | 165 | ldx r10,r11,r0 /* Fetch system call handler [ptr] */ |
| 166 | mtctr r10 | 166 | mtctr r10 |
| 167 | bctrl /* Call handler */ | 167 | bctrl /* Call handler */ |
| 168 | 168 | ||
| 169 | syscall_exit: | 169 | syscall_exit: |
| 170 | std r3,RESULT(r1) | 170 | std r3,RESULT(r1) |
| 171 | #ifdef SHOW_SYSCALLS | 171 | #ifdef SHOW_SYSCALLS |
| 172 | bl .do_show_syscall_exit | 172 | bl .do_show_syscall_exit |
| 173 | ld r3,RESULT(r1) | 173 | ld r3,RESULT(r1) |
| 174 | #endif | 174 | #endif |
| 175 | clrrdi r12,r1,THREAD_SHIFT | 175 | clrrdi r12,r1,THREAD_SHIFT |
| 176 | 176 | ||
| 177 | ld r8,_MSR(r1) | 177 | ld r8,_MSR(r1) |
| 178 | #ifdef CONFIG_PPC_BOOK3S | 178 | #ifdef CONFIG_PPC_BOOK3S |
| 179 | /* No MSR:RI on BookE */ | 179 | /* No MSR:RI on BookE */ |
| 180 | andi. r10,r8,MSR_RI | 180 | andi. r10,r8,MSR_RI |
| 181 | beq- unrecov_restore | 181 | beq- unrecov_restore |
| 182 | #endif | 182 | #endif |
| 183 | 183 | ||
| 184 | /* Disable interrupts so current_thread_info()->flags can't change, | 184 | /* Disable interrupts so current_thread_info()->flags can't change, |
| 185 | * and so that we don't get interrupted after loading SRR0/1. | 185 | * and so that we don't get interrupted after loading SRR0/1. |
| 186 | */ | 186 | */ |
| 187 | #ifdef CONFIG_PPC_BOOK3E | 187 | #ifdef CONFIG_PPC_BOOK3E |
| 188 | wrteei 0 | 188 | wrteei 0 |
| 189 | #else | 189 | #else |
| 190 | mfmsr r10 | 190 | mfmsr r10 |
| 191 | rldicl r10,r10,48,1 | 191 | rldicl r10,r10,48,1 |
| 192 | rotldi r10,r10,16 | 192 | rotldi r10,r10,16 |
| 193 | mtmsrd r10,1 | 193 | mtmsrd r10,1 |
| 194 | #endif /* CONFIG_PPC_BOOK3E */ | 194 | #endif /* CONFIG_PPC_BOOK3E */ |
| 195 | 195 | ||
| 196 | ld r9,TI_FLAGS(r12) | 196 | ld r9,TI_FLAGS(r12) |
| 197 | li r11,-_LAST_ERRNO | 197 | li r11,-_LAST_ERRNO |
| 198 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) | 198 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) |
| 199 | bne- syscall_exit_work | 199 | bne- syscall_exit_work |
| 200 | cmpld r3,r11 | 200 | cmpld r3,r11 |
| 201 | ld r5,_CCR(r1) | 201 | ld r5,_CCR(r1) |
| 202 | bge- syscall_error | 202 | bge- syscall_error |
| 203 | syscall_error_cont: | 203 | syscall_error_cont: |
| 204 | ld r7,_NIP(r1) | 204 | ld r7,_NIP(r1) |
| 205 | stdcx. r0,0,r1 /* to clear the reservation */ | 205 | stdcx. r0,0,r1 /* to clear the reservation */ |
| 206 | andi. r6,r8,MSR_PR | 206 | andi. r6,r8,MSR_PR |
| 207 | ld r4,_LINK(r1) | 207 | ld r4,_LINK(r1) |
| 208 | /* | 208 | /* |
| 209 | * Clear RI before restoring r13. If we are returning to | 209 | * Clear RI before restoring r13. If we are returning to |
| 210 | * userspace and we take an exception after restoring r13, | 210 | * userspace and we take an exception after restoring r13, |
| 211 | * we end up corrupting the userspace r13 value. | 211 | * we end up corrupting the userspace r13 value. |
| 212 | */ | 212 | */ |
| 213 | #ifdef CONFIG_PPC_BOOK3S | 213 | #ifdef CONFIG_PPC_BOOK3S |
| 214 | /* No MSR:RI on BookE */ | 214 | /* No MSR:RI on BookE */ |
| 215 | li r12,MSR_RI | 215 | li r12,MSR_RI |
| 216 | andc r11,r10,r12 | 216 | andc r11,r10,r12 |
| 217 | mtmsrd r11,1 /* clear MSR.RI */ | 217 | mtmsrd r11,1 /* clear MSR.RI */ |
| 218 | #endif /* CONFIG_PPC_BOOK3S */ | 218 | #endif /* CONFIG_PPC_BOOK3S */ |
| 219 | 219 | ||
| 220 | beq- 1f | 220 | beq- 1f |
| 221 | ACCOUNT_CPU_USER_EXIT(r11, r12) | 221 | ACCOUNT_CPU_USER_EXIT(r11, r12) |
| 222 | ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ | 222 | ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ |
| 223 | 1: ld r2,GPR2(r1) | 223 | 1: ld r2,GPR2(r1) |
| 224 | ld r1,GPR1(r1) | 224 | ld r1,GPR1(r1) |
| 225 | mtlr r4 | 225 | mtlr r4 |
| 226 | mtcr r5 | 226 | mtcr r5 |
| 227 | mtspr SPRN_SRR0,r7 | 227 | mtspr SPRN_SRR0,r7 |
| 228 | mtspr SPRN_SRR1,r8 | 228 | mtspr SPRN_SRR1,r8 |
| 229 | RFI | 229 | RFI |
| 230 | b . /* prevent speculative execution */ | 230 | b . /* prevent speculative execution */ |
| 231 | 231 | ||
| 232 | syscall_error: | 232 | syscall_error: |
| 233 | oris r5,r5,0x1000 /* Set SO bit in CR */ | 233 | oris r5,r5,0x1000 /* Set SO bit in CR */ |
| 234 | neg r3,r3 | 234 | neg r3,r3 |
| 235 | std r5,_CCR(r1) | 235 | std r5,_CCR(r1) |
| 236 | b syscall_error_cont | 236 | b syscall_error_cont |
| 237 | 237 | ||
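
Together with the crclr so at syscall entry, syscall_error implements the PowerPC error-return convention: a handler's negative errno is negated back to a positive value in r3 and CR0.SO is set, while successful calls leave CR0.SO clear. A hedged, user-side C sketch of how a libc wrapper would decode this (the function is illustrative, not a real interface):

    #include <errno.h>

    /* CR0.SO set: r3 carries a positive errno (syscall_error negated
     * the handler's -err). CR0.SO clear: r3 is the syscall result. */
    static long decode_ppc64_syscall(long r3, int cr0_so)
    {
        if (cr0_so) {
            errno = (int)r3;
            return -1;
        }
        return r3;
    }
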
| 238 | /* Traced system call support */ | 238 | /* Traced system call support */ |
| 239 | syscall_dotrace: | 239 | syscall_dotrace: |
| 240 | bl .save_nvgprs | 240 | bl .save_nvgprs |
| 241 | addi r3,r1,STACK_FRAME_OVERHEAD | 241 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 242 | bl .do_syscall_trace_enter | 242 | bl .do_syscall_trace_enter |
| 243 | /* | 243 | /* |
| 244 | * Restore argument registers possibly just changed. | 244 | * Restore argument registers possibly just changed. |
| 245 | * We use the return value of do_syscall_trace_enter | 245 | * We use the return value of do_syscall_trace_enter |
| 246 | * for the call number to look up in the table (r0). | 246 | * for the call number to look up in the table (r0). |
| 247 | */ | 247 | */ |
| 248 | mr r0,r3 | 248 | mr r0,r3 |
| 249 | ld r3,GPR3(r1) | 249 | ld r3,GPR3(r1) |
| 250 | ld r4,GPR4(r1) | 250 | ld r4,GPR4(r1) |
| 251 | ld r5,GPR5(r1) | 251 | ld r5,GPR5(r1) |
| 252 | ld r6,GPR6(r1) | 252 | ld r6,GPR6(r1) |
| 253 | ld r7,GPR7(r1) | 253 | ld r7,GPR7(r1) |
| 254 | ld r8,GPR8(r1) | 254 | ld r8,GPR8(r1) |
| 255 | addi r9,r1,STACK_FRAME_OVERHEAD | 255 | addi r9,r1,STACK_FRAME_OVERHEAD |
| 256 | clrrdi r10,r1,THREAD_SHIFT | 256 | clrrdi r10,r1,THREAD_SHIFT |
| 257 | ld r10,TI_FLAGS(r10) | 257 | ld r10,TI_FLAGS(r10) |
| 258 | b syscall_dotrace_cont | 258 | b syscall_dotrace_cont |
| 259 | 259 | ||
| 260 | syscall_enosys: | 260 | syscall_enosys: |
| 261 | li r3,-ENOSYS | 261 | li r3,-ENOSYS |
| 262 | b syscall_exit | 262 | b syscall_exit |
| 263 | 263 | ||
| 264 | syscall_exit_work: | 264 | syscall_exit_work: |
| 265 | /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr. | 265 | /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr. |
| 266 | If TIF_NOERROR is set, just save r3 as it is. */ | 266 | If TIF_NOERROR is set, just save r3 as it is. */ |
| 267 | 267 | ||
| 268 | andi. r0,r9,_TIF_RESTOREALL | 268 | andi. r0,r9,_TIF_RESTOREALL |
| 269 | beq+ 0f | 269 | beq+ 0f |
| 270 | REST_NVGPRS(r1) | 270 | REST_NVGPRS(r1) |
| 271 | b 2f | 271 | b 2f |
| 272 | 0: cmpld r3,r11 /* r11 is -LAST_ERRNO */ | 272 | 0: cmpld r3,r11 /* r11 is -LAST_ERRNO */ |
| 273 | blt+ 1f | 273 | blt+ 1f |
| 274 | andi. r0,r9,_TIF_NOERROR | 274 | andi. r0,r9,_TIF_NOERROR |
| 275 | bne- 1f | 275 | bne- 1f |
| 276 | ld r5,_CCR(r1) | 276 | ld r5,_CCR(r1) |
| 277 | neg r3,r3 | 277 | neg r3,r3 |
| 278 | oris r5,r5,0x1000 /* Set SO bit in CR */ | 278 | oris r5,r5,0x1000 /* Set SO bit in CR */ |
| 279 | std r5,_CCR(r1) | 279 | std r5,_CCR(r1) |
| 280 | 1: std r3,GPR3(r1) | 280 | 1: std r3,GPR3(r1) |
| 281 | 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) | 281 | 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) |
| 282 | beq 4f | 282 | beq 4f |
| 283 | 283 | ||
| 284 | /* Clear per-syscall TIF flags if any are set. */ | 284 | /* Clear per-syscall TIF flags if any are set. */ |
| 285 | 285 | ||
| 286 | li r11,_TIF_PERSYSCALL_MASK | 286 | li r11,_TIF_PERSYSCALL_MASK |
| 287 | addi r12,r12,TI_FLAGS | 287 | addi r12,r12,TI_FLAGS |
| 288 | 3: ldarx r10,0,r12 | 288 | 3: ldarx r10,0,r12 |
| 289 | andc r10,r10,r11 | 289 | andc r10,r10,r11 |
| 290 | stdcx. r10,0,r12 | 290 | stdcx. r10,0,r12 |
| 291 | bne- 3b | 291 | bne- 3b |
| 292 | subi r12,r12,TI_FLAGS | 292 | subi r12,r12,TI_FLAGS |
| 293 | 293 | ||
| 294 | 4: /* Anything else left to do? */ | 294 | 4: /* Anything else left to do? */ |
| 295 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | 295 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) |
| 296 | beq .ret_from_except_lite | 296 | beq .ret_from_except_lite |
| 297 | 297 | ||
| 298 | /* Re-enable interrupts */ | 298 | /* Re-enable interrupts */ |
| 299 | #ifdef CONFIG_PPC_BOOK3E | 299 | #ifdef CONFIG_PPC_BOOK3E |
| 300 | wrteei 1 | 300 | wrteei 1 |
| 301 | #else | 301 | #else |
| 302 | mfmsr r10 | 302 | mfmsr r10 |
| 303 | ori r10,r10,MSR_EE | 303 | ori r10,r10,MSR_EE |
| 304 | mtmsrd r10,1 | 304 | mtmsrd r10,1 |
| 305 | #endif /* CONFIG_PPC_BOOK3E */ | 305 | #endif /* CONFIG_PPC_BOOK3E */ |
| 306 | 306 | ||
| 307 | bl .save_nvgprs | 307 | bl .save_nvgprs |
| 308 | addi r3,r1,STACK_FRAME_OVERHEAD | 308 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 309 | bl .do_syscall_trace_leave | 309 | bl .do_syscall_trace_leave |
| 310 | b .ret_from_except | 310 | b .ret_from_except |
| 311 | 311 | ||
| 312 | /* Save non-volatile GPRs, if not already saved. */ | 312 | /* Save non-volatile GPRs, if not already saved. */ |
| 313 | _GLOBAL(save_nvgprs) | 313 | _GLOBAL(save_nvgprs) |
| 314 | ld r11,_TRAP(r1) | 314 | ld r11,_TRAP(r1) |
| 315 | andi. r0,r11,1 | 315 | andi. r0,r11,1 |
| 316 | beqlr- | 316 | beqlr- |
| 317 | SAVE_NVGPRS(r1) | 317 | SAVE_NVGPRS(r1) |
| 318 | clrrdi r0,r11,1 | 318 | clrrdi r0,r11,1 |
| 319 | std r0,_TRAP(r1) | 319 | std r0,_TRAP(r1) |
| 320 | blr | 320 | blr |
| 321 | 321 | ||
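
save_nvgprs above keys off the low bit of the saved trap word: bit 0 set means the non-volatile GPRs have not been saved yet, so it stores r14-r31 and clears the bit, and ret_from_except later tests the same bit to decide whether REST_NVGPRS is safe. A C-shaped sketch of that convention (the struct and names are illustrative):

    /* Sketch of the _TRAP low-bit convention used by save_nvgprs. */
    struct pt_regs_sketch {
        unsigned long trap;     /* bit 0 set => nvgprs not yet saved */
        unsigned long gpr[32];
    };

    static void save_nvgprs_sketch(struct pt_regs_sketch *regs,
                                   const unsigned long live_gpr[32])
    {
        int i;

        if (!(regs->trap & 1))
            return;             /* already saved, nothing to do */
        for (i = 14; i <= 31; i++)
            regs->gpr[i] = live_gpr[i];
        regs->trap &= ~1UL;     /* mark the frame as fully populated */
    }
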
| 322 | 322 | ||
| 323 | /* | 323 | /* |
| 324 | * The sigsuspend and rt_sigsuspend system calls can call do_signal | 324 | * The sigsuspend and rt_sigsuspend system calls can call do_signal |
| 325 | * and thus put the process into the stopped state where we might | 325 | * and thus put the process into the stopped state where we might |
| 326 | * want to examine its user state with ptrace. Therefore we need | 326 | * want to examine its user state with ptrace. Therefore we need |
| 327 | * to save all the nonvolatile registers (r14 - r31) before calling | 327 | * to save all the nonvolatile registers (r14 - r31) before calling |
| 328 | * the C code. Similarly, fork, vfork and clone need the full | 328 | * the C code. Similarly, fork, vfork and clone need the full |
| 329 | * register state on the stack so that it can be copied to the child. | 329 | * register state on the stack so that it can be copied to the child. |
| 330 | */ | 330 | */ |
| 331 | 331 | ||
| 332 | _GLOBAL(ppc_fork) | 332 | _GLOBAL(ppc_fork) |
| 333 | bl .save_nvgprs | 333 | bl .save_nvgprs |
| 334 | bl .sys_fork | 334 | bl .sys_fork |
| 335 | b syscall_exit | 335 | b syscall_exit |
| 336 | 336 | ||
| 337 | _GLOBAL(ppc_vfork) | 337 | _GLOBAL(ppc_vfork) |
| 338 | bl .save_nvgprs | 338 | bl .save_nvgprs |
| 339 | bl .sys_vfork | 339 | bl .sys_vfork |
| 340 | b syscall_exit | 340 | b syscall_exit |
| 341 | 341 | ||
| 342 | _GLOBAL(ppc_clone) | 342 | _GLOBAL(ppc_clone) |
| 343 | bl .save_nvgprs | 343 | bl .save_nvgprs |
| 344 | bl .sys_clone | 344 | bl .sys_clone |
| 345 | b syscall_exit | 345 | b syscall_exit |
| 346 | 346 | ||
| 347 | _GLOBAL(ppc32_swapcontext) | 347 | _GLOBAL(ppc32_swapcontext) |
| 348 | bl .save_nvgprs | 348 | bl .save_nvgprs |
| 349 | bl .compat_sys_swapcontext | 349 | bl .compat_sys_swapcontext |
| 350 | b syscall_exit | 350 | b syscall_exit |
| 351 | 351 | ||
| 352 | _GLOBAL(ppc64_swapcontext) | 352 | _GLOBAL(ppc64_swapcontext) |
| 353 | bl .save_nvgprs | 353 | bl .save_nvgprs |
| 354 | bl .sys_swapcontext | 354 | bl .sys_swapcontext |
| 355 | b syscall_exit | 355 | b syscall_exit |
| 356 | 356 | ||
| 357 | _GLOBAL(ret_from_fork) | 357 | _GLOBAL(ret_from_fork) |
| 358 | bl .schedule_tail | 358 | bl .schedule_tail |
| 359 | REST_NVGPRS(r1) | 359 | REST_NVGPRS(r1) |
| 360 | li r3,0 | 360 | li r3,0 |
| 361 | b syscall_exit | 361 | b syscall_exit |
| 362 | 362 | ||
| 363 | /* | 363 | /* |
| 364 | * This routine switches between two different tasks. The process | 364 | * This routine switches between two different tasks. The process |
| 365 | * state of one is saved on its kernel stack. Then the state | 365 | * state of one is saved on its kernel stack. Then the state |
| 366 | * of the other is restored from its kernel stack. The memory | 366 | * of the other is restored from its kernel stack. The memory |
| 367 | * management hardware is updated to the second process's state. | 367 | * management hardware is updated to the second process's state. |
| 368 | * Finally, we can return to the second process, via ret_from_except. | 368 | * Finally, we can return to the second process, via ret_from_except. |
| 369 | * On entry, r3 points to the THREAD for the current task, r4 | 369 | * On entry, r3 points to the THREAD for the current task, r4 |
| 370 | * points to the THREAD for the new task. | 370 | * points to the THREAD for the new task. |
| 371 | * | 371 | * |
| 372 | * Note: there are two ways to get to the "going out" portion | 372 | * Note: there are two ways to get to the "going out" portion |
| 373 | * of this code; either by coming in via the entry (_switch) | 373 | * of this code; either by coming in via the entry (_switch) |
| 374 | * or via "fork" which must set up an environment equivalent | 374 | * or via "fork" which must set up an environment equivalent |
| 375 | * to the "_switch" path. If you change this you'll have to change | 375 | * to the "_switch" path. If you change this you'll have to change |
| 376 | * the fork code also. | 376 | * the fork code also. |
| 377 | * | 377 | * |
| 378 | * The code which creates the new task context is in 'copy_thread' | 378 | * The code which creates the new task context is in 'copy_thread' |
| 379 | * in arch/powerpc/kernel/process.c | 379 | * in arch/powerpc/kernel/process.c |
| 380 | */ | 380 | */ |
| 381 | .align 7 | 381 | .align 7 |
| 382 | _GLOBAL(_switch) | 382 | _GLOBAL(_switch) |
| 383 | mflr r0 | 383 | mflr r0 |
| 384 | std r0,16(r1) | 384 | std r0,16(r1) |
| 385 | stdu r1,-SWITCH_FRAME_SIZE(r1) | 385 | stdu r1,-SWITCH_FRAME_SIZE(r1) |
| 386 | /* r3-r13 are caller saved -- Cort */ | 386 | /* r3-r13 are caller saved -- Cort */ |
| 387 | SAVE_8GPRS(14, r1) | 387 | SAVE_8GPRS(14, r1) |
| 388 | SAVE_10GPRS(22, r1) | 388 | SAVE_10GPRS(22, r1) |
| 389 | mflr r20 /* Return to switch caller */ | 389 | mflr r20 /* Return to switch caller */ |
| 390 | mfmsr r22 | 390 | mfmsr r22 |
| 391 | li r0, MSR_FP | 391 | li r0, MSR_FP |
| 392 | #ifdef CONFIG_VSX | 392 | #ifdef CONFIG_VSX |
| 393 | BEGIN_FTR_SECTION | 393 | BEGIN_FTR_SECTION |
| 394 | oris r0,r0,MSR_VSX@h /* Disable VSX */ | 394 | oris r0,r0,MSR_VSX@h /* Disable VSX */ |
| 395 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | 395 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) |
| 396 | #endif /* CONFIG_VSX */ | 396 | #endif /* CONFIG_VSX */ |
| 397 | #ifdef CONFIG_ALTIVEC | 397 | #ifdef CONFIG_ALTIVEC |
| 398 | BEGIN_FTR_SECTION | 398 | BEGIN_FTR_SECTION |
| 399 | oris r0,r0,MSR_VEC@h /* Disable altivec */ | 399 | oris r0,r0,MSR_VEC@h /* Disable altivec */ |
| 400 | mfspr r24,SPRN_VRSAVE /* save vrsave register value */ | 400 | mfspr r24,SPRN_VRSAVE /* save vrsave register value */ |
| 401 | std r24,THREAD_VRSAVE(r3) | 401 | std r24,THREAD_VRSAVE(r3) |
| 402 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 402 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
| 403 | #endif /* CONFIG_ALTIVEC */ | 403 | #endif /* CONFIG_ALTIVEC */ |
| 404 | and. r0,r0,r22 | 404 | and. r0,r0,r22 |
| 405 | beq+ 1f | 405 | beq+ 1f |
| 406 | andc r22,r22,r0 | 406 | andc r22,r22,r0 |
| 407 | MTMSRD(r22) | 407 | MTMSRD(r22) |
| 408 | isync | 408 | isync |
| 409 | 1: std r20,_NIP(r1) | 409 | 1: std r20,_NIP(r1) |
| 410 | mfcr r23 | 410 | mfcr r23 |
| 411 | std r23,_CCR(r1) | 411 | std r23,_CCR(r1) |
| 412 | std r1,KSP(r3) /* Set old stack pointer */ | 412 | std r1,KSP(r3) /* Set old stack pointer */ |
| 413 | 413 | ||
| 414 | #ifdef CONFIG_SMP | 414 | #ifdef CONFIG_SMP |
| 415 | /* We need a sync somewhere here to make sure that if the | 415 | /* We need a sync somewhere here to make sure that if the |
| 416 | * previous task gets rescheduled on another CPU, it sees all | 416 | * previous task gets rescheduled on another CPU, it sees all |
| 417 | * stores it has performed on this one. | 417 | * stores it has performed on this one. |
| 418 | */ | 418 | */ |
| 419 | sync | 419 | sync |
| 420 | #endif /* CONFIG_SMP */ | 420 | #endif /* CONFIG_SMP */ |
| 421 | 421 | ||
| 422 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ | 422 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ |
| 423 | std r6,PACACURRENT(r13) /* Set new 'current' */ | 423 | std r6,PACACURRENT(r13) /* Set new 'current' */ |
| 424 | 424 | ||
| 425 | ld r8,KSP(r4) /* new stack pointer */ | 425 | ld r8,KSP(r4) /* new stack pointer */ |
| 426 | #ifdef CONFIG_PPC_BOOK3S | 426 | #ifdef CONFIG_PPC_BOOK3S |
| 427 | BEGIN_FTR_SECTION | 427 | BEGIN_FTR_SECTION |
| 428 | BEGIN_FTR_SECTION_NESTED(95) | 428 | BEGIN_FTR_SECTION_NESTED(95) |
| 429 | clrrdi r6,r8,28 /* get its ESID */ | 429 | clrrdi r6,r8,28 /* get its ESID */ |
| 430 | clrrdi r9,r1,28 /* get current sp ESID */ | 430 | clrrdi r9,r1,28 /* get current sp ESID */ |
| 431 | FTR_SECTION_ELSE_NESTED(95) | 431 | FTR_SECTION_ELSE_NESTED(95) |
| 432 | clrrdi r6,r8,40 /* get its 1T ESID */ | 432 | clrrdi r6,r8,40 /* get its 1T ESID */ |
| 433 | clrrdi r9,r1,40 /* get current sp 1T ESID */ | 433 | clrrdi r9,r1,40 /* get current sp 1T ESID */ |
| 434 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95) | 434 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95) |
| 435 | FTR_SECTION_ELSE | 435 | FTR_SECTION_ELSE |
| 436 | b 2f | 436 | b 2f |
| 437 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB) | 437 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB) |
| 438 | clrldi. r0,r6,2 /* is new ESID c00000000? */ | 438 | clrldi. r0,r6,2 /* is new ESID c00000000? */ |
| 439 | cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ | 439 | cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ |
| 440 | cror eq,4*cr1+eq,eq | 440 | cror eq,4*cr1+eq,eq |
| 441 | beq 2f /* if yes, don't slbie it */ | 441 | beq 2f /* if yes, don't slbie it */ |
| 442 | 442 | ||
| 443 | /* Bolt in the new stack SLB entry */ | 443 | /* Bolt in the new stack SLB entry */ |
| 444 | ld r7,KSP_VSID(r4) /* Get new stack's VSID */ | 444 | ld r7,KSP_VSID(r4) /* Get new stack's VSID */ |
| 445 | oris r0,r6,(SLB_ESID_V)@h | 445 | oris r0,r6,(SLB_ESID_V)@h |
| 446 | ori r0,r0,(SLB_NUM_BOLTED-1)@l | 446 | ori r0,r0,(SLB_NUM_BOLTED-1)@l |
| 447 | BEGIN_FTR_SECTION | 447 | BEGIN_FTR_SECTION |
| 448 | li r9,MMU_SEGSIZE_1T /* insert B field */ | 448 | li r9,MMU_SEGSIZE_1T /* insert B field */ |
| 449 | oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h | 449 | oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h |
| 450 | rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 | 450 | rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 |
| 451 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | 451 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) |
| 452 | 452 | ||
| 453 | /* Update the last bolted SLB. No write barriers are needed | 453 | /* Update the last bolted SLB. No write barriers are needed |
| 454 | * here, provided we only update the current CPU's SLB shadow | 454 | * here, provided we only update the current CPU's SLB shadow |
| 455 | * buffer. | 455 | * buffer. |
| 456 | */ | 456 | */ |
| 457 | ld r9,PACA_SLBSHADOWPTR(r13) | 457 | ld r9,PACA_SLBSHADOWPTR(r13) |
| 458 | li r12,0 | 458 | li r12,0 |
| 459 | std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ | 459 | std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ |
| 460 | std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ | 460 | std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ |
| 461 | std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ | 461 | std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ |
| 462 | 462 | ||
| 463 | /* No need to check for CPU_FTR_NO_SLBIE_B here, since when | 463 | /* No need to check for CPU_FTR_NO_SLBIE_B here, since when |
| 464 | * we have 1TB segments, the only CPUs known to have the erratum | 464 | * we have 1TB segments, the only CPUs known to have the erratum |
| 465 | * only support less than 1TB of system memory and we'll never | 465 | * only support less than 1TB of system memory and we'll never |
| 466 | * actually hit this code path. | 466 | * actually hit this code path. |
| 467 | */ | 467 | */ |
| 468 | 468 | ||
| 469 | slbie r6 | 469 | slbie r6 |
| 470 | slbie r6 /* Workaround POWER5 < DD2.1 issue */ | 470 | slbie r6 /* Workaround POWER5 < DD2.1 issue */ |
| 471 | slbmte r7,r0 | 471 | slbmte r7,r0 |
| 472 | isync | 472 | isync |
| 473 | 2: | 473 | 2: |
| 474 | #endif /* CONFIG_PPC_BOOK3S */ | 474 | #endif /* CONFIG_PPC_BOOK3S */ |
| 475 | 475 | ||
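
The clrrdi pairs in the SLB block above compute a stack's ESID by clearing low-order effective-address bits: 28 bits for 256MB segments and 40 bits for 1T segments. The same arithmetic in C:

    /* clrrdi rD,rS,28 / clrrdi rD,rS,40 as plain masks. */
    static unsigned long esid_256m(unsigned long ea)
    {
        return ea & ~((1UL << 28) - 1);
    }

    static unsigned long esid_1t(unsigned long ea)
    {
        return ea & ~((1UL << 40) - 1);
    }
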
| 476 | clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ | 476 | clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ |
| 477 | /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE | 477 | /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE |
| 478 | because we don't need to leave the 288-byte ABI gap at the | 478 | because we don't need to leave the 288-byte ABI gap at the |
| 479 | top of the kernel stack. */ | 479 | top of the kernel stack. */ |
| 480 | addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE | 480 | addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE |
| 481 | 481 | ||
| 482 | mr r1,r8 /* start using new stack pointer */ | 482 | mr r1,r8 /* start using new stack pointer */ |
| 483 | std r7,PACAKSAVE(r13) | 483 | std r7,PACAKSAVE(r13) |
| 484 | 484 | ||
| 485 | ld r6,_CCR(r1) | 485 | ld r6,_CCR(r1) |
| 486 | mtcrf 0xFF,r6 | 486 | mtcrf 0xFF,r6 |
| 487 | 487 | ||
| 488 | #ifdef CONFIG_ALTIVEC | 488 | #ifdef CONFIG_ALTIVEC |
| 489 | BEGIN_FTR_SECTION | 489 | BEGIN_FTR_SECTION |
| 490 | ld r0,THREAD_VRSAVE(r4) | 490 | ld r0,THREAD_VRSAVE(r4) |
| 491 | mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ | 491 | mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ |
| 492 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 492 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
| 493 | #endif /* CONFIG_ALTIVEC */ | 493 | #endif /* CONFIG_ALTIVEC */ |
| 494 | 494 | ||
| 495 | /* r3-r13 are destroyed -- Cort */ | 495 | /* r3-r13 are destroyed -- Cort */ |
| 496 | REST_8GPRS(14, r1) | 496 | REST_8GPRS(14, r1) |
| 497 | REST_10GPRS(22, r1) | 497 | REST_10GPRS(22, r1) |
| 498 | 498 | ||
| 499 | /* convert old thread to its task_struct for return value */ | 499 | /* convert old thread to its task_struct for return value */ |
| 500 | addi r3,r3,-THREAD | 500 | addi r3,r3,-THREAD |
| 501 | ld r7,_NIP(r1) /* Return to _switch caller in new task */ | 501 | ld r7,_NIP(r1) /* Return to _switch caller in new task */ |
| 502 | mtlr r7 | 502 | mtlr r7 |
| 503 | addi r1,r1,SWITCH_FRAME_SIZE | 503 | addi r1,r1,SWITCH_FRAME_SIZE |
| 504 | blr | 504 | blr |
| 505 | 505 | ||
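
As the block comment before _switch spells out, r3 and r4 carry the outgoing and incoming THREAD pointers and the previous task's task_struct comes back in r3. Seen from C, the contract looks roughly like this sketch (the prototype is stated as an assumption about the caller in arch/powerpc/kernel/process.c):

    struct task_struct;
    struct thread_struct;

    /* Assumed contract: save the state of *prev, restore *next, and
     * return the task_struct of the task we switched away from. */
    extern struct task_struct *_switch(struct thread_struct *prev,
                                       struct thread_struct *next);

    /* Typical use inside __switch_to() would be along the lines of:
     *     last = _switch(&prev_task->thread, &next_task->thread);    */
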
| 506 | .align 7 | 506 | .align 7 |
| 507 | _GLOBAL(ret_from_except) | 507 | _GLOBAL(ret_from_except) |
| 508 | ld r11,_TRAP(r1) | 508 | ld r11,_TRAP(r1) |
| 509 | andi. r0,r11,1 | 509 | andi. r0,r11,1 |
| 510 | bne .ret_from_except_lite | 510 | bne .ret_from_except_lite |
| 511 | REST_NVGPRS(r1) | 511 | REST_NVGPRS(r1) |
| 512 | 512 | ||
| 513 | _GLOBAL(ret_from_except_lite) | 513 | _GLOBAL(ret_from_except_lite) |
| 514 | /* | 514 | /* |
| 515 | * Disable interrupts so that current_thread_info()->flags | 515 | * Disable interrupts so that current_thread_info()->flags |
| 516 | * can't change between when we test it and when we return | 516 | * can't change between when we test it and when we return |
| 517 | * from the interrupt. | 517 | * from the interrupt. |
| 518 | */ | 518 | */ |
| 519 | #ifdef CONFIG_PPC_BOOK3E | 519 | #ifdef CONFIG_PPC_BOOK3E |
| 520 | wrteei 0 | 520 | wrteei 0 |
| 521 | #else | 521 | #else |
| 522 | mfmsr r10 /* Get current interrupt state */ | 522 | mfmsr r10 /* Get current interrupt state */ |
| 523 | rldicl r9,r10,48,1 /* clear MSR_EE */ | 523 | rldicl r9,r10,48,1 /* clear MSR_EE */ |
| 524 | rotldi r9,r9,16 | 524 | rotldi r9,r9,16 |
| 525 | mtmsrd r9,1 /* Update machine state */ | 525 | mtmsrd r9,1 /* Update machine state */ |
| 526 | #endif /* CONFIG_PPC_BOOK3E */ | 526 | #endif /* CONFIG_PPC_BOOK3E */ |
| 527 | 527 | ||
| 528 | #ifdef CONFIG_PREEMPT | 528 | #ifdef CONFIG_PREEMPT |
| 529 | clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ | 529 | clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ |
| 530 | li r0,_TIF_NEED_RESCHED /* bits to check */ | 530 | li r0,_TIF_NEED_RESCHED /* bits to check */ |
| 531 | ld r3,_MSR(r1) | 531 | ld r3,_MSR(r1) |
| 532 | ld r4,TI_FLAGS(r9) | 532 | ld r4,TI_FLAGS(r9) |
| 533 | /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */ | 533 | /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */ |
| 534 | rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING | 534 | rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING |
| 535 | and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */ | 535 | and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */ |
| 536 | bne do_work | 536 | bne do_work |
| 537 | 537 | ||
| 538 | #else /* !CONFIG_PREEMPT */ | 538 | #else /* !CONFIG_PREEMPT */ |
| 539 | ld r3,_MSR(r1) /* Returning to user mode? */ | 539 | ld r3,_MSR(r1) /* Returning to user mode? */ |
| 540 | andi. r3,r3,MSR_PR | 540 | andi. r3,r3,MSR_PR |
| 541 | beq restore /* if not, just restore regs and return */ | 541 | beq restore /* if not, just restore regs and return */ |
| 542 | 542 | ||
| 543 | /* Check current_thread_info()->flags */ | 543 | /* Check current_thread_info()->flags */ |
| 544 | clrrdi r9,r1,THREAD_SHIFT | 544 | clrrdi r9,r1,THREAD_SHIFT |
| 545 | ld r4,TI_FLAGS(r9) | 545 | ld r4,TI_FLAGS(r9) |
| 546 | andi. r0,r4,_TIF_USER_WORK_MASK | 546 | andi. r0,r4,_TIF_USER_WORK_MASK |
| 547 | bne do_work | 547 | bne do_work |
| 548 | #endif | 548 | #endif |
| 549 | 549 | ||
| 550 | restore: | 550 | restore: |
| 551 | BEGIN_FW_FTR_SECTION | 551 | BEGIN_FW_FTR_SECTION |
| 552 | ld r5,SOFTE(r1) | 552 | ld r5,SOFTE(r1) |
| 553 | FW_FTR_SECTION_ELSE | 553 | FW_FTR_SECTION_ELSE |
| 554 | b iseries_check_pending_irqs | 554 | b .Liseries_check_pending_irqs |
| 555 | ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | 555 | ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) |
| 556 | 2: | 556 | 2: |
| 557 | TRACE_AND_RESTORE_IRQ(r5); | 557 | TRACE_AND_RESTORE_IRQ(r5); |
| 558 | 558 | ||
| 559 | #ifdef CONFIG_PERF_EVENTS | 559 | #ifdef CONFIG_PERF_EVENTS |
| 560 | /* check paca->perf_event_pending if we're enabling ints */ | 560 | /* check paca->perf_event_pending if we're enabling ints */ |
| 561 | lbz r3,PACAPERFPEND(r13) | 561 | lbz r3,PACAPERFPEND(r13) |
| 562 | and. r3,r3,r5 | 562 | and. r3,r3,r5 |
| 563 | beq 27f | 563 | beq 27f |
| 564 | bl .perf_event_do_pending | 564 | bl .perf_event_do_pending |
| 565 | 27: | 565 | 27: |
| 566 | #endif /* CONFIG_PERF_EVENTS */ | 566 | #endif /* CONFIG_PERF_EVENTS */ |
| 567 | 567 | ||
| 568 | /* extract EE bit and use it to restore paca->hard_enabled */ | 568 | /* extract EE bit and use it to restore paca->hard_enabled */ |
| 569 | ld r3,_MSR(r1) | 569 | ld r3,_MSR(r1) |
| 570 | rldicl r4,r3,49,63 /* r4 = (r3 >> 15) & 1 */ | 570 | rldicl r4,r3,49,63 /* r4 = (r3 >> 15) & 1 */ |
| 571 | stb r4,PACAHARDIRQEN(r13) | 571 | stb r4,PACAHARDIRQEN(r13) |
| 572 | 572 | ||
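
The rldicl above is the extraction the comment promises: MSR[EE] sits at bit 15 from the least-significant end (mask 0x8000), and rotating left by 49 then masking to the low bit is the same as shifting right by 15. In C:

    /* rldicl r4,r3,49,63 == (msr >> 15) & 1, i.e. extract MSR_EE. */
    static unsigned char msr_ee(unsigned long msr)
    {
        return (msr >> 15) & 1;
    }
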
| 573 | #ifdef CONFIG_PPC_BOOK3E | 573 | #ifdef CONFIG_PPC_BOOK3E |
| 574 | b .exception_return_book3e | 574 | b .exception_return_book3e |
| 575 | #else | 575 | #else |
| 576 | ld r4,_CTR(r1) | 576 | ld r4,_CTR(r1) |
| 577 | ld r0,_LINK(r1) | 577 | ld r0,_LINK(r1) |
| 578 | mtctr r4 | 578 | mtctr r4 |
| 579 | mtlr r0 | 579 | mtlr r0 |
| 580 | ld r4,_XER(r1) | 580 | ld r4,_XER(r1) |
| 581 | mtspr SPRN_XER,r4 | 581 | mtspr SPRN_XER,r4 |
| 582 | 582 | ||
| 583 | REST_8GPRS(5, r1) | 583 | REST_8GPRS(5, r1) |
| 584 | 584 | ||
| 585 | andi. r0,r3,MSR_RI | 585 | andi. r0,r3,MSR_RI |
| 586 | beq- unrecov_restore | 586 | beq- unrecov_restore |
| 587 | 587 | ||
| 588 | stdcx. r0,0,r1 /* to clear the reservation */ | 588 | stdcx. r0,0,r1 /* to clear the reservation */ |
| 589 | 589 | ||
| 590 | /* | 590 | /* |
| 591 | * Clear RI before restoring r13. If we are returning to | 591 | * Clear RI before restoring r13. If we are returning to |
| 592 | * userspace and we take an exception after restoring r13, | 592 | * userspace and we take an exception after restoring r13, |
| 593 | * we end up corrupting the userspace r13 value. | 593 | * we end up corrupting the userspace r13 value. |
| 594 | */ | 594 | */ |
| 595 | mfmsr r4 | 595 | mfmsr r4 |
| 596 | andc r4,r4,r0 /* r0 contains MSR_RI here */ | 596 | andc r4,r4,r0 /* r0 contains MSR_RI here */ |
| 597 | mtmsrd r4,1 | 597 | mtmsrd r4,1 |
| 598 | 598 | ||
| 599 | /* | 599 | /* |
| 600 | * r13 is our per cpu area, only restore it if we are returning to | 600 | * r13 is our per cpu area, only restore it if we are returning to |
| 601 | * userspace | 601 | * userspace |
| 602 | */ | 602 | */ |
| 603 | andi. r0,r3,MSR_PR | 603 | andi. r0,r3,MSR_PR |
| 604 | beq 1f | 604 | beq 1f |
| 605 | ACCOUNT_CPU_USER_EXIT(r2, r4) | 605 | ACCOUNT_CPU_USER_EXIT(r2, r4) |
| 606 | REST_GPR(13, r1) | 606 | REST_GPR(13, r1) |
| 607 | 1: | 607 | 1: |
| 608 | mtspr SPRN_SRR1,r3 | 608 | mtspr SPRN_SRR1,r3 |
| 609 | 609 | ||
| 610 | ld r2,_CCR(r1) | 610 | ld r2,_CCR(r1) |
| 611 | mtcrf 0xFF,r2 | 611 | mtcrf 0xFF,r2 |
| 612 | ld r2,_NIP(r1) | 612 | ld r2,_NIP(r1) |
| 613 | mtspr SPRN_SRR0,r2 | 613 | mtspr SPRN_SRR0,r2 |
| 614 | 614 | ||
| 615 | ld r0,GPR0(r1) | 615 | ld r0,GPR0(r1) |
| 616 | ld r2,GPR2(r1) | 616 | ld r2,GPR2(r1) |
| 617 | ld r3,GPR3(r1) | 617 | ld r3,GPR3(r1) |
| 618 | ld r4,GPR4(r1) | 618 | ld r4,GPR4(r1) |
| 619 | ld r1,GPR1(r1) | 619 | ld r1,GPR1(r1) |
| 620 | 620 | ||
| 621 | rfid | 621 | rfid |
| 622 | b . /* prevent speculative execution */ | 622 | b . /* prevent speculative execution */ |
| 623 | 623 | ||
| 624 | #endif /* CONFIG_PPC_BOOK3E */ | 624 | #endif /* CONFIG_PPC_BOOK3E */ |
| 625 | 625 | ||
| 626 | iseries_check_pending_irqs: | 626 | .Liseries_check_pending_irqs: |
| 627 | #ifdef CONFIG_PPC_ISERIES | 627 | #ifdef CONFIG_PPC_ISERIES |
| 628 | ld r5,SOFTE(r1) | 628 | ld r5,SOFTE(r1) |
| 629 | cmpdi 0,r5,0 | 629 | cmpdi 0,r5,0 |
| 630 | beq 2b | 630 | beq 2b |
| 631 | /* Check for pending interrupts (iSeries) */ | 631 | /* Check for pending interrupts (iSeries) */ |
| 632 | ld r3,PACALPPACAPTR(r13) | 632 | ld r3,PACALPPACAPTR(r13) |
| 633 | ld r3,LPPACAANYINT(r3) | 633 | ld r3,LPPACAANYINT(r3) |
| 634 | cmpdi r3,0 | 634 | cmpdi r3,0 |
| 635 | beq+ 2b /* skip do_IRQ if no interrupts */ | 635 | beq+ 2b /* skip do_IRQ if no interrupts */ |
| 636 | 636 | ||
| 637 | li r3,0 | 637 | li r3,0 |
| 638 | stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */ | 638 | stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */ |
| 639 | #ifdef CONFIG_TRACE_IRQFLAGS | 639 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 640 | bl .trace_hardirqs_off | 640 | bl .trace_hardirqs_off |
| 641 | mfmsr r10 | 641 | mfmsr r10 |
| 642 | #endif | 642 | #endif |
| 643 | ori r10,r10,MSR_EE | 643 | ori r10,r10,MSR_EE |
| 644 | mtmsrd r10 /* hard-enable again */ | 644 | mtmsrd r10 /* hard-enable again */ |
| 645 | addi r3,r1,STACK_FRAME_OVERHEAD | 645 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 646 | bl .do_IRQ | 646 | bl .do_IRQ |
| 647 | b .ret_from_except_lite /* loop back and handle more */ | 647 | b .ret_from_except_lite /* loop back and handle more */ |
| 648 | #endif | 648 | #endif |
| 649 | 649 | ||
| 650 | do_work: | 650 | do_work: |
| 651 | #ifdef CONFIG_PREEMPT | 651 | #ifdef CONFIG_PREEMPT |
| 652 | andi. r0,r3,MSR_PR /* Returning to user mode? */ | 652 | andi. r0,r3,MSR_PR /* Returning to user mode? */ |
| 653 | bne user_work | 653 | bne user_work |
| 654 | /* Check that preempt_count() == 0 and interrupts are enabled */ | 654 | /* Check that preempt_count() == 0 and interrupts are enabled */ |
| 655 | lwz r8,TI_PREEMPT(r9) | 655 | lwz r8,TI_PREEMPT(r9) |
| 656 | cmpwi cr1,r8,0 | 656 | cmpwi cr1,r8,0 |
| 657 | ld r0,SOFTE(r1) | 657 | ld r0,SOFTE(r1) |
| 658 | cmpdi r0,0 | 658 | cmpdi r0,0 |
| 659 | crandc eq,cr1*4+eq,eq | 659 | crandc eq,cr1*4+eq,eq |
| 660 | bne restore | 660 | bne restore |
| 661 | 661 | ||
| 662 | /* Here we are preempting the current task. | 662 | /* Here we are preempting the current task. |
| 663 | * | 663 | * |
| 664 | * Ensure interrupts are soft-disabled. We also properly mark | 664 | * Ensure interrupts are soft-disabled. We also properly mark |
| 665 | * the PACA to reflect the fact that they are hard-disabled | 665 | * the PACA to reflect the fact that they are hard-disabled |
| 666 | * and trace the change | 666 | * and trace the change |
| 667 | */ | 667 | */ |
| 668 | li r0,0 | 668 | li r0,0 |
| 669 | stb r0,PACASOFTIRQEN(r13) | 669 | stb r0,PACASOFTIRQEN(r13) |
| 670 | stb r0,PACAHARDIRQEN(r13) | 670 | stb r0,PACAHARDIRQEN(r13) |
| 671 | TRACE_DISABLE_INTS | 671 | TRACE_DISABLE_INTS |
| 672 | 672 | ||
| 673 | /* Call the scheduler with soft IRQs off */ | 673 | /* Call the scheduler with soft IRQs off */ |
| 674 | 1: bl .preempt_schedule_irq | 674 | 1: bl .preempt_schedule_irq |
| 675 | 675 | ||
| 676 | /* Hard-disable interrupts again (and update PACA) */ | 676 | /* Hard-disable interrupts again (and update PACA) */ |
| 677 | #ifdef CONFIG_PPC_BOOK3E | 677 | #ifdef CONFIG_PPC_BOOK3E |
| 678 | wrteei 0 | 678 | wrteei 0 |
| 679 | #else | 679 | #else |
| 680 | mfmsr r10 | 680 | mfmsr r10 |
| 681 | rldicl r10,r10,48,1 | 681 | rldicl r10,r10,48,1 |
| 682 | rotldi r10,r10,16 | 682 | rotldi r10,r10,16 |
| 683 | mtmsrd r10,1 | 683 | mtmsrd r10,1 |
| 684 | #endif /* CONFIG_PPC_BOOK3E */ | 684 | #endif /* CONFIG_PPC_BOOK3E */ |
| 685 | li r0,0 | 685 | li r0,0 |
| 686 | stb r0,PACAHARDIRQEN(r13) | 686 | stb r0,PACAHARDIRQEN(r13) |
| 687 | 687 | ||
| 688 | /* Re-test the flags and loop again if needed */ | 688 | /* Re-test the flags and loop again if needed */ |
| 689 | clrrdi r9,r1,THREAD_SHIFT | 689 | clrrdi r9,r1,THREAD_SHIFT |
| 690 | ld r4,TI_FLAGS(r9) | 690 | ld r4,TI_FLAGS(r9) |
| 691 | andi. r0,r4,_TIF_NEED_RESCHED | 691 | andi. r0,r4,_TIF_NEED_RESCHED |
| 692 | bne 1b | 692 | bne 1b |
| 693 | b restore | 693 | b restore |
| 694 | 694 | ||
| 695 | user_work: | 695 | user_work: |
| 696 | #endif /* CONFIG_PREEMPT */ | 696 | #endif /* CONFIG_PREEMPT */ |
| 697 | 697 | ||
| 698 | /* Enable interrupts */ | 698 | /* Enable interrupts */ |
| 699 | #ifdef CONFIG_PPC_BOOK3E | 699 | #ifdef CONFIG_PPC_BOOK3E |
| 700 | wrteei 1 | 700 | wrteei 1 |
| 701 | #else | 701 | #else |
| 702 | ori r10,r10,MSR_EE | 702 | ori r10,r10,MSR_EE |
| 703 | mtmsrd r10,1 | 703 | mtmsrd r10,1 |
| 704 | #endif /* CONFIG_PPC_BOOK3E */ | 704 | #endif /* CONFIG_PPC_BOOK3E */ |
| 705 | 705 | ||
| 706 | andi. r0,r4,_TIF_NEED_RESCHED | 706 | andi. r0,r4,_TIF_NEED_RESCHED |
| 707 | beq 1f | 707 | beq 1f |
| 708 | bl .schedule | 708 | bl .schedule |
| 709 | b .ret_from_except_lite | 709 | b .ret_from_except_lite |
| 710 | 710 | ||
| 711 | 1: bl .save_nvgprs | 711 | 1: bl .save_nvgprs |
| 712 | addi r3,r1,STACK_FRAME_OVERHEAD | 712 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 713 | bl .do_signal | 713 | bl .do_signal |
| 714 | b .ret_from_except | 714 | b .ret_from_except |
| 715 | 715 | ||
| 716 | unrecov_restore: | 716 | unrecov_restore: |
| 717 | addi r3,r1,STACK_FRAME_OVERHEAD | 717 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 718 | bl .unrecoverable_exception | 718 | bl .unrecoverable_exception |
| 719 | b unrecov_restore | 719 | b unrecov_restore |
| 720 | 720 | ||
| 721 | #ifdef CONFIG_PPC_RTAS | 721 | #ifdef CONFIG_PPC_RTAS |
| 722 | /* | 722 | /* |
| 723 | * On CHRP, the Run-Time Abstraction Services (RTAS) have to be | 723 | * On CHRP, the Run-Time Abstraction Services (RTAS) have to be |
| 724 | * called with the MMU off. | 724 | * called with the MMU off. |
| 725 | * | 725 | * |
| 726 | * In addition, we need to be in 32b mode, at least for now. | 726 | * In addition, we need to be in 32b mode, at least for now. |
| 727 | * | 727 | * |
| 728 | * Note: r3 is an input parameter to rtas, so don't trash it... | 728 | * Note: r3 is an input parameter to rtas, so don't trash it... |
| 729 | */ | 729 | */ |
| 730 | _GLOBAL(enter_rtas) | 730 | _GLOBAL(enter_rtas) |
| 731 | mflr r0 | 731 | mflr r0 |
| 732 | std r0,16(r1) | 732 | std r0,16(r1) |
| 733 | stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */ | 733 | stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */ |
| 734 | 734 | ||
| 735 | /* Because RTAS is running in 32b mode, it clobbers the high order half | 735 | /* Because RTAS is running in 32b mode, it clobbers the high order half |
| 736 | * of all registers that it saves. We therefore save those registers | 736 | * of all registers that it saves. We therefore save those registers |
| 737 | * RTAS might touch to the stack. (r0, r3-r13 are caller saved) | 737 | * RTAS might touch to the stack. (r0, r3-r13 are caller saved) |
| 738 | */ | 738 | */ |
| 739 | SAVE_GPR(2, r1) /* Save the TOC */ | 739 | SAVE_GPR(2, r1) /* Save the TOC */ |
| 740 | SAVE_GPR(13, r1) /* Save paca */ | 740 | SAVE_GPR(13, r1) /* Save paca */ |
| 741 | SAVE_8GPRS(14, r1) /* Save the non-volatiles */ | 741 | SAVE_8GPRS(14, r1) /* Save the non-volatiles */ |
| 742 | SAVE_10GPRS(22, r1) /* ditto */ | 742 | SAVE_10GPRS(22, r1) /* ditto */ |
| 743 | 743 | ||
| 744 | mfcr r4 | 744 | mfcr r4 |
| 745 | std r4,_CCR(r1) | 745 | std r4,_CCR(r1) |
| 746 | mfctr r5 | 746 | mfctr r5 |
| 747 | std r5,_CTR(r1) | 747 | std r5,_CTR(r1) |
| 748 | mfspr r6,SPRN_XER | 748 | mfspr r6,SPRN_XER |
| 749 | std r6,_XER(r1) | 749 | std r6,_XER(r1) |
| 750 | mfdar r7 | 750 | mfdar r7 |
| 751 | std r7,_DAR(r1) | 751 | std r7,_DAR(r1) |
| 752 | mfdsisr r8 | 752 | mfdsisr r8 |
| 753 | std r8,_DSISR(r1) | 753 | std r8,_DSISR(r1) |
| 754 | 754 | ||
| 755 | /* Temporary workaround to clear CR until RTAS can be modified to | 755 | /* Temporary workaround to clear CR until RTAS can be modified to |
| 756 | * ignore all bits. | 756 | * ignore all bits. |
| 757 | */ | 757 | */ |
| 758 | li r0,0 | 758 | li r0,0 |
| 759 | mtcr r0 | 759 | mtcr r0 |
| 760 | 760 | ||
| 761 | #ifdef CONFIG_BUG | 761 | #ifdef CONFIG_BUG |
| 762 | /* There is no way it is acceptable to get here with interrupts enabled, | 762 | /* There is no way it is acceptable to get here with interrupts enabled, |
| 763 | * check it with the asm equivalent of WARN_ON | 763 | * check it with the asm equivalent of WARN_ON |
| 764 | */ | 764 | */ |
| 765 | lbz r0,PACASOFTIRQEN(r13) | 765 | lbz r0,PACASOFTIRQEN(r13) |
| 766 | 1: tdnei r0,0 | 766 | 1: tdnei r0,0 |
| 767 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING | 767 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING |
| 768 | #endif | 768 | #endif |
| 769 | 769 | ||
| 770 | /* Hard-disable interrupts */ | 770 | /* Hard-disable interrupts */ |
| 771 | mfmsr r6 | 771 | mfmsr r6 |
| 772 | rldicl r7,r6,48,1 | 772 | rldicl r7,r6,48,1 |
| 773 | rotldi r7,r7,16 | 773 | rotldi r7,r7,16 |
| 774 | mtmsrd r7,1 | 774 | mtmsrd r7,1 |
| 775 | 775 | ||
| 776 | /* Unfortunately, the stack pointer and the MSR are also clobbered, | 776 | /* Unfortunately, the stack pointer and the MSR are also clobbered, |
| 777 | * so they are saved in the PACA which allows us to restore | 777 | * so they are saved in the PACA which allows us to restore |
| 778 | * our original state after RTAS returns. | 778 | * our original state after RTAS returns. |
| 779 | */ | 779 | */ |
| 780 | std r1,PACAR1(r13) | 780 | std r1,PACAR1(r13) |
| 781 | std r6,PACASAVEDMSR(r13) | 781 | std r6,PACASAVEDMSR(r13) |
| 782 | 782 | ||
| 783 | /* Setup our real return addr */ | 783 | /* Setup our real return addr */ |
| 784 | LOAD_REG_ADDR(r4,.rtas_return_loc) | 784 | LOAD_REG_ADDR(r4,.rtas_return_loc) |
| 785 | clrldi r4,r4,2 /* convert to realmode address */ | 785 | clrldi r4,r4,2 /* convert to realmode address */ |
| 786 | mtlr r4 | 786 | mtlr r4 |
| 787 | 787 | ||
| 788 | li r0,0 | 788 | li r0,0 |
| 789 | ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI | 789 | ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI |
| 790 | andc r0,r6,r0 | 790 | andc r0,r6,r0 |
| 791 | 791 | ||
| 792 | li r9,1 | 792 | li r9,1 |
| 793 | rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) | 793 | rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) |
| 794 | ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP | 794 | ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP |
| 795 | andc r6,r0,r9 | 795 | andc r6,r0,r9 |
| 796 | ori r6,r6,MSR_RI | 796 | ori r6,r6,MSR_RI |
| 797 | sync /* disable interrupts so SRR0/1 */ | 797 | sync /* disable interrupts so SRR0/1 */ |
| 798 | mtmsrd r0 /* don't get trashed */ | 798 | mtmsrd r0 /* don't get trashed */ |
| 799 | 799 | ||
| 800 | LOAD_REG_ADDR(r4, rtas) | 800 | LOAD_REG_ADDR(r4, rtas) |
| 801 | ld r5,RTASENTRY(r4) /* get the rtas->entry value */ | 801 | ld r5,RTASENTRY(r4) /* get the rtas->entry value */ |
| 802 | ld r4,RTASBASE(r4) /* get the rtas->base value */ | 802 | ld r4,RTASBASE(r4) /* get the rtas->base value */ |
| 803 | 803 | ||
| 804 | mtspr SPRN_SRR0,r5 | 804 | mtspr SPRN_SRR0,r5 |
| 805 | mtspr SPRN_SRR1,r6 | 805 | mtspr SPRN_SRR1,r6 |
| 806 | rfid | 806 | rfid |
| 807 | b . /* prevent speculative execution */ | 807 | b . /* prevent speculative execution */ |
| 808 | 808 | ||
| 809 | _STATIC(rtas_return_loc) | 809 | _STATIC(rtas_return_loc) |
| 810 | /* relocation is off at this point */ | 810 | /* relocation is off at this point */ |
| 811 | mfspr r4,SPRN_SPRG_PACA /* Get PACA */ | 811 | mfspr r4,SPRN_SPRG_PACA /* Get PACA */ |
| 812 | clrldi r4,r4,2 /* convert to realmode address */ | 812 | clrldi r4,r4,2 /* convert to realmode address */ |
| 813 | 813 | ||
| 814 | bcl 20,31,$+4 | 814 | bcl 20,31,$+4 |
| 815 | 0: mflr r3 | 815 | 0: mflr r3 |
| 816 | ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */ | 816 | ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */ |
| 817 | 817 | ||
| 818 | mfmsr r6 | 818 | mfmsr r6 |
| 819 | li r0,MSR_RI | 819 | li r0,MSR_RI |
| 820 | andc r6,r6,r0 | 820 | andc r6,r6,r0 |
| 821 | sync | 821 | sync |
| 822 | mtmsrd r6 | 822 | mtmsrd r6 |
| 823 | 823 | ||
| 824 | ld r1,PACAR1(r4) /* Restore our SP */ | 824 | ld r1,PACAR1(r4) /* Restore our SP */ |
| 825 | ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ | 825 | ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ |
| 826 | 826 | ||
| 827 | mtspr SPRN_SRR0,r3 | 827 | mtspr SPRN_SRR0,r3 |
| 828 | mtspr SPRN_SRR1,r4 | 828 | mtspr SPRN_SRR1,r4 |
| 829 | rfid | 829 | rfid |
| 830 | b . /* prevent speculative execution */ | 830 | b . /* prevent speculative execution */ |
| 831 | 831 | ||
| 832 | .align 3 | 832 | .align 3 |
| 833 | 1: .llong .rtas_restore_regs | 833 | 1: .llong .rtas_restore_regs |
| 834 | 834 | ||
| 835 | _STATIC(rtas_restore_regs) | 835 | _STATIC(rtas_restore_regs) |
| 836 | /* relocation is on at this point */ | 836 | /* relocation is on at this point */ |
| 837 | REST_GPR(2, r1) /* Restore the TOC */ | 837 | REST_GPR(2, r1) /* Restore the TOC */ |
| 838 | REST_GPR(13, r1) /* Restore paca */ | 838 | REST_GPR(13, r1) /* Restore paca */ |
| 839 | REST_8GPRS(14, r1) /* Restore the non-volatiles */ | 839 | REST_8GPRS(14, r1) /* Restore the non-volatiles */ |
| 840 | REST_10GPRS(22, r1) /* ditto */ | 840 | REST_10GPRS(22, r1) /* ditto */ |
| 841 | 841 | ||
| 842 | mfspr r13,SPRN_SPRG_PACA | 842 | mfspr r13,SPRN_SPRG_PACA |
| 843 | 843 | ||
| 844 | ld r4,_CCR(r1) | 844 | ld r4,_CCR(r1) |
| 845 | mtcr r4 | 845 | mtcr r4 |
| 846 | ld r5,_CTR(r1) | 846 | ld r5,_CTR(r1) |
| 847 | mtctr r5 | 847 | mtctr r5 |
| 848 | ld r6,_XER(r1) | 848 | ld r6,_XER(r1) |
| 849 | mtspr SPRN_XER,r6 | 849 | mtspr SPRN_XER,r6 |
| 850 | ld r7,_DAR(r1) | 850 | ld r7,_DAR(r1) |
| 851 | mtdar r7 | 851 | mtdar r7 |
| 852 | ld r8,_DSISR(r1) | 852 | ld r8,_DSISR(r1) |
| 853 | mtdsisr r8 | 853 | mtdsisr r8 |
| 854 | 854 | ||
| 855 | addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */ | 855 | addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */ |
| 856 | ld r0,16(r1) /* get return address */ | 856 | ld r0,16(r1) /* get return address */ |
| 857 | 857 | ||
| 858 | mtlr r0 | 858 | mtlr r0 |
| 859 | blr /* return to caller */ | 859 | blr /* return to caller */ |
| 860 | 860 | ||
| 861 | #endif /* CONFIG_PPC_RTAS */ | 861 | #endif /* CONFIG_PPC_RTAS */ |
| 862 | 862 | ||
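
Both enter_rtas and rtas_return_loc above lean on the same "convert to realmode address" trick (clrldi rN,rN,2): kernel linear-map addresses start at 0xC000000000000000, so clearing the top two bits yields the physical address the CPU fetches from once MSR[IR]/MSR[DR] are off. As a one-function C sketch:

    /* clrldi rN,rN,2: drop the 0xC... linear-map offset. */
    static unsigned long to_realmode(unsigned long vaddr)
    {
        return vaddr & ((1UL << 62) - 1);
    }
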
| 863 | _GLOBAL(enter_prom) | 863 | _GLOBAL(enter_prom) |
| 864 | mflr r0 | 864 | mflr r0 |
| 865 | std r0,16(r1) | 865 | std r0,16(r1) |
| 866 | stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ | 866 | stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ |
| 867 | 867 | ||
| 868 | /* Because PROM is running in 32b mode, it clobbers the high order half | 868 | /* Because PROM is running in 32b mode, it clobbers the high order half |
| 869 | * of all registers that it saves. We therefore save those registers | 869 | * of all registers that it saves. We therefore save those registers |
| 870 | * PROM might touch to the stack. (r0, r3-r13 are caller saved) | 870 | * PROM might touch to the stack. (r0, r3-r13 are caller saved) |
| 871 | */ | 871 | */ |
| 872 | SAVE_GPR(2, r1) | 872 | SAVE_GPR(2, r1) |
| 873 | SAVE_GPR(13, r1) | 873 | SAVE_GPR(13, r1) |
| 874 | SAVE_8GPRS(14, r1) | 874 | SAVE_8GPRS(14, r1) |
| 875 | SAVE_10GPRS(22, r1) | 875 | SAVE_10GPRS(22, r1) |
| 876 | mfcr r10 | 876 | mfcr r10 |
| 877 | mfmsr r11 | 877 | mfmsr r11 |
| 878 | std r10,_CCR(r1) | 878 | std r10,_CCR(r1) |
| 879 | std r11,_MSR(r1) | 879 | std r11,_MSR(r1) |
| 880 | 880 | ||
| 881 | /* Get the PROM entrypoint */ | 881 | /* Get the PROM entrypoint */ |
| 882 | mtlr r4 | 882 | mtlr r4 |
| 883 | 883 | ||
| 884 | /* Switch the MSR to 32-bit mode | 884 | /* Switch the MSR to 32-bit mode |
| 885 | */ | 885 | */ |
| 886 | #ifdef CONFIG_PPC_BOOK3E | 886 | #ifdef CONFIG_PPC_BOOK3E |
| 887 | rlwinm r11,r11,0,1,31 | 887 | rlwinm r11,r11,0,1,31 |
| 888 | mtmsr r11 | 888 | mtmsr r11 |
| 889 | #else /* CONFIG_PPC_BOOK3E */ | 889 | #else /* CONFIG_PPC_BOOK3E */ |
| 890 | mfmsr r11 | 890 | mfmsr r11 |
| 891 | li r12,1 | 891 | li r12,1 |
| 892 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | 892 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) |
| 893 | andc r11,r11,r12 | 893 | andc r11,r11,r12 |
| 894 | li r12,1 | 894 | li r12,1 |
| 895 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | 895 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) |
| 896 | andc r11,r11,r12 | 896 | andc r11,r11,r12 |
| 897 | mtmsrd r11 | 897 | mtmsrd r11 |
| 898 | #endif /* CONFIG_PPC_BOOK3E */ | 898 | #endif /* CONFIG_PPC_BOOK3E */ |
| 899 | isync | 899 | isync |
| 900 | 900 | ||
| 901 | /* Enter PROM here... */ | 901 | /* Enter PROM here... */ |
| 902 | blrl | 902 | blrl |
| 903 | 903 | ||
| 904 | /* Just make sure that the top 32 bits of r1 didn't | 904 | /* Just make sure that the top 32 bits of r1 didn't |
| 905 | * get corrupted by OF | 905 | * get corrupted by OF |
| 906 | */ | 906 | */ |
| 907 | rldicl r1,r1,0,32 | 907 | rldicl r1,r1,0,32 |
| 908 | 908 | ||
| 909 | /* Restore the MSR (back to 64 bits) */ | 909 | /* Restore the MSR (back to 64 bits) */ |
| 910 | ld r0,_MSR(r1) | 910 | ld r0,_MSR(r1) |
| 911 | MTMSRD(r0) | 911 | MTMSRD(r0) |
| 912 | isync | 912 | isync |
| 913 | 913 | ||
| 914 | /* Restore other registers */ | 914 | /* Restore other registers */ |
| 915 | REST_GPR(2, r1) | 915 | REST_GPR(2, r1) |
| 916 | REST_GPR(13, r1) | 916 | REST_GPR(13, r1) |
| 917 | REST_8GPRS(14, r1) | 917 | REST_8GPRS(14, r1) |
| 918 | REST_10GPRS(22, r1) | 918 | REST_10GPRS(22, r1) |
| 919 | ld r4,_CCR(r1) | 919 | ld r4,_CCR(r1) |
| 920 | mtcr r4 | 920 | mtcr r4 |
| 921 | 921 | ||
| 922 | addi r1,r1,PROM_FRAME_SIZE | 922 | addi r1,r1,PROM_FRAME_SIZE |
| 923 | ld r0,16(r1) | 923 | ld r0,16(r1) |
| 924 | mtlr r0 | 924 | mtlr r0 |
| 925 | blr | 925 | blr |
| 926 | 926 | ||
| 927 | #ifdef CONFIG_FUNCTION_TRACER | 927 | #ifdef CONFIG_FUNCTION_TRACER |
| 928 | #ifdef CONFIG_DYNAMIC_FTRACE | 928 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 929 | _GLOBAL(mcount) | 929 | _GLOBAL(mcount) |
| 930 | _GLOBAL(_mcount) | 930 | _GLOBAL(_mcount) |
| 931 | blr | 931 | blr |
| 932 | 932 | ||
| 933 | _GLOBAL(ftrace_caller) | 933 | _GLOBAL(ftrace_caller) |
| 934 | /* Taken from output of objdump from lib64/glibc */ | 934 | /* Taken from output of objdump from lib64/glibc */ |
| 935 | mflr r3 | 935 | mflr r3 |
| 936 | ld r11, 0(r1) | 936 | ld r11, 0(r1) |
| 937 | stdu r1, -112(r1) | 937 | stdu r1, -112(r1) |
| 938 | std r3, 128(r1) | 938 | std r3, 128(r1) |
| 939 | ld r4, 16(r11) | 939 | ld r4, 16(r11) |
| 940 | subi r3, r3, MCOUNT_INSN_SIZE | 940 | subi r3, r3, MCOUNT_INSN_SIZE |
| 941 | .globl ftrace_call | 941 | .globl ftrace_call |
| 942 | ftrace_call: | 942 | ftrace_call: |
| 943 | bl ftrace_stub | 943 | bl ftrace_stub |
| 944 | nop | 944 | nop |
| 945 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 945 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 946 | .globl ftrace_graph_call | 946 | .globl ftrace_graph_call |
| 947 | ftrace_graph_call: | 947 | ftrace_graph_call: |
| 948 | b ftrace_graph_stub | 948 | b ftrace_graph_stub |
| 949 | _GLOBAL(ftrace_graph_stub) | 949 | _GLOBAL(ftrace_graph_stub) |
| 950 | #endif | 950 | #endif |
| 951 | ld r0, 128(r1) | 951 | ld r0, 128(r1) |
| 952 | mtlr r0 | 952 | mtlr r0 |
| 953 | addi r1, r1, 112 | 953 | addi r1, r1, 112 |
| 954 | _GLOBAL(ftrace_stub) | 954 | _GLOBAL(ftrace_stub) |
| 955 | blr | 955 | blr |
| 956 | #else | 956 | #else |
| 957 | _GLOBAL(mcount) | 957 | _GLOBAL(mcount) |
| 958 | blr | 958 | blr |
| 959 | 959 | ||
| 960 | _GLOBAL(_mcount) | 960 | _GLOBAL(_mcount) |
| 961 | /* Taken from objdump output of lib64/glibc */ | 961 | /* Taken from objdump output of lib64/glibc */ |
| 962 | mflr r3 | 962 | mflr r3 |
| 963 | ld r11, 0(r1) | 963 | ld r11, 0(r1) |
| 964 | stdu r1, -112(r1) | 964 | stdu r1, -112(r1) |
| 965 | std r3, 128(r1) | 965 | std r3, 128(r1) |
| 966 | ld r4, 16(r11) | 966 | ld r4, 16(r11) |
| 967 | 967 | ||
| 968 | subi r3, r3, MCOUNT_INSN_SIZE | 968 | subi r3, r3, MCOUNT_INSN_SIZE |
| 969 | LOAD_REG_ADDR(r5,ftrace_trace_function) | 969 | LOAD_REG_ADDR(r5,ftrace_trace_function) |
| 970 | ld r5,0(r5) | 970 | ld r5,0(r5) |
| 971 | ld r5,0(r5) | 971 | ld r5,0(r5) |
| 972 | mtctr r5 | 972 | mtctr r5 |
| 973 | bctrl | 973 | bctrl |
| 974 | nop | 974 | nop |
| 975 | 975 | ||
| 976 | 976 | ||
| 977 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 977 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 978 | b ftrace_graph_caller | 978 | b ftrace_graph_caller |
| 979 | #endif | 979 | #endif |
| 980 | ld r0, 128(r1) | 980 | ld r0, 128(r1) |
| 981 | mtlr r0 | 981 | mtlr r0 |
| 982 | addi r1, r1, 112 | 982 | addi r1, r1, 112 |
| 983 | _GLOBAL(ftrace_stub) | 983 | _GLOBAL(ftrace_stub) |
| 984 | blr | 984 | blr |
| 985 | 985 | ||
| 986 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 986 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 987 | 987 | ||
| 988 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 988 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 989 | _GLOBAL(ftrace_graph_caller) | 989 | _GLOBAL(ftrace_graph_caller) |
| 990 | /* load r4 with local address */ | 990 | /* load r4 with local address */ |
| 991 | ld r4, 128(r1) | 991 | ld r4, 128(r1) |
| 992 | subi r4, r4, MCOUNT_INSN_SIZE | 992 | subi r4, r4, MCOUNT_INSN_SIZE |
| 993 | 993 | ||
| 994 | /* get the parent address */ | 994 | /* get the parent address */ |
| 995 | ld r11, 112(r1) | 995 | ld r11, 112(r1) |
| 996 | addi r3, r11, 16 | 996 | addi r3, r11, 16 |
| 997 | 997 | ||
| 998 | bl .prepare_ftrace_return | 998 | bl .prepare_ftrace_return |
| 999 | nop | 999 | nop |
| 1000 | 1000 | ||
| 1001 | ld r0, 128(r1) | 1001 | ld r0, 128(r1) |
| 1002 | mtlr r0 | 1002 | mtlr r0 |
| 1003 | addi r1, r1, 112 | 1003 | addi r1, r1, 112 |
| 1004 | blr | 1004 | blr |
| 1005 | 1005 | ||
| 1006 | _GLOBAL(return_to_handler) | 1006 | _GLOBAL(return_to_handler) |
| 1007 | /* need to save return values */ | 1007 | /* need to save return values */ |
| 1008 | std r4, -24(r1) | 1008 | std r4, -24(r1) |
| 1009 | std r3, -16(r1) | 1009 | std r3, -16(r1) |
| 1010 | std r31, -8(r1) | 1010 | std r31, -8(r1) |
| 1011 | mr r31, r1 | 1011 | mr r31, r1 |
| 1012 | stdu r1, -112(r1) | 1012 | stdu r1, -112(r1) |
| 1013 | 1013 | ||
| 1014 | bl .ftrace_return_to_handler | 1014 | bl .ftrace_return_to_handler |
| 1015 | nop | 1015 | nop |
| 1016 | 1016 | ||
| 1017 | /* return value has real return address */ | 1017 | /* return value has real return address */ |
| 1018 | mtlr r3 | 1018 | mtlr r3 |
| 1019 | 1019 | ||
| 1020 | ld r1, 0(r1) | 1020 | ld r1, 0(r1) |
| 1021 | ld r4, -24(r1) | 1021 | ld r4, -24(r1) |
| 1022 | ld r3, -16(r1) | 1022 | ld r3, -16(r1) |
| 1023 | ld r31, -8(r1) | 1023 | ld r31, -8(r1) |
| 1024 | 1024 | ||
| 1025 | /* Jump back to real return address */ | 1025 | /* Jump back to real return address */ |
| 1026 | blr | 1026 | blr |
| 1027 | 1027 | ||
| 1028 | _GLOBAL(mod_return_to_handler) | 1028 | _GLOBAL(mod_return_to_handler) |
| 1029 | /* need to save return values */ | 1029 | /* need to save return values */ |
| 1030 | std r4, -32(r1) | 1030 | std r4, -32(r1) |
| 1031 | std r3, -24(r1) | 1031 | std r3, -24(r1) |
| 1032 | /* save TOC */ | 1032 | /* save TOC */ |
| 1033 | std r2, -16(r1) | 1033 | std r2, -16(r1) |
| 1034 | std r31, -8(r1) | 1034 | std r31, -8(r1) |
| 1035 | mr r31, r1 | 1035 | mr r31, r1 |
| 1036 | stdu r1, -112(r1) | 1036 | stdu r1, -112(r1) |
| 1037 | 1037 | ||
| 1038 | /* | 1038 | /* |
| 1039 | * We are in a module using the module's TOC. | 1039 | * We are in a module using the module's TOC. |
| 1040 | * Switch to our TOC to run inside the core kernel. | 1040 | * Switch to our TOC to run inside the core kernel. |
| 1041 | */ | 1041 | */ |
| 1042 | ld r2, PACATOC(r13) | 1042 | ld r2, PACATOC(r13) |
| 1043 | 1043 | ||
| 1044 | bl .ftrace_return_to_handler | 1044 | bl .ftrace_return_to_handler |
| 1045 | nop | 1045 | nop |
| 1046 | 1046 | ||
| 1047 | /* return value has real return address */ | 1047 | /* return value has real return address */ |
| 1048 | mtlr r3 | 1048 | mtlr r3 |
| 1049 | 1049 | ||
| 1050 | ld r1, 0(r1) | 1050 | ld r1, 0(r1) |
| 1051 | ld r4, -32(r1) | 1051 | ld r4, -32(r1) |
| 1052 | ld r3, -24(r1) | 1052 | ld r3, -24(r1) |
| 1053 | ld r2, -16(r1) | 1053 | ld r2, -16(r1) |
| 1054 | ld r31, -8(r1) | 1054 | ld r31, -8(r1) |
| 1055 | 1055 | ||
| 1056 | /* Jump back to real return address */ | 1056 | /* Jump back to real return address */ |
| 1057 | blr | 1057 | blr |
| 1058 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 1058 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 1059 | #endif /* CONFIG_FUNCTION_TRACER */ | 1059 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 1060 | 1060 |
arch/powerpc/kernel/exceptions-64s.S
| 1 | /* | 1 | /* |
| 2 | * This file contains the 64-bit "server" PowerPC variant | 2 | * This file contains the 64-bit "server" PowerPC variant |
| 3 | * of the low level exception handling including exception | 3 | * of the low level exception handling including exception |
| 4 | * vectors, exception return, part of the slb and stab | 4 | * vectors, exception return, part of the slb and stab |
| 5 | * handling and other fixed offset specific things. | 5 | * handling and other fixed offset specific things. |
| 6 | * | 6 | * |
| 7 | * This file is meant to be #included from head_64.S due to | 7 | * This file is meant to be #included from head_64.S due to |
| 8 | * position dependent assembly. | 8 | * position dependent assembly. |
| 9 | * | 9 | * |
| 10 | * Most of this originates from head_64.S and thus has the same | 10 | * Most of this originates from head_64.S and thus has the same |
| 11 | * copyright history. | 11 | * copyright history. |
| 12 | * | 12 | * |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <asm/exception-64s.h> | 15 | #include <asm/exception-64s.h> |
| 16 | 16 | ||
| 17 | /* | 17 | /* |
| 18 | * We lay out physical memory as follows: | 18 | * We lay out physical memory as follows: |
| 19 | * 0x0000 - 0x00ff : Secondary processor spin code | 19 | * 0x0000 - 0x00ff : Secondary processor spin code |
| 20 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | 20 | * 0x0100 - 0x2fff : pSeries Interrupt prologs |
| 21 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs | 21 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs |
| 22 | * 0x6000 - 0x6fff : Initial (CPU0) segment table | 22 | * 0x6000 - 0x6fff : Initial (CPU0) segment table |
| 23 | * 0x7000 - 0x7fff : FWNMI data area | 23 | * 0x7000 - 0x7fff : FWNMI data area |
| 24 | * 0x8000 - : Early init and support code | 24 | * 0x8000 - : Early init and support code |
| 25 | */ | 25 | */ |
| 26 | 26 | ||
| 27 | /* | 27 | /* |
| 28 | * This is the start of the interrupt handlers for pSeries | 28 | * This is the start of the interrupt handlers for pSeries |
| 29 | * This code runs with relocation off. | 29 | * This code runs with relocation off. |
| 30 | * Code from here to __end_interrupts gets copied down to real | 30 | * Code from here to __end_interrupts gets copied down to real |
| 31 | * address 0x100 when we are running a relocatable kernel. | 31 | * address 0x100 when we are running a relocatable kernel. |
| 32 | * Therefore any relative branches in this section must only | 32 | * Therefore any relative branches in this section must only |
| 33 | * branch to labels in this section. | 33 | * branch to labels in this section. |
| 34 | */ | 34 | */ |
| 35 | . = 0x100 | 35 | . = 0x100 |
| 36 | .globl __start_interrupts | 36 | .globl __start_interrupts |
| 37 | __start_interrupts: | 37 | __start_interrupts: |
| 38 | 38 | ||
| 39 | STD_EXCEPTION_PSERIES(0x100, system_reset) | 39 | STD_EXCEPTION_PSERIES(0x100, system_reset) |
| 40 | 40 | ||
| 41 | . = 0x200 | 41 | . = 0x200 |
| 42 | _machine_check_pSeries: | 42 | _machine_check_pSeries: |
| 43 | HMT_MEDIUM | 43 | HMT_MEDIUM |
| 44 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 44 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ |
| 45 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | 45 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) |
| 46 | 46 | ||
| 47 | . = 0x300 | 47 | . = 0x300 |
| 48 | .globl data_access_pSeries | 48 | .globl data_access_pSeries |
| 49 | data_access_pSeries: | 49 | data_access_pSeries: |
| 50 | HMT_MEDIUM | 50 | HMT_MEDIUM |
| 51 | mtspr SPRN_SPRG_SCRATCH0,r13 | 51 | mtspr SPRN_SPRG_SCRATCH0,r13 |
| 52 | BEGIN_FTR_SECTION | 52 | BEGIN_FTR_SECTION |
| 53 | mfspr r13,SPRN_SPRG_PACA | 53 | mfspr r13,SPRN_SPRG_PACA |
| 54 | std r9,PACA_EXSLB+EX_R9(r13) | 54 | std r9,PACA_EXSLB+EX_R9(r13) |
| 55 | std r10,PACA_EXSLB+EX_R10(r13) | 55 | std r10,PACA_EXSLB+EX_R10(r13) |
| 56 | mfspr r10,SPRN_DAR | 56 | mfspr r10,SPRN_DAR |
| 57 | mfspr r9,SPRN_DSISR | 57 | mfspr r9,SPRN_DSISR |
| 58 | srdi r10,r10,60 | 58 | srdi r10,r10,60 |
| 59 | rlwimi r10,r9,16,0x20 | 59 | rlwimi r10,r9,16,0x20 |
| 60 | mfcr r9 | 60 | mfcr r9 |
| 61 | cmpwi r10,0x2c | 61 | cmpwi r10,0x2c |
| 62 | beq do_stab_bolted_pSeries | 62 | beq do_stab_bolted_pSeries |
| 63 | ld r10,PACA_EXSLB+EX_R10(r13) | 63 | ld r10,PACA_EXSLB+EX_R10(r13) |
| 64 | std r11,PACA_EXGEN+EX_R11(r13) | 64 | std r11,PACA_EXGEN+EX_R11(r13) |
| 65 | ld r11,PACA_EXSLB+EX_R9(r13) | 65 | ld r11,PACA_EXSLB+EX_R9(r13) |
| 66 | std r12,PACA_EXGEN+EX_R12(r13) | 66 | std r12,PACA_EXGEN+EX_R12(r13) |
| 67 | mfspr r12,SPRN_SPRG_SCRATCH0 | 67 | mfspr r12,SPRN_SPRG_SCRATCH0 |
| 68 | std r10,PACA_EXGEN+EX_R10(r13) | 68 | std r10,PACA_EXGEN+EX_R10(r13) |
| 69 | std r11,PACA_EXGEN+EX_R9(r13) | 69 | std r11,PACA_EXGEN+EX_R9(r13) |
| 70 | std r12,PACA_EXGEN+EX_R13(r13) | 70 | std r12,PACA_EXGEN+EX_R13(r13) |
| 71 | EXCEPTION_PROLOG_PSERIES_1(data_access_common) | 71 | EXCEPTION_PROLOG_PSERIES_1(data_access_common) |
| 72 | FTR_SECTION_ELSE | 72 | FTR_SECTION_ELSE |
| 73 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | 73 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) |
| 74 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) | 74 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) |
| 75 | 75 | ||
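The srdi/rlwimi/cmpwi dance in data_access_pSeries above packs two facts into one compare: the top nibble of the faulting address and the DSISR segment-fault bit, so that a result of 0x2c means "segment fault on a 0xC... kernel linear address", which is what gets routed to do_stab_bolted_pSeries. Sketched in C (DSISR_NOSEGMENT taken here as the 0x00200000 bit, an assumption from asm/reg.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define DSISR_NOSEGMENT 0x00200000  /* segment-table fault (assumed value) */

    /* Mirror of: srdi r10,DAR,60 ; rlwimi r10,DSISR,16,0x20 ; cmpwi r10,0x2c */
    static bool is_bolted_stab_fault(uint64_t dar, uint32_t dsisr)
    {
        unsigned int sel = (unsigned int)(dar >> 60);   /* top nibble of the EA */
        if (dsisr & DSISR_NOSEGMENT)
            sel |= 0x20;                /* fold in the segment-fault bit */
        return sel == 0x2c;             /* 0xc... kernel EA + segment fault */
    }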
| 76 | . = 0x380 | 76 | . = 0x380 |
| 77 | .globl data_access_slb_pSeries | 77 | .globl data_access_slb_pSeries |
| 78 | data_access_slb_pSeries: | 78 | data_access_slb_pSeries: |
| 79 | HMT_MEDIUM | 79 | HMT_MEDIUM |
| 80 | mtspr SPRN_SPRG_SCRATCH0,r13 | 80 | mtspr SPRN_SPRG_SCRATCH0,r13 |
| 81 | mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ | 81 | mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ |
| 82 | std r3,PACA_EXSLB+EX_R3(r13) | 82 | std r3,PACA_EXSLB+EX_R3(r13) |
| 83 | mfspr r3,SPRN_DAR | 83 | mfspr r3,SPRN_DAR |
| 84 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | 84 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
| 85 | mfcr r9 | 85 | mfcr r9 |
| 86 | #ifdef __DISABLED__ | 86 | #ifdef __DISABLED__ |
| 87 | /* Keep that around for when we re-implement dynamic VSIDs */ | 87 | /* Keep that around for when we re-implement dynamic VSIDs */ |
| 88 | cmpdi r3,0 | 88 | cmpdi r3,0 |
| 89 | bge slb_miss_user_pseries | 89 | bge slb_miss_user_pseries |
| 90 | #endif /* __DISABLED__ */ | 90 | #endif /* __DISABLED__ */ |
| 91 | std r10,PACA_EXSLB+EX_R10(r13) | 91 | std r10,PACA_EXSLB+EX_R10(r13) |
| 92 | std r11,PACA_EXSLB+EX_R11(r13) | 92 | std r11,PACA_EXSLB+EX_R11(r13) |
| 93 | std r12,PACA_EXSLB+EX_R12(r13) | 93 | std r12,PACA_EXSLB+EX_R12(r13) |
| 94 | mfspr r10,SPRN_SPRG_SCRATCH0 | 94 | mfspr r10,SPRN_SPRG_SCRATCH0 |
| 95 | std r10,PACA_EXSLB+EX_R13(r13) | 95 | std r10,PACA_EXSLB+EX_R13(r13) |
| 96 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | 96 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
| 97 | #ifndef CONFIG_RELOCATABLE | 97 | #ifndef CONFIG_RELOCATABLE |
| 98 | b .slb_miss_realmode | 98 | b .slb_miss_realmode |
| 99 | #else | 99 | #else |
| 100 | /* | 100 | /* |
| 101 | * We can't just use a direct branch to .slb_miss_realmode | 101 | * We can't just use a direct branch to .slb_miss_realmode |
| 102 | * because the distance from here to there depends on where | 102 | * because the distance from here to there depends on where |
| 103 | * the kernel ends up being put. | 103 | * the kernel ends up being put. |
| 104 | */ | 104 | */ |
| 105 | mfctr r11 | 105 | mfctr r11 |
| 106 | ld r10,PACAKBASE(r13) | 106 | ld r10,PACAKBASE(r13) |
| 107 | LOAD_HANDLER(r10, .slb_miss_realmode) | 107 | LOAD_HANDLER(r10, .slb_miss_realmode) |
| 108 | mtctr r10 | 108 | mtctr r10 |
| 109 | bctr | 109 | bctr |
| 110 | #endif | 110 | #endif |
| 111 | 111 | ||
| 112 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | 112 | STD_EXCEPTION_PSERIES(0x400, instruction_access) |
| 113 | 113 | ||
| 114 | . = 0x480 | 114 | . = 0x480 |
| 115 | .globl instruction_access_slb_pSeries | 115 | .globl instruction_access_slb_pSeries |
| 116 | instruction_access_slb_pSeries: | 116 | instruction_access_slb_pSeries: |
| 117 | HMT_MEDIUM | 117 | HMT_MEDIUM |
| 118 | mtspr SPRN_SPRG_SCRATCH0,r13 | 118 | mtspr SPRN_SPRG_SCRATCH0,r13 |
| 119 | mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ | 119 | mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ |
| 120 | std r3,PACA_EXSLB+EX_R3(r13) | 120 | std r3,PACA_EXSLB+EX_R3(r13) |
| 121 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | 121 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ |
| 122 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | 122 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
| 123 | mfcr r9 | 123 | mfcr r9 |
| 124 | #ifdef __DISABLED__ | 124 | #ifdef __DISABLED__ |
| 125 | /* Keep that around for when we re-implement dynamic VSIDs */ | 125 | /* Keep that around for when we re-implement dynamic VSIDs */ |
| 126 | cmpdi r3,0 | 126 | cmpdi r3,0 |
| 127 | bge slb_miss_user_pseries | 127 | bge slb_miss_user_pseries |
| 128 | #endif /* __DISABLED__ */ | 128 | #endif /* __DISABLED__ */ |
| 129 | std r10,PACA_EXSLB+EX_R10(r13) | 129 | std r10,PACA_EXSLB+EX_R10(r13) |
| 130 | std r11,PACA_EXSLB+EX_R11(r13) | 130 | std r11,PACA_EXSLB+EX_R11(r13) |
| 131 | std r12,PACA_EXSLB+EX_R12(r13) | 131 | std r12,PACA_EXSLB+EX_R12(r13) |
| 132 | mfspr r10,SPRN_SPRG_SCRATCH0 | 132 | mfspr r10,SPRN_SPRG_SCRATCH0 |
| 133 | std r10,PACA_EXSLB+EX_R13(r13) | 133 | std r10,PACA_EXSLB+EX_R13(r13) |
| 134 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | 134 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
| 135 | #ifndef CONFIG_RELOCATABLE | 135 | #ifndef CONFIG_RELOCATABLE |
| 136 | b .slb_miss_realmode | 136 | b .slb_miss_realmode |
| 137 | #else | 137 | #else |
| 138 | mfctr r11 | 138 | mfctr r11 |
| 139 | ld r10,PACAKBASE(r13) | 139 | ld r10,PACAKBASE(r13) |
| 140 | LOAD_HANDLER(r10, .slb_miss_realmode) | 140 | LOAD_HANDLER(r10, .slb_miss_realmode) |
| 141 | mtctr r10 | 141 | mtctr r10 |
| 142 | bctr | 142 | bctr |
| 143 | #endif | 143 | #endif |
| 144 | 144 | ||
| 145 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) | 145 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) |
| 146 | STD_EXCEPTION_PSERIES(0x600, alignment) | 146 | STD_EXCEPTION_PSERIES(0x600, alignment) |
| 147 | STD_EXCEPTION_PSERIES(0x700, program_check) | 147 | STD_EXCEPTION_PSERIES(0x700, program_check) |
| 148 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | 148 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) |
| 149 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) | 149 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) |
| 150 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | 150 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) |
| 151 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | 151 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) |
| 152 | 152 | ||
| 153 | . = 0xc00 | 153 | . = 0xc00 |
| 154 | .globl system_call_pSeries | 154 | .globl system_call_pSeries |
| 155 | system_call_pSeries: | 155 | system_call_pSeries: |
| 156 | HMT_MEDIUM | 156 | HMT_MEDIUM |
| 157 | BEGIN_FTR_SECTION | 157 | BEGIN_FTR_SECTION |
| 158 | cmpdi r0,0x1ebe | 158 | cmpdi r0,0x1ebe |
| 159 | beq- 1f | 159 | beq- 1f |
| 160 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | 160 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) |
| 161 | mr r9,r13 | 161 | mr r9,r13 |
| 162 | mfspr r13,SPRN_SPRG_PACA | 162 | mfspr r13,SPRN_SPRG_PACA |
| 163 | mfspr r11,SPRN_SRR0 | 163 | mfspr r11,SPRN_SRR0 |
| 164 | ld r12,PACAKBASE(r13) | 164 | ld r12,PACAKBASE(r13) |
| 165 | ld r10,PACAKMSR(r13) | 165 | ld r10,PACAKMSR(r13) |
| 166 | LOAD_HANDLER(r12, system_call_entry) | 166 | LOAD_HANDLER(r12, system_call_entry) |
| 167 | mtspr SPRN_SRR0,r12 | 167 | mtspr SPRN_SRR0,r12 |
| 168 | mfspr r12,SPRN_SRR1 | 168 | mfspr r12,SPRN_SRR1 |
| 169 | mtspr SPRN_SRR1,r10 | 169 | mtspr SPRN_SRR1,r10 |
| 170 | rfid | 170 | rfid |
| 171 | b . /* prevent speculative execution */ | 171 | b . /* prevent speculative execution */ |
| 172 | 172 | ||
| 173 | /* Fast LE/BE switch system call */ | 173 | /* Fast LE/BE switch system call */ |
| 174 | 1: mfspr r12,SPRN_SRR1 | 174 | 1: mfspr r12,SPRN_SRR1 |
| 175 | xori r12,r12,MSR_LE | 175 | xori r12,r12,MSR_LE |
| 176 | mtspr SPRN_SRR1,r12 | 176 | mtspr SPRN_SRR1,r12 |
| 177 | rfid /* return to userspace */ | 177 | rfid /* return to userspace */ |
| 178 | b . | 178 | b . |
| 179 | 179 | ||
| 180 | STD_EXCEPTION_PSERIES(0xd00, single_step) | 180 | STD_EXCEPTION_PSERIES(0xd00, single_step) |
| 181 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | 181 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) |
| 182 | 182 | ||
| 183 | /* We need to deal with the Altivec unavailable exception | 183 | /* We need to deal with the Altivec unavailable exception |
| 184 | * here, which is at 0xf20, thus in the middle of the | 184 | * here, which is at 0xf20, thus in the middle of the |
| 185 | * prolog code of the PerformanceMonitor one. A little | 185 | * prolog code of the PerformanceMonitor one. A little |
| 186 | * trickery is thus necessary | 186 | * trickery is thus necessary |
| 187 | */ | 187 | */ |
| 188 | performance_monitor_pSeries_1: | ||
| 188 | . = 0xf00 | 189 | . = 0xf00 |
| 189 | b performance_monitor_pSeries | 190 | b performance_monitor_pSeries |
| 190 | 191 | ||
| 192 | altivec_unavailable_pSeries_1: | ||
| 191 | . = 0xf20 | 193 | . = 0xf20 |
| 192 | b altivec_unavailable_pSeries | 194 | b altivec_unavailable_pSeries |
| 193 | 195 | ||
| 196 | vsx_unavailable_pSeries_1: | ||
| 194 | . = 0xf40 | 197 | . = 0xf40 |
| 195 | b vsx_unavailable_pSeries | 198 | b vsx_unavailable_pSeries |
| 196 | 199 | ||
| 197 | #ifdef CONFIG_CBE_RAS | 200 | #ifdef CONFIG_CBE_RAS |
| 198 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | 201 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) |
| 199 | #endif /* CONFIG_CBE_RAS */ | 202 | #endif /* CONFIG_CBE_RAS */ |
| 200 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | 203 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) |
| 201 | #ifdef CONFIG_CBE_RAS | 204 | #ifdef CONFIG_CBE_RAS |
| 202 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) | 205 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) |
| 203 | #endif /* CONFIG_CBE_RAS */ | 206 | #endif /* CONFIG_CBE_RAS */ |
| 204 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | 207 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) |
| 205 | #ifdef CONFIG_CBE_RAS | 208 | #ifdef CONFIG_CBE_RAS |
| 206 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) | 209 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) |
| 207 | #endif /* CONFIG_CBE_RAS */ | 210 | #endif /* CONFIG_CBE_RAS */ |
| 208 | 211 | ||
| 209 | . = 0x3000 | 212 | . = 0x3000 |
| 210 | 213 | ||
| 211 | /*** pSeries interrupt support ***/ | 214 | /*** pSeries interrupt support ***/ |
| 212 | 215 | ||
| 213 | /* moved from 0xf00 */ | 216 | /* moved from 0xf00 */ |
| 214 | STD_EXCEPTION_PSERIES(., performance_monitor) | 217 | STD_EXCEPTION_PSERIES(., performance_monitor) |
| 215 | STD_EXCEPTION_PSERIES(., altivec_unavailable) | 218 | STD_EXCEPTION_PSERIES(., altivec_unavailable) |
| 216 | STD_EXCEPTION_PSERIES(., vsx_unavailable) | 219 | STD_EXCEPTION_PSERIES(., vsx_unavailable) |
| 217 | 220 | ||
| 218 | /* | 221 | /* |
| 219 | * An interrupt came in while soft-disabled; clear EE in SRR1, | 222 | * An interrupt came in while soft-disabled; clear EE in SRR1, |
| 220 | * clear paca->hard_enabled and return. | 223 | * clear paca->hard_enabled and return. |
| 221 | */ | 224 | */ |
| 222 | masked_interrupt: | 225 | masked_interrupt: |
| 223 | stb r10,PACAHARDIRQEN(r13) | 226 | stb r10,PACAHARDIRQEN(r13) |
| 224 | mtcrf 0x80,r9 | 227 | mtcrf 0x80,r9 |
| 225 | ld r9,PACA_EXGEN+EX_R9(r13) | 228 | ld r9,PACA_EXGEN+EX_R9(r13) |
| 226 | mfspr r10,SPRN_SRR1 | 229 | mfspr r10,SPRN_SRR1 |
| 227 | rldicl r10,r10,48,1 /* clear MSR_EE */ | 230 | rldicl r10,r10,48,1 /* clear MSR_EE */ |
| 228 | rotldi r10,r10,16 | 231 | rotldi r10,r10,16 |
| 229 | mtspr SPRN_SRR1,r10 | 232 | mtspr SPRN_SRR1,r10 |
| 230 | ld r10,PACA_EXGEN+EX_R10(r13) | 233 | ld r10,PACA_EXGEN+EX_R10(r13) |
| 231 | mfspr r13,SPRN_SPRG_SCRATCH0 | 234 | mfspr r13,SPRN_SPRG_SCRATCH0 |
| 232 | rfid | 235 | rfid |
| 233 | b . | 236 | b . |
| 234 | 237 | ||
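The rldicl/rotldi pair above is the usual trick for clearing one MSR bit without loading a mask: rotate the value so MSR_EE sits where the instruction's mask field can drop it, then rotate everything back into place. The net effect, sketched in C with MSR_EE assumed at bit 15:

    #define MSR_EE 0x8000UL /* external interrupt enable (assumed position) */

    /* rldicl r10,r10,48,1 rotates EE to the top and masks it off;
     * rotldi r10,r10,16 completes the full 64-bit rotation, so the
     * combined effect is nothing more than clearing the EE bit: */
    static unsigned long mask_interrupt_srr1(unsigned long srr1)
    {
        return srr1 & ~MSR_EE;
    }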
| 235 | .align 7 | 238 | .align 7 |
| 236 | do_stab_bolted_pSeries: | 239 | do_stab_bolted_pSeries: |
| 237 | std r11,PACA_EXSLB+EX_R11(r13) | 240 | std r11,PACA_EXSLB+EX_R11(r13) |
| 238 | std r12,PACA_EXSLB+EX_R12(r13) | 241 | std r12,PACA_EXSLB+EX_R12(r13) |
| 239 | mfspr r10,SPRN_SPRG_SCRATCH0 | 242 | mfspr r10,SPRN_SPRG_SCRATCH0 |
| 240 | std r10,PACA_EXSLB+EX_R13(r13) | 243 | std r10,PACA_EXSLB+EX_R13(r13) |
| 241 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted) | 244 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted) |
| 242 | 245 | ||
| 243 | #ifdef CONFIG_PPC_PSERIES | 246 | #ifdef CONFIG_PPC_PSERIES |
| 244 | /* | 247 | /* |
| 245 | * Vectors for the FWNMI option. Share common code. | 248 | * Vectors for the FWNMI option. Share common code. |
| 246 | */ | 249 | */ |
| 247 | .globl system_reset_fwnmi | 250 | .globl system_reset_fwnmi |
| 248 | .align 7 | 251 | .align 7 |
| 249 | system_reset_fwnmi: | 252 | system_reset_fwnmi: |
| 250 | HMT_MEDIUM | 253 | HMT_MEDIUM |
| 251 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 254 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ |
| 252 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | 255 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) |
| 253 | 256 | ||
| 254 | .globl machine_check_fwnmi | 257 | .globl machine_check_fwnmi |
| 255 | .align 7 | 258 | .align 7 |
| 256 | machine_check_fwnmi: | 259 | machine_check_fwnmi: |
| 257 | HMT_MEDIUM | 260 | HMT_MEDIUM |
| 258 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 261 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ |
| 259 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | 262 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) |
| 260 | 263 | ||
| 261 | #endif /* CONFIG_PPC_PSERIES */ | 264 | #endif /* CONFIG_PPC_PSERIES */ |
| 262 | 265 | ||
| 263 | #ifdef __DISABLED__ | 266 | #ifdef __DISABLED__ |
| 264 | /* | 267 | /* |
| 265 | * This is used when the SLB miss handler has to go virtual, | 268 | * This is used when the SLB miss handler has to go virtual, |
| 266 | * which doesn't happen at the moment but will again once we | 269 | * which doesn't happen at the moment but will again once we |
| 267 | * re-implement dynamic VSIDs for shared page tables | 270 | * re-implement dynamic VSIDs for shared page tables |
| 268 | */ | 271 | */ |
| 269 | slb_miss_user_pseries: | 272 | slb_miss_user_pseries: |
| 270 | std r10,PACA_EXGEN+EX_R10(r13) | 273 | std r10,PACA_EXGEN+EX_R10(r13) |
| 271 | std r11,PACA_EXGEN+EX_R11(r13) | 274 | std r11,PACA_EXGEN+EX_R11(r13) |
| 272 | std r12,PACA_EXGEN+EX_R12(r13) | 275 | std r12,PACA_EXGEN+EX_R12(r13) |
| 273 | mfspr r10,SPRG_SCRATCH0 | 276 | mfspr r10,SPRG_SCRATCH0 |
| 274 | ld r11,PACA_EXSLB+EX_R9(r13) | 277 | ld r11,PACA_EXSLB+EX_R9(r13) |
| 275 | ld r12,PACA_EXSLB+EX_R3(r13) | 278 | ld r12,PACA_EXSLB+EX_R3(r13) |
| 276 | std r10,PACA_EXGEN+EX_R13(r13) | 279 | std r10,PACA_EXGEN+EX_R13(r13) |
| 277 | std r11,PACA_EXGEN+EX_R9(r13) | 280 | std r11,PACA_EXGEN+EX_R9(r13) |
| 278 | std r12,PACA_EXGEN+EX_R3(r13) | 281 | std r12,PACA_EXGEN+EX_R3(r13) |
| 279 | clrrdi r12,r13,32 | 282 | clrrdi r12,r13,32 |
| 280 | mfmsr r10 | 283 | mfmsr r10 |
| 281 | mfspr r11,SRR0 /* save SRR0 */ | 284 | mfspr r11,SRR0 /* save SRR0 */ |
| 282 | ori r12,r12,slb_miss_user_common@l /* virt addr of handler */ | 285 | ori r12,r12,slb_miss_user_common@l /* virt addr of handler */ |
| 283 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | 286 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI |
| 284 | mtspr SRR0,r12 | 287 | mtspr SRR0,r12 |
| 285 | mfspr r12,SRR1 /* and SRR1 */ | 288 | mfspr r12,SRR1 /* and SRR1 */ |
| 286 | mtspr SRR1,r10 | 289 | mtspr SRR1,r10 |
| 287 | rfid | 290 | rfid |
| 288 | b . /* prevent spec. execution */ | 291 | b . /* prevent spec. execution */ |
| 289 | #endif /* __DISABLED__ */ | 292 | #endif /* __DISABLED__ */ |
| 290 | 293 | ||
| 291 | .align 7 | 294 | .align 7 |
| 292 | .globl __end_interrupts | 295 | .globl __end_interrupts |
| 293 | __end_interrupts: | 296 | __end_interrupts: |
| 294 | 297 | ||
| 295 | /* | 298 | /* |
| 296 | * Code from here down to __end_handlers is invoked from the | 299 | * Code from here down to __end_handlers is invoked from the |
| 297 | * exception prologs above. Because the prologs assemble the | 300 | * exception prologs above. Because the prologs assemble the |
| 298 | * addresses of these handlers using the LOAD_HANDLER macro, | 301 | * addresses of these handlers using the LOAD_HANDLER macro, |
| 299 | * which uses an addi instruction, these handlers must be in | 302 | * which uses an addi instruction, these handlers must be in |
| 300 | * the first 32k of the kernel image. | 303 | * the first 32k of the kernel image. |
| 301 | */ | 304 | */ |
| 302 | 305 | ||
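The 32k restriction stated above falls directly out of the addi encoding used by LOAD_HANDLER: addi takes a 16-bit signed immediate, so only handlers within simm16 range of the kernel base held in PACAKBASE are reachable. A small hedged check of that constraint (the macro's exact expansion lives in asm/exception-64s.h; this is only the arithmetic):

    #include <stdbool.h>
    #include <stdint.h>

    /* Could LOAD_HANDLER reach `handler` from the kernel base with one addi? */
    static bool handler_reachable(uint64_t kbase, uint64_t handler)
    {
        int64_t off = (int64_t)(handler - kbase);
        return off >= -32768 && off <= 32767;   /* addi's signed 16-bit immediate */
    }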
| 303 | /*** Common interrupt handlers ***/ | 306 | /*** Common interrupt handlers ***/ |
| 304 | 307 | ||
| 305 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | 308 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) |
| 306 | 309 | ||
| 307 | /* | 310 | /* |
| 308 | * Machine check is different because we use a different | 311 | * Machine check is different because we use a different |
| 309 | * save area: PACA_EXMC instead of PACA_EXGEN. | 312 | * save area: PACA_EXMC instead of PACA_EXGEN. |
| 310 | */ | 313 | */ |
| 311 | .align 7 | 314 | .align 7 |
| 312 | .globl machine_check_common | 315 | .globl machine_check_common |
| 313 | machine_check_common: | 316 | machine_check_common: |
| 314 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | 317 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) |
| 315 | FINISH_NAP | 318 | FINISH_NAP |
| 316 | DISABLE_INTS | 319 | DISABLE_INTS |
| 317 | bl .save_nvgprs | 320 | bl .save_nvgprs |
| 318 | addi r3,r1,STACK_FRAME_OVERHEAD | 321 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 319 | bl .machine_check_exception | 322 | bl .machine_check_exception |
| 320 | b .ret_from_except | 323 | b .ret_from_except |
| 321 | 324 | ||
| 322 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | 325 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) |
| 323 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | 326 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) |
| 324 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | 327 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) |
| 325 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | 328 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) |
| 326 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | 329 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) |
| 327 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) | 330 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) |
| 328 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | 331 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) |
| 329 | #ifdef CONFIG_ALTIVEC | 332 | #ifdef CONFIG_ALTIVEC |
| 330 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | 333 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) |
| 331 | #else | 334 | #else |
| 332 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | 335 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) |
| 333 | #endif | 336 | #endif |
| 334 | #ifdef CONFIG_CBE_RAS | 337 | #ifdef CONFIG_CBE_RAS |
| 335 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) | 338 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) |
| 336 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) | 339 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) |
| 337 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) | 340 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) |
| 338 | #endif /* CONFIG_CBE_RAS */ | 341 | #endif /* CONFIG_CBE_RAS */ |
| 339 | 342 | ||
| 340 | .align 7 | 343 | .align 7 |
| 341 | system_call_entry: | 344 | system_call_entry: |
| 342 | b system_call_common | 345 | b system_call_common |
| 343 | 346 | ||
| 344 | /* | 347 | /* |
| 345 | * Here we have detected that the kernel stack pointer is bad. | 348 | * Here we have detected that the kernel stack pointer is bad. |
| 346 | * R9 contains the saved CR, r13 points to the paca, | 349 | * R9 contains the saved CR, r13 points to the paca, |
| 347 | * r10 contains the (bad) kernel stack pointer, | 350 | * r10 contains the (bad) kernel stack pointer, |
| 348 | * r11 and r12 contain the saved SRR0 and SRR1. | 351 | * r11 and r12 contain the saved SRR0 and SRR1. |
| 349 | * We switch to using an emergency stack, save the registers there, | 352 | * We switch to using an emergency stack, save the registers there, |
| 350 | * and call kernel_bad_stack(), which panics. | 353 | * and call kernel_bad_stack(), which panics. |
| 351 | */ | 354 | */ |
| 352 | bad_stack: | 355 | bad_stack: |
| 353 | ld r1,PACAEMERGSP(r13) | 356 | ld r1,PACAEMERGSP(r13) |
| 354 | subi r1,r1,64+INT_FRAME_SIZE | 357 | subi r1,r1,64+INT_FRAME_SIZE |
| 355 | std r9,_CCR(r1) | 358 | std r9,_CCR(r1) |
| 356 | std r10,GPR1(r1) | 359 | std r10,GPR1(r1) |
| 357 | std r11,_NIP(r1) | 360 | std r11,_NIP(r1) |
| 358 | std r12,_MSR(r1) | 361 | std r12,_MSR(r1) |
| 359 | mfspr r11,SPRN_DAR | 362 | mfspr r11,SPRN_DAR |
| 360 | mfspr r12,SPRN_DSISR | 363 | mfspr r12,SPRN_DSISR |
| 361 | std r11,_DAR(r1) | 364 | std r11,_DAR(r1) |
| 362 | std r12,_DSISR(r1) | 365 | std r12,_DSISR(r1) |
| 363 | mflr r10 | 366 | mflr r10 |
| 364 | mfctr r11 | 367 | mfctr r11 |
| 365 | mfxer r12 | 368 | mfxer r12 |
| 366 | std r10,_LINK(r1) | 369 | std r10,_LINK(r1) |
| 367 | std r11,_CTR(r1) | 370 | std r11,_CTR(r1) |
| 368 | std r12,_XER(r1) | 371 | std r12,_XER(r1) |
| 369 | SAVE_GPR(0,r1) | 372 | SAVE_GPR(0,r1) |
| 370 | SAVE_GPR(2,r1) | 373 | SAVE_GPR(2,r1) |
| 371 | SAVE_4GPRS(3,r1) | 374 | SAVE_4GPRS(3,r1) |
| 372 | SAVE_2GPRS(7,r1) | 375 | SAVE_2GPRS(7,r1) |
| 373 | SAVE_10GPRS(12,r1) | 376 | SAVE_10GPRS(12,r1) |
| 374 | SAVE_10GPRS(22,r1) | 377 | SAVE_10GPRS(22,r1) |
| 375 | lhz r12,PACA_TRAP_SAVE(r13) | 378 | lhz r12,PACA_TRAP_SAVE(r13) |
| 376 | std r12,_TRAP(r1) | 379 | std r12,_TRAP(r1) |
| 377 | addi r11,r1,INT_FRAME_SIZE | 380 | addi r11,r1,INT_FRAME_SIZE |
| 378 | std r11,0(r1) | 381 | std r11,0(r1) |
| 379 | li r12,0 | 382 | li r12,0 |
| 380 | std r12,0(r11) | 383 | std r12,0(r11) |
| 381 | ld r2,PACATOC(r13) | 384 | ld r2,PACATOC(r13) |
| 382 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 385 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
| 383 | bl .kernel_bad_stack | 386 | bl .kernel_bad_stack |
| 384 | b 1b | 387 | b 1b |
| 385 | 388 | ||
| 386 | /* | 389 | /* |
| 387 | * Here r13 points to the paca, r9 contains the saved CR, | 390 | * Here r13 points to the paca, r9 contains the saved CR, |
| 388 | * SRR0 and SRR1 are saved in r11 and r12, | 391 | * SRR0 and SRR1 are saved in r11 and r12, |
| 389 | * r9 - r13 are saved in paca->exgen. | 392 | * r9 - r13 are saved in paca->exgen. |
| 390 | */ | 393 | */ |
| 391 | .align 7 | 394 | .align 7 |
| 392 | .globl data_access_common | 395 | .globl data_access_common |
| 393 | data_access_common: | 396 | data_access_common: |
| 394 | mfspr r10,SPRN_DAR | 397 | mfspr r10,SPRN_DAR |
| 395 | std r10,PACA_EXGEN+EX_DAR(r13) | 398 | std r10,PACA_EXGEN+EX_DAR(r13) |
| 396 | mfspr r10,SPRN_DSISR | 399 | mfspr r10,SPRN_DSISR |
| 397 | stw r10,PACA_EXGEN+EX_DSISR(r13) | 400 | stw r10,PACA_EXGEN+EX_DSISR(r13) |
| 398 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | 401 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) |
| 399 | ld r3,PACA_EXGEN+EX_DAR(r13) | 402 | ld r3,PACA_EXGEN+EX_DAR(r13) |
| 400 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | 403 | lwz r4,PACA_EXGEN+EX_DSISR(r13) |
| 401 | li r5,0x300 | 404 | li r5,0x300 |
| 402 | b .do_hash_page /* Try to handle as hpte fault */ | 405 | b .do_hash_page /* Try to handle as hpte fault */ |
| 403 | 406 | ||
| 404 | .align 7 | 407 | .align 7 |
| 405 | .globl instruction_access_common | 408 | .globl instruction_access_common |
| 406 | instruction_access_common: | 409 | instruction_access_common: |
| 407 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | 410 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) |
| 408 | ld r3,_NIP(r1) | 411 | ld r3,_NIP(r1) |
| 409 | andis. r4,r12,0x5820 | 412 | andis. r4,r12,0x5820 |
| 410 | li r5,0x400 | 413 | li r5,0x400 |
| 411 | b .do_hash_page /* Try to handle as hpte fault */ | 414 | b .do_hash_page /* Try to handle as hpte fault */ |
| 412 | 415 | ||
| 413 | /* | 416 | /* |
| 414 | * Here is the common SLB miss user handler, used when going to virtual | 417 | * Here is the common SLB miss user handler, used when going to virtual |
| 415 | * mode for SLB misses; it is currently unused | 418 | * mode for SLB misses; it is currently unused |
| 416 | */ | 419 | */ |
| 417 | #ifdef __DISABLED__ | 420 | #ifdef __DISABLED__ |
| 418 | .align 7 | 421 | .align 7 |
| 419 | .globl slb_miss_user_common | 422 | .globl slb_miss_user_common |
| 420 | slb_miss_user_common: | 423 | slb_miss_user_common: |
| 421 | mflr r10 | 424 | mflr r10 |
| 422 | std r3,PACA_EXGEN+EX_DAR(r13) | 425 | std r3,PACA_EXGEN+EX_DAR(r13) |
| 423 | stw r9,PACA_EXGEN+EX_CCR(r13) | 426 | stw r9,PACA_EXGEN+EX_CCR(r13) |
| 424 | std r10,PACA_EXGEN+EX_LR(r13) | 427 | std r10,PACA_EXGEN+EX_LR(r13) |
| 425 | std r11,PACA_EXGEN+EX_SRR0(r13) | 428 | std r11,PACA_EXGEN+EX_SRR0(r13) |
| 426 | bl .slb_allocate_user | 429 | bl .slb_allocate_user |
| 427 | 430 | ||
| 428 | ld r10,PACA_EXGEN+EX_LR(r13) | 431 | ld r10,PACA_EXGEN+EX_LR(r13) |
| 429 | ld r3,PACA_EXGEN+EX_R3(r13) | 432 | ld r3,PACA_EXGEN+EX_R3(r13) |
| 430 | lwz r9,PACA_EXGEN+EX_CCR(r13) | 433 | lwz r9,PACA_EXGEN+EX_CCR(r13) |
| 431 | ld r11,PACA_EXGEN+EX_SRR0(r13) | 434 | ld r11,PACA_EXGEN+EX_SRR0(r13) |
| 432 | mtlr r10 | 435 | mtlr r10 |
| 433 | beq- slb_miss_fault | 436 | beq- slb_miss_fault |
| 434 | 437 | ||
| 435 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | 438 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ |
| 436 | beq- unrecov_user_slb | 439 | beq- unrecov_user_slb |
| 437 | mfmsr r10 | 440 | mfmsr r10 |
| 438 | 441 | ||
| 439 | .machine push | 442 | .machine push |
| 440 | .machine "power4" | 443 | .machine "power4" |
| 441 | mtcrf 0x80,r9 | 444 | mtcrf 0x80,r9 |
| 442 | .machine pop | 445 | .machine pop |
| 443 | 446 | ||
| 444 | clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ | 447 | clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ |
| 445 | mtmsrd r10,1 | 448 | mtmsrd r10,1 |
| 446 | 449 | ||
| 447 | mtspr SRR0,r11 | 450 | mtspr SRR0,r11 |
| 448 | mtspr SRR1,r12 | 451 | mtspr SRR1,r12 |
| 449 | 452 | ||
| 450 | ld r9,PACA_EXGEN+EX_R9(r13) | 453 | ld r9,PACA_EXGEN+EX_R9(r13) |
| 451 | ld r10,PACA_EXGEN+EX_R10(r13) | 454 | ld r10,PACA_EXGEN+EX_R10(r13) |
| 452 | ld r11,PACA_EXGEN+EX_R11(r13) | 455 | ld r11,PACA_EXGEN+EX_R11(r13) |
| 453 | ld r12,PACA_EXGEN+EX_R12(r13) | 456 | ld r12,PACA_EXGEN+EX_R12(r13) |
| 454 | ld r13,PACA_EXGEN+EX_R13(r13) | 457 | ld r13,PACA_EXGEN+EX_R13(r13) |
| 455 | rfid | 458 | rfid |
| 456 | b . | 459 | b . |
| 457 | 460 | ||
| 458 | slb_miss_fault: | 461 | slb_miss_fault: |
| 459 | EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) | 462 | EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) |
| 460 | ld r4,PACA_EXGEN+EX_DAR(r13) | 463 | ld r4,PACA_EXGEN+EX_DAR(r13) |
| 461 | li r5,0 | 464 | li r5,0 |
| 462 | std r4,_DAR(r1) | 465 | std r4,_DAR(r1) |
| 463 | std r5,_DSISR(r1) | 466 | std r5,_DSISR(r1) |
| 464 | b handle_page_fault | 467 | b handle_page_fault |
| 465 | 468 | ||
| 466 | unrecov_user_slb: | 469 | unrecov_user_slb: |
| 467 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) | 470 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) |
| 468 | DISABLE_INTS | 471 | DISABLE_INTS |
| 469 | bl .save_nvgprs | 472 | bl .save_nvgprs |
| 470 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 473 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
| 471 | bl .unrecoverable_exception | 474 | bl .unrecoverable_exception |
| 472 | b 1b | 475 | b 1b |
| 473 | 476 | ||
| 474 | #endif /* __DISABLED__ */ | 477 | #endif /* __DISABLED__ */ |
| 475 | 478 | ||
| 476 | 479 | ||
| 477 | /* | 480 | /* |
| 478 | * r13 points to the PACA, r9 contains the saved CR, | 481 | * r13 points to the PACA, r9 contains the saved CR, |
| 479 | * r12 contains the saved SRR1, SRR0 is still ready for return | 482 | * r12 contains the saved SRR1, SRR0 is still ready for return |
| 480 | * r3 has the faulting address | 483 | * r3 has the faulting address |
| 481 | * r9 - r13 are saved in paca->exslb. | 484 | * r9 - r13 are saved in paca->exslb. |
| 482 | * r3 is saved in paca->slb_r3 | 485 | * r3 is saved in paca->slb_r3 |
| 483 | * We assume we aren't going to take any exceptions during this procedure. | 486 | * We assume we aren't going to take any exceptions during this procedure. |
| 484 | */ | 487 | */ |
| 485 | _GLOBAL(slb_miss_realmode) | 488 | _GLOBAL(slb_miss_realmode) |
| 486 | mflr r10 | 489 | mflr r10 |
| 487 | #ifdef CONFIG_RELOCATABLE | 490 | #ifdef CONFIG_RELOCATABLE |
| 488 | mtctr r11 | 491 | mtctr r11 |
| 489 | #endif | 492 | #endif |
| 490 | 493 | ||
| 491 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | 494 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ |
| 492 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | 495 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ |
| 493 | 496 | ||
| 494 | bl .slb_allocate_realmode | 497 | bl .slb_allocate_realmode |
| 495 | 498 | ||
| 496 | /* All done -- return from exception. */ | 499 | /* All done -- return from exception. */ |
| 497 | 500 | ||
| 498 | ld r10,PACA_EXSLB+EX_LR(r13) | 501 | ld r10,PACA_EXSLB+EX_LR(r13) |
| 499 | ld r3,PACA_EXSLB+EX_R3(r13) | 502 | ld r3,PACA_EXSLB+EX_R3(r13) |
| 500 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | 503 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ |
| 501 | #ifdef CONFIG_PPC_ISERIES | 504 | #ifdef CONFIG_PPC_ISERIES |
| 502 | BEGIN_FW_FTR_SECTION | 505 | BEGIN_FW_FTR_SECTION |
| 503 | ld r11,PACALPPACAPTR(r13) | 506 | ld r11,PACALPPACAPTR(r13) |
| 504 | ld r11,LPPACASRR0(r11) /* get SRR0 value */ | 507 | ld r11,LPPACASRR0(r11) /* get SRR0 value */ |
| 505 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | 508 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
| 506 | #endif /* CONFIG_PPC_ISERIES */ | 509 | #endif /* CONFIG_PPC_ISERIES */ |
| 507 | 510 | ||
| 508 | mtlr r10 | 511 | mtlr r10 |
| 509 | 512 | ||
| 510 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | 513 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ |
| 511 | beq- 2f | 514 | beq- 2f |
| 512 | 515 | ||
| 513 | .machine push | 516 | .machine push |
| 514 | .machine "power4" | 517 | .machine "power4" |
| 515 | mtcrf 0x80,r9 | 518 | mtcrf 0x80,r9 |
| 516 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | 519 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ |
| 517 | .machine pop | 520 | .machine pop |
| 518 | 521 | ||
| 519 | #ifdef CONFIG_PPC_ISERIES | 522 | #ifdef CONFIG_PPC_ISERIES |
| 520 | BEGIN_FW_FTR_SECTION | 523 | BEGIN_FW_FTR_SECTION |
| 521 | mtspr SPRN_SRR0,r11 | 524 | mtspr SPRN_SRR0,r11 |
| 522 | mtspr SPRN_SRR1,r12 | 525 | mtspr SPRN_SRR1,r12 |
| 523 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | 526 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
| 524 | #endif /* CONFIG_PPC_ISERIES */ | 527 | #endif /* CONFIG_PPC_ISERIES */ |
| 525 | ld r9,PACA_EXSLB+EX_R9(r13) | 528 | ld r9,PACA_EXSLB+EX_R9(r13) |
| 526 | ld r10,PACA_EXSLB+EX_R10(r13) | 529 | ld r10,PACA_EXSLB+EX_R10(r13) |
| 527 | ld r11,PACA_EXSLB+EX_R11(r13) | 530 | ld r11,PACA_EXSLB+EX_R11(r13) |
| 528 | ld r12,PACA_EXSLB+EX_R12(r13) | 531 | ld r12,PACA_EXSLB+EX_R12(r13) |
| 529 | ld r13,PACA_EXSLB+EX_R13(r13) | 532 | ld r13,PACA_EXSLB+EX_R13(r13) |
| 530 | rfid | 533 | rfid |
| 531 | b . /* prevent speculative execution */ | 534 | b . /* prevent speculative execution */ |
| 532 | 535 | ||
| 533 | 2: | 536 | 2: |
| 534 | #ifdef CONFIG_PPC_ISERIES | 537 | #ifdef CONFIG_PPC_ISERIES |
| 535 | BEGIN_FW_FTR_SECTION | 538 | BEGIN_FW_FTR_SECTION |
| 536 | b unrecov_slb | 539 | b unrecov_slb |
| 537 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | 540 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
| 538 | #endif /* CONFIG_PPC_ISERIES */ | 541 | #endif /* CONFIG_PPC_ISERIES */ |
| 539 | mfspr r11,SPRN_SRR0 | 542 | mfspr r11,SPRN_SRR0 |
| 540 | ld r10,PACAKBASE(r13) | 543 | ld r10,PACAKBASE(r13) |
| 541 | LOAD_HANDLER(r10,unrecov_slb) | 544 | LOAD_HANDLER(r10,unrecov_slb) |
| 542 | mtspr SPRN_SRR0,r10 | 545 | mtspr SPRN_SRR0,r10 |
| 543 | ld r10,PACAKMSR(r13) | 546 | ld r10,PACAKMSR(r13) |
| 544 | mtspr SPRN_SRR1,r10 | 547 | mtspr SPRN_SRR1,r10 |
| 545 | rfid | 548 | rfid |
| 546 | b . | 549 | b . |
| 547 | 550 | ||
| 548 | unrecov_slb: | 551 | unrecov_slb: |
| 549 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | 552 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) |
| 550 | DISABLE_INTS | 553 | DISABLE_INTS |
| 551 | bl .save_nvgprs | 554 | bl .save_nvgprs |
| 552 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 555 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
| 553 | bl .unrecoverable_exception | 556 | bl .unrecoverable_exception |
| 554 | b 1b | 557 | b 1b |
| 555 | 558 | ||
| 556 | .align 7 | 559 | .align 7 |
| 557 | .globl hardware_interrupt_common | 560 | .globl hardware_interrupt_common |
| 558 | .globl hardware_interrupt_entry | 561 | .globl hardware_interrupt_entry |
| 559 | hardware_interrupt_common: | 562 | hardware_interrupt_common: |
| 560 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | 563 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) |
| 561 | FINISH_NAP | 564 | FINISH_NAP |
| 562 | hardware_interrupt_entry: | 565 | hardware_interrupt_entry: |
| 563 | DISABLE_INTS | 566 | DISABLE_INTS |
| 564 | BEGIN_FTR_SECTION | 567 | BEGIN_FTR_SECTION |
| 565 | bl .ppc64_runlatch_on | 568 | bl .ppc64_runlatch_on |
| 566 | END_FTR_SECTION_IFSET(CPU_FTR_CTRL) | 569 | END_FTR_SECTION_IFSET(CPU_FTR_CTRL) |
| 567 | addi r3,r1,STACK_FRAME_OVERHEAD | 570 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 568 | bl .do_IRQ | 571 | bl .do_IRQ |
| 569 | b .ret_from_except_lite | 572 | b .ret_from_except_lite |
| 570 | 573 | ||
| 571 | #ifdef CONFIG_PPC_970_NAP | 574 | #ifdef CONFIG_PPC_970_NAP |
| 572 | power4_fixup_nap: | 575 | power4_fixup_nap: |
| 573 | andc r9,r9,r10 | 576 | andc r9,r9,r10 |
| 574 | std r9,TI_LOCAL_FLAGS(r11) | 577 | std r9,TI_LOCAL_FLAGS(r11) |
| 575 | ld r10,_LINK(r1) /* make idle task do the */ | 578 | ld r10,_LINK(r1) /* make idle task do the */ |
| 576 | std r10,_NIP(r1) /* equivalent of a blr */ | 579 | std r10,_NIP(r1) /* equivalent of a blr */ |
| 577 | blr | 580 | blr |
| 578 | #endif | 581 | #endif |
| 579 | 582 | ||
| 580 | .align 7 | 583 | .align 7 |
| 581 | .globl alignment_common | 584 | .globl alignment_common |
| 582 | alignment_common: | 585 | alignment_common: |
| 583 | mfspr r10,SPRN_DAR | 586 | mfspr r10,SPRN_DAR |
| 584 | std r10,PACA_EXGEN+EX_DAR(r13) | 587 | std r10,PACA_EXGEN+EX_DAR(r13) |
| 585 | mfspr r10,SPRN_DSISR | 588 | mfspr r10,SPRN_DSISR |
| 586 | stw r10,PACA_EXGEN+EX_DSISR(r13) | 589 | stw r10,PACA_EXGEN+EX_DSISR(r13) |
| 587 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | 590 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) |
| 588 | ld r3,PACA_EXGEN+EX_DAR(r13) | 591 | ld r3,PACA_EXGEN+EX_DAR(r13) |
| 589 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | 592 | lwz r4,PACA_EXGEN+EX_DSISR(r13) |
| 590 | std r3,_DAR(r1) | 593 | std r3,_DAR(r1) |
| 591 | std r4,_DSISR(r1) | 594 | std r4,_DSISR(r1) |
| 592 | bl .save_nvgprs | 595 | bl .save_nvgprs |
| 593 | addi r3,r1,STACK_FRAME_OVERHEAD | 596 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 594 | ENABLE_INTS | 597 | ENABLE_INTS |
| 595 | bl .alignment_exception | 598 | bl .alignment_exception |
| 596 | b .ret_from_except | 599 | b .ret_from_except |
| 597 | 600 | ||
| 598 | .align 7 | 601 | .align 7 |
| 599 | .globl program_check_common | 602 | .globl program_check_common |
| 600 | program_check_common: | 603 | program_check_common: |
| 601 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | 604 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) |
| 602 | bl .save_nvgprs | 605 | bl .save_nvgprs |
| 603 | addi r3,r1,STACK_FRAME_OVERHEAD | 606 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 604 | ENABLE_INTS | 607 | ENABLE_INTS |
| 605 | bl .program_check_exception | 608 | bl .program_check_exception |
| 606 | b .ret_from_except | 609 | b .ret_from_except |
| 607 | 610 | ||
| 608 | .align 7 | 611 | .align 7 |
| 609 | .globl fp_unavailable_common | 612 | .globl fp_unavailable_common |
| 610 | fp_unavailable_common: | 613 | fp_unavailable_common: |
| 611 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | 614 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) |
| 612 | bne 1f /* if from user, just load it up */ | 615 | bne 1f /* if from user, just load it up */ |
| 613 | bl .save_nvgprs | 616 | bl .save_nvgprs |
| 614 | addi r3,r1,STACK_FRAME_OVERHEAD | 617 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 615 | ENABLE_INTS | 618 | ENABLE_INTS |
| 616 | bl .kernel_fp_unavailable_exception | 619 | bl .kernel_fp_unavailable_exception |
| 617 | BUG_OPCODE | 620 | BUG_OPCODE |
| 618 | 1: bl .load_up_fpu | 621 | 1: bl .load_up_fpu |
| 619 | b fast_exception_return | 622 | b fast_exception_return |
| 620 | 623 | ||
| 621 | .align 7 | 624 | .align 7 |
| 622 | .globl altivec_unavailable_common | 625 | .globl altivec_unavailable_common |
| 623 | altivec_unavailable_common: | 626 | altivec_unavailable_common: |
| 624 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | 627 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) |
| 625 | #ifdef CONFIG_ALTIVEC | 628 | #ifdef CONFIG_ALTIVEC |
| 626 | BEGIN_FTR_SECTION | 629 | BEGIN_FTR_SECTION |
| 627 | beq 1f | 630 | beq 1f |
| 628 | bl .load_up_altivec | 631 | bl .load_up_altivec |
| 629 | b fast_exception_return | 632 | b fast_exception_return |
| 630 | 1: | 633 | 1: |
| 631 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 634 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
| 632 | #endif | 635 | #endif |
| 633 | bl .save_nvgprs | 636 | bl .save_nvgprs |
| 634 | addi r3,r1,STACK_FRAME_OVERHEAD | 637 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 635 | ENABLE_INTS | 638 | ENABLE_INTS |
| 636 | bl .altivec_unavailable_exception | 639 | bl .altivec_unavailable_exception |
| 637 | b .ret_from_except | 640 | b .ret_from_except |
| 638 | 641 | ||
| 639 | .align 7 | 642 | .align 7 |
| 640 | .globl vsx_unavailable_common | 643 | .globl vsx_unavailable_common |
| 641 | vsx_unavailable_common: | 644 | vsx_unavailable_common: |
| 642 | EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) | 645 | EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) |
| 643 | #ifdef CONFIG_VSX | 646 | #ifdef CONFIG_VSX |
| 644 | BEGIN_FTR_SECTION | 647 | BEGIN_FTR_SECTION |
| 645 | bne .load_up_vsx | 648 | bne .load_up_vsx |
| 646 | 1: | 649 | 1: |
| 647 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | 650 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) |
| 648 | #endif | 651 | #endif |
| 649 | bl .save_nvgprs | 652 | bl .save_nvgprs |
| 650 | addi r3,r1,STACK_FRAME_OVERHEAD | 653 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 651 | ENABLE_INTS | 654 | ENABLE_INTS |
| 652 | bl .vsx_unavailable_exception | 655 | bl .vsx_unavailable_exception |
| 653 | b .ret_from_except | 656 | b .ret_from_except |
| 654 | 657 | ||
| 655 | .align 7 | 658 | .align 7 |
| 656 | .globl __end_handlers | 659 | .globl __end_handlers |
| 657 | __end_handlers: | 660 | __end_handlers: |
| 658 | 661 | ||
| 659 | /* | 662 | /* |
| 660 | * Return from an exception with minimal checks. | 663 | * Return from an exception with minimal checks. |
| 661 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | 664 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. |
| 662 | * If interrupts have been enabled, or anything has been | 665 | * If interrupts have been enabled, or anything has been |
| 663 | * done that might have changed the scheduling status of | 666 | * done that might have changed the scheduling status of |
| 664 | * any task or sent any task a signal, you should use | 667 | * any task or sent any task a signal, you should use |
| 665 | * ret_from_except or ret_from_except_lite instead of this. | 668 | * ret_from_except or ret_from_except_lite instead of this. |
| 666 | */ | 669 | */ |
| 667 | fast_exc_return_irq: /* restores irq state too */ | 670 | fast_exc_return_irq: /* restores irq state too */ |
| 668 | ld r3,SOFTE(r1) | 671 | ld r3,SOFTE(r1) |
| 669 | TRACE_AND_RESTORE_IRQ(r3); | 672 | TRACE_AND_RESTORE_IRQ(r3); |
| 670 | ld r12,_MSR(r1) | 673 | ld r12,_MSR(r1) |
| 671 | rldicl r4,r12,49,63 /* get MSR_EE to LSB */ | 674 | rldicl r4,r12,49,63 /* get MSR_EE to LSB */ |
| 672 | stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ | 675 | stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ |
| 673 | b 1f | 676 | b 1f |
| 674 | 677 | ||
| 675 | .globl fast_exception_return | 678 | .globl fast_exception_return |
| 676 | fast_exception_return: | 679 | fast_exception_return: |
| 677 | ld r12,_MSR(r1) | 680 | ld r12,_MSR(r1) |
| 678 | 1: ld r11,_NIP(r1) | 681 | 1: ld r11,_NIP(r1) |
| 679 | andi. r3,r12,MSR_RI /* check if RI is set */ | 682 | andi. r3,r12,MSR_RI /* check if RI is set */ |
| 680 | beq- unrecov_fer | 683 | beq- unrecov_fer |
| 681 | 684 | ||
| 682 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 685 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
| 683 | andi. r3,r12,MSR_PR | 686 | andi. r3,r12,MSR_PR |
| 684 | beq 2f | 687 | beq 2f |
| 685 | ACCOUNT_CPU_USER_EXIT(r3, r4) | 688 | ACCOUNT_CPU_USER_EXIT(r3, r4) |
| 686 | 2: | 689 | 2: |
| 687 | #endif | 690 | #endif |
| 688 | 691 | ||
| 689 | ld r3,_CCR(r1) | 692 | ld r3,_CCR(r1) |
| 690 | ld r4,_LINK(r1) | 693 | ld r4,_LINK(r1) |
| 691 | ld r5,_CTR(r1) | 694 | ld r5,_CTR(r1) |
| 692 | ld r6,_XER(r1) | 695 | ld r6,_XER(r1) |
| 693 | mtcr r3 | 696 | mtcr r3 |
| 694 | mtlr r4 | 697 | mtlr r4 |
| 695 | mtctr r5 | 698 | mtctr r5 |
| 696 | mtxer r6 | 699 | mtxer r6 |
| 697 | REST_GPR(0, r1) | 700 | REST_GPR(0, r1) |
| 698 | REST_8GPRS(2, r1) | 701 | REST_8GPRS(2, r1) |
| 699 | 702 | ||
| 700 | mfmsr r10 | 703 | mfmsr r10 |
| 701 | rldicl r10,r10,48,1 /* clear EE */ | 704 | rldicl r10,r10,48,1 /* clear EE */ |
| 702 | rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ | 705 | rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ |
| 703 | mtmsrd r10,1 | 706 | mtmsrd r10,1 |
| 704 | 707 | ||
| 705 | mtspr SPRN_SRR1,r12 | 708 | mtspr SPRN_SRR1,r12 |
| 706 | mtspr SPRN_SRR0,r11 | 709 | mtspr SPRN_SRR0,r11 |
| 707 | REST_4GPRS(10, r1) | 710 | REST_4GPRS(10, r1) |
| 708 | ld r1,GPR1(r1) | 711 | ld r1,GPR1(r1) |
| 709 | rfid | 712 | rfid |
| 710 | b . /* prevent speculative execution */ | 713 | b . /* prevent speculative execution */ |
| 711 | 714 | ||
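Two more rotate-and-mask idioms appear in this return path: fast_exc_return_irq moves MSR_EE down to the least significant bit so it can be stored into paca->hard_enabled, and fast_exception_return clears EE and RI before loading SRR0/SRR1 so that no interrupt can land between mtspr and rfid. Roughly, in C, with the conventional MSR bit positions assumed:

    #define MSR_EE (1UL << 15)  /* external interrupt enable (assumed) */
    #define MSR_RI (1UL << 1)   /* recoverable interrupt (assumed) */
    #define MSR_LE (1UL << 0)   /* little-endian, already 0 here (assumed) */

    /* rldicl r4,r12,49,63: bring EE down to the least significant bit */
    static unsigned long msr_ee_to_lsb(unsigned long msr)
    {
        return (msr >> 15) & 1;
    }

    /* rldicl ...,48,1 then rldicr ...,16,61: drop EE, RI (and LE) */
    static unsigned long msr_rfid_window(unsigned long msr)
    {
        return msr & ~(MSR_EE | MSR_RI | MSR_LE);
    }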
| 712 | unrecov_fer: | 715 | unrecov_fer: |
| 713 | bl .save_nvgprs | 716 | bl .save_nvgprs |
| 714 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 717 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
| 715 | bl .unrecoverable_exception | 718 | bl .unrecoverable_exception |
| 716 | b 1b | 719 | b 1b |
| 717 | 720 | ||
| 718 | 721 | ||
| 719 | /* | 722 | /* |
| 720 | * Hash table stuff | 723 | * Hash table stuff |
| 721 | */ | 724 | */ |
| 722 | .align 7 | 725 | .align 7 |
| 723 | _STATIC(do_hash_page) | 726 | _STATIC(do_hash_page) |
| 724 | std r3,_DAR(r1) | 727 | std r3,_DAR(r1) |
| 725 | std r4,_DSISR(r1) | 728 | std r4,_DSISR(r1) |
| 726 | 729 | ||
| 727 | andis. r0,r4,0xa450 /* weird error? */ | 730 | andis. r0,r4,0xa450 /* weird error? */ |
| 728 | bne- handle_page_fault /* if not, try to insert a HPTE */ | 731 | bne- handle_page_fault /* if not, try to insert a HPTE */ |
| 729 | BEGIN_FTR_SECTION | 732 | BEGIN_FTR_SECTION |
| 730 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | 733 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ |
| 731 | bne- do_ste_alloc /* If so handle it */ | 734 | bne- do_ste_alloc /* If so handle it */ |
| 732 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | 735 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
| 733 | 736 | ||
| 734 | clrrdi r11,r1,THREAD_SHIFT | 737 | clrrdi r11,r1,THREAD_SHIFT |
| 735 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ | 738 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ |
| 736 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ | 739 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ |
| 737 | bne 77f /* then don't call hash_page now */ | 740 | bne 77f /* then don't call hash_page now */ |
| 738 | 741 | ||
| 739 | /* | 742 | /* |
| 740 | * On iSeries, we soft-disable interrupts here, then | 743 | * On iSeries, we soft-disable interrupts here, then |
| 741 | * hard-enable interrupts so that the hash_page code can spin on | 744 | * hard-enable interrupts so that the hash_page code can spin on |
| 742 | * the hash_table_lock without problems on a shared processor. | 745 | * the hash_table_lock without problems on a shared processor. |
| 743 | */ | 746 | */ |
| 744 | DISABLE_INTS | 747 | DISABLE_INTS |
| 745 | 748 | ||
| 746 | /* | 749 | /* |
| 747 | * Currently, trace_hardirqs_off() will be called by DISABLE_INTS | 750 | * Currently, trace_hardirqs_off() will be called by DISABLE_INTS |
| 748 | * and will clobber volatile registers when irq tracing is enabled | 751 | * and will clobber volatile registers when irq tracing is enabled |
| 749 | * so we need to reload them. It may be possible to be smarter here | 752 | * so we need to reload them. It may be possible to be smarter here |
| 750 | * and move the irq tracing elsewhere but let's keep it simple for | 753 | * and move the irq tracing elsewhere but let's keep it simple for |
| 751 | * now | 754 | * now |
| 752 | */ | 755 | */ |
| 753 | #ifdef CONFIG_TRACE_IRQFLAGS | 756 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 754 | ld r3,_DAR(r1) | 757 | ld r3,_DAR(r1) |
| 755 | ld r4,_DSISR(r1) | 758 | ld r4,_DSISR(r1) |
| 756 | ld r5,_TRAP(r1) | 759 | ld r5,_TRAP(r1) |
| 757 | ld r12,_MSR(r1) | 760 | ld r12,_MSR(r1) |
| 758 | clrrdi r5,r5,4 | 761 | clrrdi r5,r5,4 |
| 759 | #endif /* CONFIG_TRACE_IRQFLAGS */ | 762 | #endif /* CONFIG_TRACE_IRQFLAGS */ |
| 760 | /* | 763 | /* |
| 761 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | 764 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are |
| 762 | * accessing a userspace segment (even from the kernel). We assume | 765 | * accessing a userspace segment (even from the kernel). We assume |
| 763 | * kernel addresses always have the high bit set. | 766 | * kernel addresses always have the high bit set. |
| 764 | */ | 767 | */ |
| 765 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | 768 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ |
| 766 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | 769 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ |
| 767 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | 770 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ |
| 768 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | 771 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ |
| 769 | ori r4,r4,1 /* add _PAGE_PRESENT */ | 772 | ori r4,r4,1 /* add _PAGE_PRESENT */ |
| 770 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | 773 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ |
| 771 | 774 | ||
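The four rotate-and-insert instructions above pack the fault state into the access mask that hash_page expects. As a rough C sketch of the same logic (the constant values below are illustrative stand-ins for the kernel's MSR/DSISR/_PAGE_* bits, and build_access_mask is a hypothetical name, not kernel code):

```c
#include <stdint.h>

/* Illustrative stand-ins; placeholders, not the real definitions. */
#define DSISR_ISSTORE	0x02000000UL
#define MSR_PR		0x00004000UL
#define _PAGE_PRESENT	0x001UL
#define _PAGE_USER	0x002UL
#define _PAGE_RW	0x004UL
#define _PAGE_EXEC	0x008UL

static uint64_t build_access_mask(uint64_t dar, uint64_t dsisr,
				  uint64_t trap, uint64_t msr)
{
	uint64_t access = _PAGE_PRESENT;	/* ori r4,r4,1 */

	if (dsisr & DSISR_ISSTORE)		/* store fault needs write */
		access |= _PAGE_RW;
	if ((msr & MSR_PR) || !(dar >> 63))	/* user mode, or user segment
						 * (kernel has high bit set) */
		access |= _PAGE_USER;
	if (trap == 0x400)			/* ISI: instruction fetch */
		access |= _PAGE_EXEC;
	return access;
}
```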
| 772 | /* | 775 | /* |
| 773 | * r3 contains the faulting address | 776 | * r3 contains the faulting address |
| 774 | * r4 contains the required access permissions | 777 | * r4 contains the required access permissions |
| 775 | * r5 contains the trap number | 778 | * r5 contains the trap number |
| 776 | * | 779 | * |
| 777 | * at return r3 = 0 for success | 780 | * at return r3 = 0 for success |
| 778 | */ | 781 | */ |
| 779 | bl .hash_page /* build HPTE if possible */ | 782 | bl .hash_page /* build HPTE if possible */ |
| 780 | cmpdi r3,0 /* see if hash_page succeeded */ | 783 | cmpdi r3,0 /* see if hash_page succeeded */ |
| 781 | 784 | ||
| 782 | BEGIN_FW_FTR_SECTION | 785 | BEGIN_FW_FTR_SECTION |
| 783 | /* | 786 | /* |
| 784 | * If we had interrupts soft-enabled at the point where the | 787 | * If we had interrupts soft-enabled at the point where the |
| 785 | * DSI/ISI occurred, and an interrupt came in during hash_page, | 788 | * DSI/ISI occurred, and an interrupt came in during hash_page, |
| 786 | * handle it now. | 789 | * handle it now. |
| 787 | * We jump to ret_from_except_lite rather than fast_exception_return | 790 | * We jump to ret_from_except_lite rather than fast_exception_return |
| 788 | * because ret_from_except_lite will check for and handle pending | 791 | * because ret_from_except_lite will check for and handle pending |
| 789 | * interrupts if necessary. | 792 | * interrupts if necessary. |
| 790 | */ | 793 | */ |
| 791 | beq 13f | 794 | beq 13f |
| 792 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | 795 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
| 793 | 796 | ||
| 794 | BEGIN_FW_FTR_SECTION | 797 | BEGIN_FW_FTR_SECTION |
| 795 | /* | 798 | /* |
| 796 | * Here we have interrupts hard-disabled, so it is sufficient | 799 | * Here we have interrupts hard-disabled, so it is sufficient |
| 797 | * to restore paca->{soft,hard}_enable and get out. | 800 | * to restore paca->{soft,hard}_enable and get out. |
| 798 | */ | 801 | */ |
| 799 | beq fast_exc_return_irq /* Return from exception on success */ | 802 | beq fast_exc_return_irq /* Return from exception on success */ |
| 800 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | 803 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) |
| 801 | 804 | ||
| 802 | /* For a hash failure, we don't bother re-enabling interrupts */ | 805 | /* For a hash failure, we don't bother re-enabling interrupts */ |
| 803 | ble- 12f | 806 | ble- 12f |
| 804 | 807 | ||
| 805 | /* | 808 | /* |
| 806 | * hash_page couldn't handle it, set soft interrupt enable back | 809 | * hash_page couldn't handle it, set soft interrupt enable back |
| 807 | * to what it was before the trap. Note that .raw_local_irq_restore | 810 | * to what it was before the trap. Note that .raw_local_irq_restore |
| 808 | * handles any interrupts pending at this point. | 811 | * handles any interrupts pending at this point. |
| 809 | */ | 812 | */ |
| 810 | ld r3,SOFTE(r1) | 813 | ld r3,SOFTE(r1) |
| 811 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) | 814 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) |
| 812 | bl .raw_local_irq_restore | 815 | bl .raw_local_irq_restore |
| 813 | b 11f | 816 | b 11f |
| 814 | 817 | ||
| 815 | /* Here we have a page fault that hash_page can't handle. */ | 818 | /* Here we have a page fault that hash_page can't handle. */ |
| 816 | handle_page_fault: | 819 | handle_page_fault: |
| 817 | ENABLE_INTS | 820 | ENABLE_INTS |
| 818 | 11: ld r4,_DAR(r1) | 821 | 11: ld r4,_DAR(r1) |
| 819 | ld r5,_DSISR(r1) | 822 | ld r5,_DSISR(r1) |
| 820 | addi r3,r1,STACK_FRAME_OVERHEAD | 823 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 821 | bl .do_page_fault | 824 | bl .do_page_fault |
| 822 | cmpdi r3,0 | 825 | cmpdi r3,0 |
| 823 | beq+ 13f | 826 | beq+ 13f |
| 824 | bl .save_nvgprs | 827 | bl .save_nvgprs |
| 825 | mr r5,r3 | 828 | mr r5,r3 |
| 826 | addi r3,r1,STACK_FRAME_OVERHEAD | 829 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 827 | lwz r4,_DAR(r1) | 830 | lwz r4,_DAR(r1) |
| 828 | bl .bad_page_fault | 831 | bl .bad_page_fault |
| 829 | b .ret_from_except | 832 | b .ret_from_except |
| 830 | 833 | ||
| 831 | 13: b .ret_from_except_lite | 834 | 13: b .ret_from_except_lite |
| 832 | 835 | ||
| 833 | /* We have a page fault that hash_page could handle, but the HV refused | 836 | /* We have a page fault that hash_page could handle, but the HV refused |
| 834 | * the PTE insertion. | 837 | * the PTE insertion. |
| 835 | */ | 838 | */ |
| 836 | 12: bl .save_nvgprs | 839 | 12: bl .save_nvgprs |
| 837 | mr r5,r3 | 840 | mr r5,r3 |
| 838 | addi r3,r1,STACK_FRAME_OVERHEAD | 841 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 839 | ld r4,_DAR(r1) | 842 | ld r4,_DAR(r1) |
| 840 | bl .low_hash_fault | 843 | bl .low_hash_fault |
| 841 | b .ret_from_except | 844 | b .ret_from_except |
| 842 | 845 | ||
| 843 | /* | 846 | /* |
| 844 | * We come here as a result of a DSI at a point where we don't want | 847 | * We come here as a result of a DSI at a point where we don't want |
| 845 | * to call hash_page, such as when we are accessing memory (possibly | 848 | * to call hash_page, such as when we are accessing memory (possibly |
| 846 | * user memory) inside a PMU interrupt that occurred while interrupts | 849 | * user memory) inside a PMU interrupt that occurred while interrupts |
| 847 | * were soft-disabled. We want to invoke the exception handler for | 850 | * were soft-disabled. We want to invoke the exception handler for |
| 848 | * the access, or panic if there isn't a handler. | 851 | * the access, or panic if there isn't a handler. |
| 849 | */ | 852 | */ |
| 850 | 77: bl .save_nvgprs | 853 | 77: bl .save_nvgprs |
| 851 | mr r4,r3 | 854 | mr r4,r3 |
| 852 | addi r3,r1,STACK_FRAME_OVERHEAD | 855 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 853 | li r5,SIGSEGV | 856 | li r5,SIGSEGV |
| 854 | bl .bad_page_fault | 857 | bl .bad_page_fault |
| 855 | b .ret_from_except | 858 | b .ret_from_except |
| 856 | 859 | ||
| 857 | /* here we have a segment miss */ | 860 | /* here we have a segment miss */ |
| 858 | do_ste_alloc: | 861 | do_ste_alloc: |
| 859 | bl .ste_allocate /* try to insert stab entry */ | 862 | bl .ste_allocate /* try to insert stab entry */ |
| 860 | cmpdi r3,0 | 863 | cmpdi r3,0 |
| 861 | bne- handle_page_fault | 864 | bne- handle_page_fault |
| 862 | b fast_exception_return | 865 | b fast_exception_return |
| 863 | 866 | ||
| 864 | /* | 867 | /* |
| 865 | * r13 points to the PACA, r9 contains the saved CR, | 868 | * r13 points to the PACA, r9 contains the saved CR, |
| 866 | * r11 and r12 contain the saved SRR0 and SRR1. | 869 | * r11 and r12 contain the saved SRR0 and SRR1. |
| 867 | * r9 - r13 are saved in paca->exslb. | 870 | * r9 - r13 are saved in paca->exslb. |
| 868 | * We assume we aren't going to take any exceptions during this procedure. | 871 | * We assume we aren't going to take any exceptions during this procedure. |
| 869 | * We assume (DAR >> 60) == 0xc. | 872 | * We assume (DAR >> 60) == 0xc. |
| 870 | */ | 873 | */ |
| 871 | .align 7 | 874 | .align 7 |
| 872 | _GLOBAL(do_stab_bolted) | 875 | _GLOBAL(do_stab_bolted) |
| 873 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | 876 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ |
| 874 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | 877 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ |
| 875 | 878 | ||
| 876 | /* Hash to the primary group */ | 879 | /* Hash to the primary group */ |
| 877 | ld r10,PACASTABVIRT(r13) | 880 | ld r10,PACASTABVIRT(r13) |
| 878 | mfspr r11,SPRN_DAR | 881 | mfspr r11,SPRN_DAR |
| 879 | srdi r11,r11,28 | 882 | srdi r11,r11,28 |
| 880 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | 883 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ |
| 881 | 884 | ||
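The rldimi above indexes the 4 KB segment table: the low five bits of the ESID select one of 32 groups of eight 16-byte STEs. A hedged C equivalent of that address computation (first_ste_of_group is a hypothetical helper name):

```c
#include <stdint.h>

/* Sketch of the group selection done by
 *   srdi   r11,r11,28     (esid = dar >> 28)
 *   rldimi r10,r11,7,52   (insert esid<<7 into address bits 7..11)
 * Each group is 128 bytes (8 STEs of 16 bytes) inside the 4 KB stab. */
static uint64_t first_ste_of_group(uint64_t stab_virt, uint64_t dar)
{
	uint64_t esid = dar >> 28;

	return (stab_virt & ~0xF80ULL) | ((esid << 7) & 0xF80ULL);
}
```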
| 882 | /* Calculate VSID */ | 885 | /* Calculate VSID */ |
| 883 | /* This is a kernel address, so protovsid = ESID */ | 886 | /* This is a kernel address, so protovsid = ESID */ |
| 884 | ASM_VSID_SCRAMBLE(r11, r9, 256M) | 887 | ASM_VSID_SCRAMBLE(r11, r9, 256M) |
| 885 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ | 888 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ |
| 886 | 889 | ||
| 887 | /* Search the primary group for a free entry */ | 890 | /* Search the primary group for a free entry */ |
| 888 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | 891 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ |
| 889 | andi. r11,r11,0x80 | 892 | andi. r11,r11,0x80 |
| 890 | beq 2f | 893 | beq 2f |
| 891 | addi r10,r10,16 | 894 | addi r10,r10,16 |
| 892 | andi. r11,r10,0x70 | 895 | andi. r11,r10,0x70 |
| 893 | bne 1b | 896 | bne 1b |
| 894 | 897 | ||
| 895 | /* Stick to searching only the primary group for now. */ | 898 | /* Stick to searching only the primary group for now. */ |
| 896 | /* At least for now, we use a very simple random castout scheme */ | 899 | /* At least for now, we use a very simple random castout scheme */ |
| 897 | /* Use the TB as a random number; OR in 1 to avoid entry 0 */ | 900 | /* Use the TB as a random number; OR in 1 to avoid entry 0 */ |
| 898 | mftb r11 | 901 | mftb r11 |
| 899 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | 902 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ |
| 900 | ori r11,r11,0x10 | 903 | ori r11,r11,0x10 |
| 901 | 904 | ||
| 902 | /* r10 currently points to an ste one past the group of interest */ | 905 | /* r10 currently points to an ste one past the group of interest */ |
| 903 | /* make it point to the randomly selected entry */ | 906 | /* make it point to the randomly selected entry */ |
| 904 | subi r10,r10,128 | 907 | subi r10,r10,128 |
| 905 | or r10,r10,r11 /* r10 is the entry to invalidate */ | 908 | or r10,r10,r11 /* r10 is the entry to invalidate */ |
| 906 | 909 | ||
| 907 | isync /* mark the entry invalid */ | 910 | isync /* mark the entry invalid */ |
| 908 | ld r11,0(r10) | 911 | ld r11,0(r10) |
| 909 | rldicl r11,r11,56,1 /* clear the valid bit */ | 912 | rldicl r11,r11,56,1 /* clear the valid bit */ |
| 910 | rotldi r11,r11,8 | 913 | rotldi r11,r11,8 |
| 911 | std r11,0(r10) | 914 | std r11,0(r10) |
| 912 | sync | 915 | sync |
| 913 | 916 | ||
| 914 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | 917 | clrrdi r11,r11,28 /* Get the esid part of the ste */ |
| 915 | slbie r11 | 918 | slbie r11 |
| 916 | 919 | ||
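Taking the inline comments at face value, the castout pick reads in C roughly as follows (a toy sketch with hypothetical names, not kernel code):

```c
#include <stdint.h>

/* Toy model of the random castout above: timebase bits select one of
 * the 16-byte STEs in the group, and OR-ing in 0x10 ensures entry 0
 * is never chosen, per the comment above. */
static uint64_t pick_castout_ste(uint64_t group_base, uint64_t timebase)
{
	uint64_t idx = (timebase << 4) & 0x70;	/* rldic r11,r11,4,57 */

	idx |= 0x10;				/* ori   r11,r11,0x10 */
	return group_base | idx;		/* or    r10,r10,r11  */
}
```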
| 917 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | 920 | 2: std r9,8(r10) /* Store the vsid part of the ste */ |
| 918 | eieio | 921 | eieio |
| 919 | 922 | ||
| 920 | mfspr r11,SPRN_DAR /* Get the new esid */ | 923 | mfspr r11,SPRN_DAR /* Get the new esid */ |
| 921 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ | 924 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ |
| 922 | ori r11,r11,0x90 /* Turn on valid and kp */ | 925 | ori r11,r11,0x90 /* Turn on valid and kp */ |
| 923 | std r11,0(r10) /* Put new entry back into the stab */ | 926 | std r11,0(r10) /* Put new entry back into the stab */ |
| 924 | 927 | ||
| 925 | sync | 928 | sync |
| 926 | 929 | ||
| 927 | /* All done -- return from exception. */ | 930 | /* All done -- return from exception. */ |
| 928 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | 931 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ |
| 929 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | 932 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ |
| 930 | 933 | ||
| 931 | andi. r10,r12,MSR_RI | 934 | andi. r10,r12,MSR_RI |
| 932 | beq- unrecov_slb | 935 | beq- unrecov_slb |
| 933 | 936 | ||
| 934 | mtcrf 0x80,r9 /* restore CR */ | 937 | mtcrf 0x80,r9 /* restore CR */ |
| 935 | 938 | ||
| 936 | mfmsr r10 | 939 | mfmsr r10 |
| 937 | clrrdi r10,r10,2 | 940 | clrrdi r10,r10,2 |
| 938 | mtmsrd r10,1 | 941 | mtmsrd r10,1 |
| 939 | 942 | ||
| 940 | mtspr SPRN_SRR0,r11 | 943 | mtspr SPRN_SRR0,r11 |
| 941 | mtspr SPRN_SRR1,r12 | 944 | mtspr SPRN_SRR1,r12 |
| 942 | ld r9,PACA_EXSLB+EX_R9(r13) | 945 | ld r9,PACA_EXSLB+EX_R9(r13) |
| 943 | ld r10,PACA_EXSLB+EX_R10(r13) | 946 | ld r10,PACA_EXSLB+EX_R10(r13) |
| 944 | ld r11,PACA_EXSLB+EX_R11(r13) | 947 | ld r11,PACA_EXSLB+EX_R11(r13) |
| 945 | ld r12,PACA_EXSLB+EX_R12(r13) | 948 | ld r12,PACA_EXSLB+EX_R12(r13) |
| 946 | ld r13,PACA_EXSLB+EX_R13(r13) | 949 | ld r13,PACA_EXSLB+EX_R13(r13) |
| 947 | rfid | 950 | rfid |
| 948 | b . /* prevent speculative execution */ | 951 | b . /* prevent speculative execution */ |
| 949 | 952 | ||
| 950 | /* | 953 | /* |
| 951 | * Space for CPU0's segment table. | 954 | * Space for CPU0's segment table. |
| 952 | * | 955 | * |
| 953 | * On iSeries, the hypervisor must fill in at least one entry before | 956 | * On iSeries, the hypervisor must fill in at least one entry before |
| 954 | * we get control (with relocate on). The address is given to the hv | 957 | * we get control (with relocate on). The address is given to the hv |
| 955 | * as a page number (see xLparMap below), so this must be at a | 958 | * as a page number (see xLparMap below), so this must be at a |
| 956 | * fixed address (the linker can't compute (u64)&initial_stab >> | 959 | * fixed address (the linker can't compute (u64)&initial_stab >> |
| 957 | * PAGE_SHIFT). | 960 | * PAGE_SHIFT). |
| 958 | */ | 961 | */ |
| 959 | . = STAB0_OFFSET /* 0x6000 */ | 962 | . = STAB0_OFFSET /* 0x6000 */ |
| 960 | .globl initial_stab | 963 | .globl initial_stab |
| 961 | initial_stab: | 964 | initial_stab: |
| 962 | .space 4096 | 965 | .space 4096 |
| 963 | 966 | ||
| 964 | #ifdef CONFIG_PPC_PSERIES | 967 | #ifdef CONFIG_PPC_PSERIES |
| 965 | /* | 968 | /* |
| 966 | * Data area reserved for FWNMI option. | 969 | * Data area reserved for FWNMI option. |
| 967 | * This address (0x7000) is fixed by the RPA. | 970 | * This address (0x7000) is fixed by the RPA. |
| 968 | */ | 971 | */ |
| 969 | . = 0x7000 | 972 | . = 0x7000 |
| 970 | .globl fwnmi_data_area | 973 | .globl fwnmi_data_area |
| 971 | fwnmi_data_area: | 974 | fwnmi_data_area: |
| 972 | #endif /* CONFIG_PPC_PSERIES */ | 975 | #endif /* CONFIG_PPC_PSERIES */ |
| 973 | 976 | ||
| 974 | /* iSeries does not use the FWNMI stuff, so it is safe to put | 977 | /* iSeries does not use the FWNMI stuff, so it is safe to put |
| 975 | * this here, even if we later allow kernels that will boot on | 978 | * this here, even if we later allow kernels that will boot on |
| 976 | * both pSeries and iSeries */ | 979 | * both pSeries and iSeries */ |
| 977 | #ifdef CONFIG_PPC_ISERIES | 980 | #ifdef CONFIG_PPC_ISERIES |
| 978 | . = LPARMAP_PHYS | 981 | . = LPARMAP_PHYS |
| 979 | .globl xLparMap | 982 | .globl xLparMap |
| 980 | xLparMap: | 983 | xLparMap: |
| 981 | .quad HvEsidsToMap /* xNumberEsids */ | 984 | .quad HvEsidsToMap /* xNumberEsids */ |
| 982 | .quad HvRangesToMap /* xNumberRanges */ | 985 | .quad HvRangesToMap /* xNumberRanges */ |
| 983 | .quad STAB0_PAGE /* xSegmentTableOffs */ | 986 | .quad STAB0_PAGE /* xSegmentTableOffs */ |
| 984 | .zero 40 /* xRsvd */ | 987 | .zero 40 /* xRsvd */ |
| 985 | /* xEsids (HvEsidsToMap entries of 2 quads) */ | 988 | /* xEsids (HvEsidsToMap entries of 2 quads) */ |
| 986 | .quad PAGE_OFFSET_ESID /* xKernelEsid */ | 989 | .quad PAGE_OFFSET_ESID /* xKernelEsid */ |
| 987 | .quad PAGE_OFFSET_VSID /* xKernelVsid */ | 990 | .quad PAGE_OFFSET_VSID /* xKernelVsid */ |
| 988 | .quad VMALLOC_START_ESID /* xKernelEsid */ | 991 | .quad VMALLOC_START_ESID /* xKernelEsid */ |
| 989 | .quad VMALLOC_START_VSID /* xKernelVsid */ | 992 | .quad VMALLOC_START_VSID /* xKernelVsid */ |
| 990 | /* xRanges (HvRangesToMap entries of 3 quads) */ | 993 | /* xRanges (HvRangesToMap entries of 3 quads) */ |
| 991 | .quad HvPagesToMap /* xPages */ | 994 | .quad HvPagesToMap /* xPages */ |
| 992 | .quad 0 /* xOffset */ | 995 | .quad 0 /* xOffset */ |
| 993 | .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */ | 996 | .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */ |
| 994 | 997 | ||
| 995 | #endif /* CONFIG_PPC_ISERIES */ | 998 | #endif /* CONFIG_PPC_ISERIES */ |
| 996 | 999 | ||
| 997 | #ifdef CONFIG_PPC_PSERIES | 1000 | #ifdef CONFIG_PPC_PSERIES |
| 998 | . = 0x8000 | 1001 | . = 0x8000 |
| 999 | #endif /* CONFIG_PPC_PSERIES */ | 1002 | #endif /* CONFIG_PPC_PSERIES */ |
| 1000 | 1003 |
arch/powerpc/kernel/irq.c
| 1 | /* | 1 | /* |
| 2 | * Derived from arch/i386/kernel/irq.c | 2 | * Derived from arch/i386/kernel/irq.c |
| 3 | * Copyright (C) 1992 Linus Torvalds | 3 | * Copyright (C) 1992 Linus Torvalds |
| 4 | * Adapted from arch/i386 by Gary Thomas | 4 | * Adapted from arch/i386 by Gary Thomas |
| 5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
| 6 | * Updated and modified by Cort Dougan <cort@fsmlabs.com> | 6 | * Updated and modified by Cort Dougan <cort@fsmlabs.com> |
| 7 | * Copyright (C) 1996-2001 Cort Dougan | 7 | * Copyright (C) 1996-2001 Cort Dougan |
| 8 | * Adapted for Power Macintosh by Paul Mackerras | 8 | * Adapted for Power Macintosh by Paul Mackerras |
| 9 | * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) | 9 | * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) |
| 10 | * | 10 | * |
| 11 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
| 12 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
| 13 | * as published by the Free Software Foundation; either version | 13 | * as published by the Free Software Foundation; either version |
| 14 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
| 15 | * | 15 | * |
| 16 | * This file contains the code used by various IRQ handling routines: | 16 | * This file contains the code used by various IRQ handling routines: |
| 17 | * asking for different IRQs should be done through these routines | 17 | * asking for different IRQs should be done through these routines |
| 18 | * instead of just grabbing them. Thus setups with different IRQ numbers | 18 | * instead of just grabbing them. Thus setups with different IRQ numbers |
| 19 | * shouldn't result in any weird surprises, and installing new handlers | 19 | * shouldn't result in any weird surprises, and installing new handlers |
| 20 | * should be easier. | 20 | * should be easier. |
| 21 | * | 21 | * |
| 22 | * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the | 22 | * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the |
| 23 | * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit | 23 | * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit |
| 24 | * mask register (of which only 16 are defined), hence the weird shifting | 24 | * mask register (of which only 16 are defined), hence the weird shifting |
| 25 | * and complement of the cached_irq_mask. I want to be able to stuff | 25 | * and complement of the cached_irq_mask. I want to be able to stuff |
| 26 | * this right into the SIU SMASK register. | 26 | * this right into the SIU SMASK register. |
| 27 | * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx | 27 | * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx |
| 28 | * to reduce code space and undefined function references. | 28 | * to reduce code space and undefined function references. |
| 29 | */ | 29 | */ |
| 30 | 30 | ||
| 31 | #undef DEBUG | 31 | #undef DEBUG |
| 32 | 32 | ||
| 33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
| 34 | #include <linux/threads.h> | 34 | #include <linux/threads.h> |
| 35 | #include <linux/kernel_stat.h> | 35 | #include <linux/kernel_stat.h> |
| 36 | #include <linux/signal.h> | 36 | #include <linux/signal.h> |
| 37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
| 38 | #include <linux/ptrace.h> | 38 | #include <linux/ptrace.h> |
| 39 | #include <linux/ioport.h> | 39 | #include <linux/ioport.h> |
| 40 | #include <linux/interrupt.h> | 40 | #include <linux/interrupt.h> |
| 41 | #include <linux/timex.h> | 41 | #include <linux/timex.h> |
| 42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
| 43 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
| 44 | #include <linux/delay.h> | 44 | #include <linux/delay.h> |
| 45 | #include <linux/irq.h> | 45 | #include <linux/irq.h> |
| 46 | #include <linux/seq_file.h> | 46 | #include <linux/seq_file.h> |
| 47 | #include <linux/cpumask.h> | 47 | #include <linux/cpumask.h> |
| 48 | #include <linux/profile.h> | 48 | #include <linux/profile.h> |
| 49 | #include <linux/bitops.h> | 49 | #include <linux/bitops.h> |
| 50 | #include <linux/list.h> | 50 | #include <linux/list.h> |
| 51 | #include <linux/radix-tree.h> | 51 | #include <linux/radix-tree.h> |
| 52 | #include <linux/mutex.h> | 52 | #include <linux/mutex.h> |
| 53 | #include <linux/bootmem.h> | 53 | #include <linux/bootmem.h> |
| 54 | #include <linux/pci.h> | 54 | #include <linux/pci.h> |
| 55 | #include <linux/debugfs.h> | 55 | #include <linux/debugfs.h> |
| 56 | #include <linux/perf_event.h> | 56 | #include <linux/perf_event.h> |
| 57 | 57 | ||
| 58 | #include <asm/uaccess.h> | 58 | #include <asm/uaccess.h> |
| 59 | #include <asm/system.h> | 59 | #include <asm/system.h> |
| 60 | #include <asm/io.h> | 60 | #include <asm/io.h> |
| 61 | #include <asm/pgtable.h> | 61 | #include <asm/pgtable.h> |
| 62 | #include <asm/irq.h> | 62 | #include <asm/irq.h> |
| 63 | #include <asm/cache.h> | 63 | #include <asm/cache.h> |
| 64 | #include <asm/prom.h> | 64 | #include <asm/prom.h> |
| 65 | #include <asm/ptrace.h> | 65 | #include <asm/ptrace.h> |
| 66 | #include <asm/machdep.h> | 66 | #include <asm/machdep.h> |
| 67 | #include <asm/udbg.h> | 67 | #include <asm/udbg.h> |
| 68 | #ifdef CONFIG_PPC64 | 68 | #ifdef CONFIG_PPC64 |
| 69 | #include <asm/paca.h> | 69 | #include <asm/paca.h> |
| 70 | #include <asm/firmware.h> | 70 | #include <asm/firmware.h> |
| 71 | #include <asm/lv1call.h> | 71 | #include <asm/lv1call.h> |
| 72 | #endif | 72 | #endif |
| 73 | #define CREATE_TRACE_POINTS | ||
| 74 | #include <asm/trace.h> | ||
| 73 | 75 | ||
| 74 | int __irq_offset_value; | 76 | int __irq_offset_value; |
| 75 | static int ppc_spurious_interrupts; | 77 | static int ppc_spurious_interrupts; |
| 76 | 78 | ||
| 77 | #ifdef CONFIG_PPC32 | 79 | #ifdef CONFIG_PPC32 |
| 78 | EXPORT_SYMBOL(__irq_offset_value); | 80 | EXPORT_SYMBOL(__irq_offset_value); |
| 79 | atomic_t ppc_n_lost_interrupts; | 81 | atomic_t ppc_n_lost_interrupts; |
| 80 | 82 | ||
| 81 | #ifdef CONFIG_TAU_INT | 83 | #ifdef CONFIG_TAU_INT |
| 82 | extern int tau_initialized; | 84 | extern int tau_initialized; |
| 83 | extern int tau_interrupts(int); | 85 | extern int tau_interrupts(int); |
| 84 | #endif | 86 | #endif |
| 85 | #endif /* CONFIG_PPC32 */ | 87 | #endif /* CONFIG_PPC32 */ |
| 86 | 88 | ||
| 87 | #ifdef CONFIG_PPC64 | 89 | #ifdef CONFIG_PPC64 |
| 88 | EXPORT_SYMBOL(irq_desc); | 90 | EXPORT_SYMBOL(irq_desc); |
| 89 | 91 | ||
| 90 | int distribute_irqs = 1; | 92 | int distribute_irqs = 1; |
| 91 | 93 | ||
| 92 | static inline notrace unsigned long get_hard_enabled(void) | 94 | static inline notrace unsigned long get_hard_enabled(void) |
| 93 | { | 95 | { |
| 94 | unsigned long enabled; | 96 | unsigned long enabled; |
| 95 | 97 | ||
| 96 | __asm__ __volatile__("lbz %0,%1(13)" | 98 | __asm__ __volatile__("lbz %0,%1(13)" |
| 97 | : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); | 99 | : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); |
| 98 | 100 | ||
| 99 | return enabled; | 101 | return enabled; |
| 100 | } | 102 | } |
| 101 | 103 | ||
| 102 | static inline notrace void set_soft_enabled(unsigned long enable) | 104 | static inline notrace void set_soft_enabled(unsigned long enable) |
| 103 | { | 105 | { |
| 104 | __asm__ __volatile__("stb %0,%1(13)" | 106 | __asm__ __volatile__("stb %0,%1(13)" |
| 105 | : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); | 107 | : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); |
| 106 | } | 108 | } |
| 107 | 109 | ||
| 108 | notrace void raw_local_irq_restore(unsigned long en) | 110 | notrace void raw_local_irq_restore(unsigned long en) |
| 109 | { | 111 | { |
| 110 | /* | 112 | /* |
| 111 | * get_paca()->soft_enabled = en; | 113 | * get_paca()->soft_enabled = en; |
| 112 | * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? | 114 | * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? |
| 113 | * That was allowed before, and in such a case we do need to take care | 115 | * That was allowed before, and in such a case we do need to take care |
| 114 | * that gcc will set soft_enabled directly via r13, not choose to use | 116 | * that gcc will set soft_enabled directly via r13, not choose to use |
| 115 | * an intermediate register, lest we're preempted to a different cpu. | 117 | * an intermediate register, lest we're preempted to a different cpu. |
| 116 | */ | 118 | */ |
| 117 | set_soft_enabled(en); | 119 | set_soft_enabled(en); |
| 118 | if (!en) | 120 | if (!en) |
| 119 | return; | 121 | return; |
| 120 | 122 | ||
| 121 | #ifdef CONFIG_PPC_STD_MMU_64 | 123 | #ifdef CONFIG_PPC_STD_MMU_64 |
| 122 | if (firmware_has_feature(FW_FEATURE_ISERIES)) { | 124 | if (firmware_has_feature(FW_FEATURE_ISERIES)) { |
| 123 | /* | 125 | /* |
| 124 | * Do we need to disable preemption here? Not really: in the | 126 | * Do we need to disable preemption here? Not really: in the |
| 125 | * unlikely event that we're preempted to a different cpu in | 127 | * unlikely event that we're preempted to a different cpu in |
| 126 | * between getting r13, loading its lppaca_ptr, and loading | 128 | * between getting r13, loading its lppaca_ptr, and loading |
| 127 | * its any_int, we might call iseries_handle_interrupts without | 129 | * its any_int, we might call iseries_handle_interrupts without |
| 128 | * an interrupt pending on the new cpu, but that's no disaster, | 130 | * an interrupt pending on the new cpu, but that's no disaster, |
| 129 | * is it? And the business of preempting us off the old cpu | 131 | * is it? And the business of preempting us off the old cpu |
| 130 | * would itself involve a local_irq_restore which handles the | 132 | * would itself involve a local_irq_restore which handles the |
| 131 | * interrupt to that cpu. | 133 | * interrupt to that cpu. |
| 132 | * | 134 | * |
| 133 | * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" | 135 | * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" |
| 134 | * to avoid any preemption checking added into get_paca(). | 136 | * to avoid any preemption checking added into get_paca(). |
| 135 | */ | 137 | */ |
| 136 | if (local_paca->lppaca_ptr->int_dword.any_int) | 138 | if (local_paca->lppaca_ptr->int_dword.any_int) |
| 137 | iseries_handle_interrupts(); | 139 | iseries_handle_interrupts(); |
| 138 | } | 140 | } |
| 139 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 141 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
| 140 | 142 | ||
| 141 | if (test_perf_event_pending()) { | 143 | if (test_perf_event_pending()) { |
| 142 | clear_perf_event_pending(); | 144 | clear_perf_event_pending(); |
| 143 | perf_event_do_pending(); | 145 | perf_event_do_pending(); |
| 144 | } | 146 | } |
| 145 | 147 | ||
| 146 | /* | 148 | /* |
| 147 | * if (get_paca()->hard_enabled) return; | 149 | * if (get_paca()->hard_enabled) return; |
| 148 | * But again we need to take care that gcc gets hard_enabled directly | 150 | * But again we need to take care that gcc gets hard_enabled directly |
| 149 | * via r13, not choose to use an intermediate register, lest we're | 151 | * via r13, not choose to use an intermediate register, lest we're |
| 150 | * preempted to a different cpu in between the two instructions. | 152 | * preempted to a different cpu in between the two instructions. |
| 151 | */ | 153 | */ |
| 152 | if (get_hard_enabled()) | 154 | if (get_hard_enabled()) |
| 153 | return; | 155 | return; |
| 154 | 156 | ||
| 155 | /* | 157 | /* |
| 156 | * Need to hard-enable interrupts here. Since currently disabled, | 158 | * Need to hard-enable interrupts here. Since currently disabled, |
| 157 | * no need to take further asm precautions against preemption; but | 159 | * no need to take further asm precautions against preemption; but |
| 158 | * use local_paca instead of get_paca() to avoid preemption checking. | 160 | * use local_paca instead of get_paca() to avoid preemption checking. |
| 159 | */ | 161 | */ |
| 160 | local_paca->hard_enabled = en; | 162 | local_paca->hard_enabled = en; |
| 161 | if ((int)mfspr(SPRN_DEC) < 0) | 163 | if ((int)mfspr(SPRN_DEC) < 0) |
| 162 | mtspr(SPRN_DEC, 1); | 164 | mtspr(SPRN_DEC, 1); |
| 163 | 165 | ||
| 164 | /* | 166 | /* |
| 165 | * Force the delivery of pending soft-disabled interrupts on PS3. | 167 | * Force the delivery of pending soft-disabled interrupts on PS3. |
| 166 | * Any HV call will have this side effect. | 168 | * Any HV call will have this side effect. |
| 167 | */ | 169 | */ |
| 168 | if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { | 170 | if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { |
| 169 | u64 tmp; | 171 | u64 tmp; |
| 170 | lv1_get_version_info(&tmp); | 172 | lv1_get_version_info(&tmp); |
| 171 | } | 173 | } |
| 172 | 174 | ||
| 173 | __hard_irq_enable(); | 175 | __hard_irq_enable(); |
| 174 | } | 176 | } |
| 175 | EXPORT_SYMBOL(raw_local_irq_restore); | 177 | EXPORT_SYMBOL(raw_local_irq_restore); |
| 176 | #endif /* CONFIG_PPC64 */ | 178 | #endif /* CONFIG_PPC64 */ |
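raw_local_irq_restore() above is the restore half of the 64-bit lazy interrupt-masking scheme: local_irq_disable() only clears the paca soft_enabled byte, hard disabling happens lazily when an interrupt actually arrives, and the restore path must replay whatever was deferred. A toy userspace model of that contract (all names here are hypothetical; this is a sketch, not kernel code):

```c
#include <stdbool.h>
#include <stdio.h>

static bool soft_enabled = true;
static bool hard_enabled = true;
static bool irq_pending;

static void hw_irq_arrives(void)
{
	if (!soft_enabled) {
		/* low-level handler: record it and hard-disable */
		irq_pending = true;
		hard_enabled = false;
		return;
	}
	puts("irq handled immediately");
}

static void toy_irq_restore(bool en)
{
	soft_enabled = en;
	if (!en)
		return;
	if (irq_pending) {
		irq_pending = false;
		puts("replaying irq recorded while soft-disabled");
	}
	if (hard_enabled)
		return;			/* nothing was hard-disabled */
	hard_enabled = true;		/* the __hard_irq_enable() step */
}

int main(void)
{
	soft_enabled = false;		/* local_irq_disable(): one store */
	hw_irq_arrives();		/* masked: recorded, not handled  */
	toy_irq_restore(true);		/* deferred irq replayed here     */
	return 0;
}
```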
| 177 | 179 | ||
| 178 | int show_interrupts(struct seq_file *p, void *v) | 180 | int show_interrupts(struct seq_file *p, void *v) |
| 179 | { | 181 | { |
| 180 | int i = *(loff_t *)v, j; | 182 | int i = *(loff_t *)v, j; |
| 181 | struct irqaction *action; | 183 | struct irqaction *action; |
| 182 | struct irq_desc *desc; | 184 | struct irq_desc *desc; |
| 183 | unsigned long flags; | 185 | unsigned long flags; |
| 184 | 186 | ||
| 185 | if (i == 0) { | 187 | if (i == 0) { |
| 186 | seq_puts(p, " "); | 188 | seq_puts(p, " "); |
| 187 | for_each_online_cpu(j) | 189 | for_each_online_cpu(j) |
| 188 | seq_printf(p, "CPU%d ", j); | 190 | seq_printf(p, "CPU%d ", j); |
| 189 | seq_putc(p, '\n'); | 191 | seq_putc(p, '\n'); |
| 190 | } | 192 | } |
| 191 | 193 | ||
| 192 | if (i < NR_IRQS) { | 194 | if (i < NR_IRQS) { |
| 193 | desc = get_irq_desc(i); | 195 | desc = get_irq_desc(i); |
| 194 | spin_lock_irqsave(&desc->lock, flags); | 196 | spin_lock_irqsave(&desc->lock, flags); |
| 195 | action = desc->action; | 197 | action = desc->action; |
| 196 | if (!action || !action->handler) | 198 | if (!action || !action->handler) |
| 197 | goto skip; | 199 | goto skip; |
| 198 | seq_printf(p, "%3d: ", i); | 200 | seq_printf(p, "%3d: ", i); |
| 199 | #ifdef CONFIG_SMP | 201 | #ifdef CONFIG_SMP |
| 200 | for_each_online_cpu(j) | 202 | for_each_online_cpu(j) |
| 201 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 203 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
| 202 | #else | 204 | #else |
| 203 | seq_printf(p, "%10u ", kstat_irqs(i)); | 205 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 204 | #endif /* CONFIG_SMP */ | 206 | #endif /* CONFIG_SMP */ |
| 205 | if (desc->chip) | 207 | if (desc->chip) |
| 206 | seq_printf(p, " %s ", desc->chip->typename); | 208 | seq_printf(p, " %s ", desc->chip->typename); |
| 207 | else | 209 | else |
| 208 | seq_puts(p, " None "); | 210 | seq_puts(p, " None "); |
| 209 | seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge "); | 211 | seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge "); |
| 210 | seq_printf(p, " %s", action->name); | 212 | seq_printf(p, " %s", action->name); |
| 211 | for (action = action->next; action; action = action->next) | 213 | for (action = action->next; action; action = action->next) |
| 212 | seq_printf(p, ", %s", action->name); | 214 | seq_printf(p, ", %s", action->name); |
| 213 | seq_putc(p, '\n'); | 215 | seq_putc(p, '\n'); |
| 214 | skip: | 216 | skip: |
| 215 | spin_unlock_irqrestore(&desc->lock, flags); | 217 | spin_unlock_irqrestore(&desc->lock, flags); |
| 216 | } else if (i == NR_IRQS) { | 218 | } else if (i == NR_IRQS) { |
| 217 | #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) | 219 | #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) |
| 218 | if (tau_initialized){ | 220 | if (tau_initialized){ |
| 219 | seq_puts(p, "TAU: "); | 221 | seq_puts(p, "TAU: "); |
| 220 | for_each_online_cpu(j) | 222 | for_each_online_cpu(j) |
| 221 | seq_printf(p, "%10u ", tau_interrupts(j)); | 223 | seq_printf(p, "%10u ", tau_interrupts(j)); |
| 222 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | 224 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); |
| 223 | } | 225 | } |
| 224 | #endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/ | 226 | #endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/ |
| 225 | seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); | 227 | seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); |
| 226 | } | 228 | } |
| 227 | return 0; | 229 | return 0; |
| 228 | } | 230 | } |
| 229 | 231 | ||
| 230 | #ifdef CONFIG_HOTPLUG_CPU | 232 | #ifdef CONFIG_HOTPLUG_CPU |
| 231 | void fixup_irqs(cpumask_t map) | 233 | void fixup_irqs(cpumask_t map) |
| 232 | { | 234 | { |
| 233 | unsigned int irq; | 235 | unsigned int irq; |
| 234 | static int warned; | 236 | static int warned; |
| 235 | 237 | ||
| 236 | for_each_irq(irq) { | 238 | for_each_irq(irq) { |
| 237 | cpumask_t mask; | 239 | cpumask_t mask; |
| 238 | 240 | ||
| 239 | if (irq_desc[irq].status & IRQ_PER_CPU) | 241 | if (irq_desc[irq].status & IRQ_PER_CPU) |
| 240 | continue; | 242 | continue; |
| 241 | 243 | ||
| 242 | cpumask_and(&mask, irq_desc[irq].affinity, &map); | 244 | cpumask_and(&mask, irq_desc[irq].affinity, &map); |
| 243 | if (any_online_cpu(mask) == NR_CPUS) { | 245 | if (any_online_cpu(mask) == NR_CPUS) { |
| 244 | printk("Breaking affinity for irq %i\n", irq); | 246 | printk("Breaking affinity for irq %i\n", irq); |
| 245 | mask = map; | 247 | mask = map; |
| 246 | } | 248 | } |
| 247 | if (irq_desc[irq].chip->set_affinity) | 249 | if (irq_desc[irq].chip->set_affinity) |
| 248 | irq_desc[irq].chip->set_affinity(irq, &mask); | 250 | irq_desc[irq].chip->set_affinity(irq, &mask); |
| 249 | else if (irq_desc[irq].action && !(warned++)) | 251 | else if (irq_desc[irq].action && !(warned++)) |
| 250 | printk("Cannot set affinity for irq %i\n", irq); | 252 | printk("Cannot set affinity for irq %i\n", irq); |
| 251 | } | 253 | } |
| 252 | 254 | ||
| 253 | local_irq_enable(); | 255 | local_irq_enable(); |
| 254 | mdelay(1); | 256 | mdelay(1); |
| 255 | local_irq_disable(); | 257 | local_irq_disable(); |
| 256 | } | 258 | } |
| 257 | #endif | 259 | #endif |
| 258 | 260 | ||
| 259 | #ifdef CONFIG_IRQSTACKS | 261 | #ifdef CONFIG_IRQSTACKS |
| 260 | static inline void handle_one_irq(unsigned int irq) | 262 | static inline void handle_one_irq(unsigned int irq) |
| 261 | { | 263 | { |
| 262 | struct thread_info *curtp, *irqtp; | 264 | struct thread_info *curtp, *irqtp; |
| 263 | unsigned long saved_sp_limit; | 265 | unsigned long saved_sp_limit; |
| 264 | struct irq_desc *desc; | 266 | struct irq_desc *desc; |
| 265 | 267 | ||
| 266 | /* Switch to the irq stack to handle this */ | 268 | /* Switch to the irq stack to handle this */ |
| 267 | curtp = current_thread_info(); | 269 | curtp = current_thread_info(); |
| 268 | irqtp = hardirq_ctx[smp_processor_id()]; | 270 | irqtp = hardirq_ctx[smp_processor_id()]; |
| 269 | 271 | ||
| 270 | if (curtp == irqtp) { | 272 | if (curtp == irqtp) { |
| 271 | /* We're already on the irq stack, just handle it */ | 273 | /* We're already on the irq stack, just handle it */ |
| 272 | generic_handle_irq(irq); | 274 | generic_handle_irq(irq); |
| 273 | return; | 275 | return; |
| 274 | } | 276 | } |
| 275 | 277 | ||
| 276 | desc = irq_desc + irq; | 278 | desc = irq_desc + irq; |
| 277 | saved_sp_limit = current->thread.ksp_limit; | 279 | saved_sp_limit = current->thread.ksp_limit; |
| 278 | 280 | ||
| 279 | irqtp->task = curtp->task; | 281 | irqtp->task = curtp->task; |
| 280 | irqtp->flags = 0; | 282 | irqtp->flags = 0; |
| 281 | 283 | ||
| 282 | /* Copy the softirq bits in preempt_count so that the | 284 | /* Copy the softirq bits in preempt_count so that the |
| 283 | * softirq checks work in the hardirq context. */ | 285 | * softirq checks work in the hardirq context. */ |
| 284 | irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | | 286 | irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | |
| 285 | (curtp->preempt_count & SOFTIRQ_MASK); | 287 | (curtp->preempt_count & SOFTIRQ_MASK); |
| 286 | 288 | ||
| 287 | current->thread.ksp_limit = (unsigned long)irqtp + | 289 | current->thread.ksp_limit = (unsigned long)irqtp + |
| 288 | _ALIGN_UP(sizeof(struct thread_info), 16); | 290 | _ALIGN_UP(sizeof(struct thread_info), 16); |
| 289 | 291 | ||
| 290 | call_handle_irq(irq, desc, irqtp, desc->handle_irq); | 292 | call_handle_irq(irq, desc, irqtp, desc->handle_irq); |
| 291 | current->thread.ksp_limit = saved_sp_limit; | 293 | current->thread.ksp_limit = saved_sp_limit; |
| 292 | irqtp->task = NULL; | 294 | irqtp->task = NULL; |
| 293 | 295 | ||
| 294 | /* Set any flag that may have been set on the | 296 | /* Set any flag that may have been set on the |
| 295 | * alternate stack | 297 | * alternate stack |
| 296 | */ | 298 | */ |
| 297 | if (irqtp->flags) | 299 | if (irqtp->flags) |
| 298 | set_bits(irqtp->flags, &curtp->flags); | 300 | set_bits(irqtp->flags, &curtp->flags); |
| 299 | } | 301 | } |
| 300 | #else | 302 | #else |
| 301 | static inline void handle_one_irq(unsigned int irq) | 303 | static inline void handle_one_irq(unsigned int irq) |
| 302 | { | 304 | { |
| 303 | generic_handle_irq(irq); | 305 | generic_handle_irq(irq); |
| 304 | } | 306 | } |
| 305 | #endif | 307 | #endif |
| 306 | 308 | ||
| 307 | static inline void check_stack_overflow(void) | 309 | static inline void check_stack_overflow(void) |
| 308 | { | 310 | { |
| 309 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 311 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
| 310 | long sp; | 312 | long sp; |
| 311 | 313 | ||
| 312 | sp = __get_SP() & (THREAD_SIZE-1); | 314 | sp = __get_SP() & (THREAD_SIZE-1); |
| 313 | 315 | ||
| 314 | /* check for stack overflow: is there less than 2KB free? */ | 316 | /* check for stack overflow: is there less than 2KB free? */ |
| 315 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { | 317 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { |
| 316 | printk("do_IRQ: stack overflow: %ld\n", | 318 | printk("do_IRQ: stack overflow: %ld\n", |
| 317 | sp - sizeof(struct thread_info)); | 319 | sp - sizeof(struct thread_info)); |
| 318 | dump_stack(); | 320 | dump_stack(); |
| 319 | } | 321 | } |
| 320 | #endif | 322 | #endif |
| 321 | } | 323 | } |
| 322 | 324 | ||
| 323 | void do_IRQ(struct pt_regs *regs) | 325 | void do_IRQ(struct pt_regs *regs) |
| 324 | { | 326 | { |
| 325 | struct pt_regs *old_regs = set_irq_regs(regs); | 327 | struct pt_regs *old_regs = set_irq_regs(regs); |
| 326 | unsigned int irq; | 328 | unsigned int irq; |
| 327 | 329 | ||
| 330 | trace_irq_entry(regs); | ||
| 331 | |||
| 328 | irq_enter(); | 332 | irq_enter(); |
| 329 | 333 | ||
| 330 | check_stack_overflow(); | 334 | check_stack_overflow(); |
| 331 | 335 | ||
| 332 | irq = ppc_md.get_irq(); | 336 | irq = ppc_md.get_irq(); |
| 333 | 337 | ||
| 334 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) | 338 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) |
| 335 | handle_one_irq(irq); | 339 | handle_one_irq(irq); |
| 336 | else if (irq != NO_IRQ_IGNORE) | 340 | else if (irq != NO_IRQ_IGNORE) |
| 337 | /* That's not SMP safe ... but who cares? */ | 341 | /* That's not SMP safe ... but who cares? */ |
| 338 | ppc_spurious_interrupts++; | 342 | ppc_spurious_interrupts++; |
| 339 | 343 | ||
| 340 | irq_exit(); | 344 | irq_exit(); |
| 341 | set_irq_regs(old_regs); | 345 | set_irq_regs(old_regs); |
| 342 | 346 | ||
| 343 | #ifdef CONFIG_PPC_ISERIES | 347 | #ifdef CONFIG_PPC_ISERIES |
| 344 | if (firmware_has_feature(FW_FEATURE_ISERIES) && | 348 | if (firmware_has_feature(FW_FEATURE_ISERIES) && |
| 345 | get_lppaca()->int_dword.fields.decr_int) { | 349 | get_lppaca()->int_dword.fields.decr_int) { |
| 346 | get_lppaca()->int_dword.fields.decr_int = 0; | 350 | get_lppaca()->int_dword.fields.decr_int = 0; |
| 347 | /* Signal a fake decrementer interrupt */ | 351 | /* Signal a fake decrementer interrupt */ |
| 348 | timer_interrupt(regs); | 352 | timer_interrupt(regs); |
| 349 | } | 353 | } |
| 350 | #endif | 354 | #endif |
| 355 | |||
| 356 | trace_irq_exit(regs); | ||
| 351 | } | 357 | } |
| 352 | 358 | ||
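The trace_irq_entry()/trace_irq_exit() calls added above are tracepoints generated from the new asm/trace.h in this commit; defining CREATE_TRACE_POINTS before including the header, as done at the top of this file, is what emits the tracepoint bodies here. Below is a sketch of how such a tracepoint is typically declared with TRACE_EVENT(); the exact field layout is an assumption based on the trace_irq_entry(regs) call, not quoted from the header:

```c
#undef TRACE_SYSTEM
#define TRACE_SYSTEM powerpc

#if !defined(_TRACE_POWERPC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWERPC_H

#include <linux/tracepoint.h>

struct pt_regs;

TRACE_EVENT(irq_entry,

	TP_PROTO(struct pt_regs *regs),

	TP_ARGS(regs),

	TP_STRUCT__entry(
		__field(struct pt_regs *, regs)
	),

	TP_fast_assign(
		__entry->regs = regs;
	),

	TP_printk("pt_regs=%p", __entry->regs)
);

#endif /* _TRACE_POWERPC_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
```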
| 353 | void __init init_IRQ(void) | 359 | void __init init_IRQ(void) |
| 354 | { | 360 | { |
| 355 | if (ppc_md.init_IRQ) | 361 | if (ppc_md.init_IRQ) |
| 356 | ppc_md.init_IRQ(); | 362 | ppc_md.init_IRQ(); |
| 357 | 363 | ||
| 358 | exc_lvl_ctx_init(); | 364 | exc_lvl_ctx_init(); |
| 359 | 365 | ||
| 360 | irq_ctx_init(); | 366 | irq_ctx_init(); |
| 361 | } | 367 | } |
| 362 | 368 | ||
| 363 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | 369 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) |
| 364 | struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; | 370 | struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; |
| 365 | struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; | 371 | struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; |
| 366 | struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; | 372 | struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; |
| 367 | 373 | ||
| 368 | void exc_lvl_ctx_init(void) | 374 | void exc_lvl_ctx_init(void) |
| 369 | { | 375 | { |
| 370 | struct thread_info *tp; | 376 | struct thread_info *tp; |
| 371 | int i; | 377 | int i; |
| 372 | 378 | ||
| 373 | for_each_possible_cpu(i) { | 379 | for_each_possible_cpu(i) { |
| 374 | memset((void *)critirq_ctx[i], 0, THREAD_SIZE); | 380 | memset((void *)critirq_ctx[i], 0, THREAD_SIZE); |
| 375 | tp = critirq_ctx[i]; | 381 | tp = critirq_ctx[i]; |
| 376 | tp->cpu = i; | 382 | tp->cpu = i; |
| 377 | tp->preempt_count = 0; | 383 | tp->preempt_count = 0; |
| 378 | 384 | ||
| 379 | #ifdef CONFIG_BOOKE | 385 | #ifdef CONFIG_BOOKE |
| 380 | memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE); | 386 | memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE); |
| 381 | tp = dbgirq_ctx[i]; | 387 | tp = dbgirq_ctx[i]; |
| 382 | tp->cpu = i; | 388 | tp->cpu = i; |
| 383 | tp->preempt_count = 0; | 389 | tp->preempt_count = 0; |
| 384 | 390 | ||
| 385 | memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE); | 391 | memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE); |
| 386 | tp = mcheckirq_ctx[i]; | 392 | tp = mcheckirq_ctx[i]; |
| 387 | tp->cpu = i; | 393 | tp->cpu = i; |
| 388 | tp->preempt_count = HARDIRQ_OFFSET; | 394 | tp->preempt_count = HARDIRQ_OFFSET; |
| 389 | #endif | 395 | #endif |
| 390 | } | 396 | } |
| 391 | } | 397 | } |
| 392 | #endif | 398 | #endif |
| 393 | 399 | ||
| 394 | #ifdef CONFIG_IRQSTACKS | 400 | #ifdef CONFIG_IRQSTACKS |
| 395 | struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; | 401 | struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; |
| 396 | struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; | 402 | struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; |
| 397 | 403 | ||
| 398 | void irq_ctx_init(void) | 404 | void irq_ctx_init(void) |
| 399 | { | 405 | { |
| 400 | struct thread_info *tp; | 406 | struct thread_info *tp; |
| 401 | int i; | 407 | int i; |
| 402 | 408 | ||
| 403 | for_each_possible_cpu(i) { | 409 | for_each_possible_cpu(i) { |
| 404 | memset((void *)softirq_ctx[i], 0, THREAD_SIZE); | 410 | memset((void *)softirq_ctx[i], 0, THREAD_SIZE); |
| 405 | tp = softirq_ctx[i]; | 411 | tp = softirq_ctx[i]; |
| 406 | tp->cpu = i; | 412 | tp->cpu = i; |
| 407 | tp->preempt_count = 0; | 413 | tp->preempt_count = 0; |
| 408 | 414 | ||
| 409 | memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); | 415 | memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); |
| 410 | tp = hardirq_ctx[i]; | 416 | tp = hardirq_ctx[i]; |
| 411 | tp->cpu = i; | 417 | tp->cpu = i; |
| 412 | tp->preempt_count = HARDIRQ_OFFSET; | 418 | tp->preempt_count = HARDIRQ_OFFSET; |
| 413 | } | 419 | } |
| 414 | } | 420 | } |
| 415 | 421 | ||
| 416 | static inline void do_softirq_onstack(void) | 422 | static inline void do_softirq_onstack(void) |
| 417 | { | 423 | { |
| 418 | struct thread_info *curtp, *irqtp; | 424 | struct thread_info *curtp, *irqtp; |
| 419 | unsigned long saved_sp_limit = current->thread.ksp_limit; | 425 | unsigned long saved_sp_limit = current->thread.ksp_limit; |
| 420 | 426 | ||
| 421 | curtp = current_thread_info(); | 427 | curtp = current_thread_info(); |
| 422 | irqtp = softirq_ctx[smp_processor_id()]; | 428 | irqtp = softirq_ctx[smp_processor_id()]; |
| 423 | irqtp->task = curtp->task; | 429 | irqtp->task = curtp->task; |
| 424 | current->thread.ksp_limit = (unsigned long)irqtp + | 430 | current->thread.ksp_limit = (unsigned long)irqtp + |
| 425 | _ALIGN_UP(sizeof(struct thread_info), 16); | 431 | _ALIGN_UP(sizeof(struct thread_info), 16); |
| 426 | call_do_softirq(irqtp); | 432 | call_do_softirq(irqtp); |
| 427 | current->thread.ksp_limit = saved_sp_limit; | 433 | current->thread.ksp_limit = saved_sp_limit; |
| 428 | irqtp->task = NULL; | 434 | irqtp->task = NULL; |
| 429 | } | 435 | } |
| 430 | 436 | ||
| 431 | #else | 437 | #else |
| 432 | #define do_softirq_onstack() __do_softirq() | 438 | #define do_softirq_onstack() __do_softirq() |
| 433 | #endif /* CONFIG_IRQSTACKS */ | 439 | #endif /* CONFIG_IRQSTACKS */ |
| 434 | 440 | ||
| 435 | void do_softirq(void) | 441 | void do_softirq(void) |
| 436 | { | 442 | { |
| 437 | unsigned long flags; | 443 | unsigned long flags; |
| 438 | 444 | ||
| 439 | if (in_interrupt()) | 445 | if (in_interrupt()) |
| 440 | return; | 446 | return; |
| 441 | 447 | ||
| 442 | local_irq_save(flags); | 448 | local_irq_save(flags); |
| 443 | 449 | ||
| 444 | if (local_softirq_pending()) | 450 | if (local_softirq_pending()) |
| 445 | do_softirq_onstack(); | 451 | do_softirq_onstack(); |
| 446 | 452 | ||
| 447 | local_irq_restore(flags); | 453 | local_irq_restore(flags); |
| 448 | } | 454 | } |
| 449 | 455 | ||
| 450 | 456 | ||
| 451 | /* | 457 | /* |
| 452 | * IRQ controller and virtual interrupts | 458 | * IRQ controller and virtual interrupts |
| 453 | */ | 459 | */ |
| 454 | 460 | ||
| 455 | static LIST_HEAD(irq_hosts); | 461 | static LIST_HEAD(irq_hosts); |
| 456 | static DEFINE_SPINLOCK(irq_big_lock); | 462 | static DEFINE_SPINLOCK(irq_big_lock); |
| 457 | static unsigned int revmap_trees_allocated; | 463 | static unsigned int revmap_trees_allocated; |
| 458 | static DEFINE_MUTEX(revmap_trees_mutex); | 464 | static DEFINE_MUTEX(revmap_trees_mutex); |
| 459 | struct irq_map_entry irq_map[NR_IRQS]; | 465 | struct irq_map_entry irq_map[NR_IRQS]; |
| 460 | static unsigned int irq_virq_count = NR_IRQS; | 466 | static unsigned int irq_virq_count = NR_IRQS; |
| 461 | static struct irq_host *irq_default_host; | 467 | static struct irq_host *irq_default_host; |
| 462 | 468 | ||
| 463 | irq_hw_number_t virq_to_hw(unsigned int virq) | 469 | irq_hw_number_t virq_to_hw(unsigned int virq) |
| 464 | { | 470 | { |
| 465 | return irq_map[virq].hwirq; | 471 | return irq_map[virq].hwirq; |
| 466 | } | 472 | } |
| 467 | EXPORT_SYMBOL_GPL(virq_to_hw); | 473 | EXPORT_SYMBOL_GPL(virq_to_hw); |
| 468 | 474 | ||
| 469 | static int default_irq_host_match(struct irq_host *h, struct device_node *np) | 475 | static int default_irq_host_match(struct irq_host *h, struct device_node *np) |
| 470 | { | 476 | { |
| 471 | return h->of_node != NULL && h->of_node == np; | 477 | return h->of_node != NULL && h->of_node == np; |
| 472 | } | 478 | } |
| 473 | 479 | ||
| 474 | struct irq_host *irq_alloc_host(struct device_node *of_node, | 480 | struct irq_host *irq_alloc_host(struct device_node *of_node, |
| 475 | unsigned int revmap_type, | 481 | unsigned int revmap_type, |
| 476 | unsigned int revmap_arg, | 482 | unsigned int revmap_arg, |
| 477 | struct irq_host_ops *ops, | 483 | struct irq_host_ops *ops, |
| 478 | irq_hw_number_t inval_irq) | 484 | irq_hw_number_t inval_irq) |
| 479 | { | 485 | { |
| 480 | struct irq_host *host; | 486 | struct irq_host *host; |
| 481 | unsigned int size = sizeof(struct irq_host); | 487 | unsigned int size = sizeof(struct irq_host); |
| 482 | unsigned int i; | 488 | unsigned int i; |
| 483 | unsigned int *rmap; | 489 | unsigned int *rmap; |
| 484 | unsigned long flags; | 490 | unsigned long flags; |
| 485 | 491 | ||
| 486 | /* Allocate structure and revmap table if using linear mapping */ | 492 | /* Allocate structure and revmap table if using linear mapping */ |
| 487 | if (revmap_type == IRQ_HOST_MAP_LINEAR) | 493 | if (revmap_type == IRQ_HOST_MAP_LINEAR) |
| 488 | size += revmap_arg * sizeof(unsigned int); | 494 | size += revmap_arg * sizeof(unsigned int); |
| 489 | host = zalloc_maybe_bootmem(size, GFP_KERNEL); | 495 | host = zalloc_maybe_bootmem(size, GFP_KERNEL); |
| 490 | if (host == NULL) | 496 | if (host == NULL) |
| 491 | return NULL; | 497 | return NULL; |
| 492 | 498 | ||
| 493 | /* Fill structure */ | 499 | /* Fill structure */ |
| 494 | host->revmap_type = revmap_type; | 500 | host->revmap_type = revmap_type; |
| 495 | host->inval_irq = inval_irq; | 501 | host->inval_irq = inval_irq; |
| 496 | host->ops = ops; | 502 | host->ops = ops; |
| 497 | host->of_node = of_node_get(of_node); | 503 | host->of_node = of_node_get(of_node); |
| 498 | 504 | ||
| 499 | if (host->ops->match == NULL) | 505 | if (host->ops->match == NULL) |
| 500 | host->ops->match = default_irq_host_match; | 506 | host->ops->match = default_irq_host_match; |
| 501 | 507 | ||
| 502 | spin_lock_irqsave(&irq_big_lock, flags); | 508 | spin_lock_irqsave(&irq_big_lock, flags); |
| 503 | 509 | ||
| 504 | /* If it's a legacy controller, check for duplicates and | 510 | /* If it's a legacy controller, check for duplicates and |
| 505 | * mark it as allocated (we use irq 0 host pointer for that) | 511 | * mark it as allocated (we use irq 0 host pointer for that) |
| 506 | */ | 512 | */ |
| 507 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { | 513 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { |
| 508 | if (irq_map[0].host != NULL) { | 514 | if (irq_map[0].host != NULL) { |
| 509 | spin_unlock_irqrestore(&irq_big_lock, flags); | 515 | spin_unlock_irqrestore(&irq_big_lock, flags); |
| 510 | /* If we are in early boot, we can't free the structure, | 516 | /* If we are in early boot, we can't free the structure, |
| 511 | * too bad... | 517 | * too bad... |
| 512 | * this will be fixed once slab is made available early | 518 | * this will be fixed once slab is made available early |
| 513 | * instead of the current cruft | 519 | * instead of the current cruft |
| 514 | */ | 520 | */ |
| 515 | if (mem_init_done) | 521 | if (mem_init_done) |
| 516 | kfree(host); | 522 | kfree(host); |
| 517 | return NULL; | 523 | return NULL; |
| 518 | } | 524 | } |
| 519 | irq_map[0].host = host; | 525 | irq_map[0].host = host; |
| 520 | } | 526 | } |
| 521 | 527 | ||
| 522 | list_add(&host->link, &irq_hosts); | 528 | list_add(&host->link, &irq_hosts); |
| 523 | spin_unlock_irqrestore(&irq_big_lock, flags); | 529 | spin_unlock_irqrestore(&irq_big_lock, flags); |
| 524 | 530 | ||
| 525 | /* Additional setups per revmap type */ | 531 | /* Additional setups per revmap type */ |
| 526 | switch(revmap_type) { | 532 | switch(revmap_type) { |
| 527 | case IRQ_HOST_MAP_LEGACY: | 533 | case IRQ_HOST_MAP_LEGACY: |
| 528 | /* 0 is always the invalid number for legacy */ | 534 | /* 0 is always the invalid number for legacy */ |
| 529 | host->inval_irq = 0; | 535 | host->inval_irq = 0; |
| 530 | /* setup us as the host for all legacy interrupts */ | 536 | /* setup us as the host for all legacy interrupts */ |
| 531 | for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { | 537 | for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { |
| 532 | irq_map[i].hwirq = i; | 538 | irq_map[i].hwirq = i; |
| 533 | smp_wmb(); | 539 | smp_wmb(); |
| 534 | irq_map[i].host = host; | 540 | irq_map[i].host = host; |
| 535 | smp_wmb(); | 541 | smp_wmb(); |
| 536 | 542 | ||
| 537 | /* Clear norequest flags */ | 543 | /* Clear norequest flags */ |
| 538 | get_irq_desc(i)->status &= ~IRQ_NOREQUEST; | 544 | get_irq_desc(i)->status &= ~IRQ_NOREQUEST; |
| 539 | 545 | ||
| 540 | /* Legacy flags are left to default at this point, | 546 | /* Legacy flags are left to default at this point, |
| 541 | * one can then use irq_create_mapping() to | 547 | * one can then use irq_create_mapping() to |
| 542 | * explicitly change them | 548 | * explicitly change them |
| 543 | */ | 549 | */ |
| 544 | ops->map(host, i, i); | 550 | ops->map(host, i, i); |
| 545 | } | 551 | } |
| 546 | break; | 552 | break; |
| 547 | case IRQ_HOST_MAP_LINEAR: | 553 | case IRQ_HOST_MAP_LINEAR: |
| 548 | rmap = (unsigned int *)(host + 1); | 554 | rmap = (unsigned int *)(host + 1); |
| 549 | for (i = 0; i < revmap_arg; i++) | 555 | for (i = 0; i < revmap_arg; i++) |
| 550 | rmap[i] = NO_IRQ; | 556 | rmap[i] = NO_IRQ; |
| 551 | host->revmap_data.linear.size = revmap_arg; | 557 | host->revmap_data.linear.size = revmap_arg; |
| 552 | smp_wmb(); | 558 | smp_wmb(); |
| 553 | host->revmap_data.linear.revmap = rmap; | 559 | host->revmap_data.linear.revmap = rmap; |
| 554 | break; | 560 | break; |
| 555 | default: | 561 | default: |
| 556 | break; | 562 | break; |
| 557 | } | 563 | } |
| 558 | 564 | ||
| 559 | pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); | 565 | pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); |
| 560 | 566 | ||
| 561 | return host; | 567 | return host; |
| 562 | } | 568 | } |
| 563 | 569 | ||
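Note how, for IRQ_HOST_MAP_LINEAR, the reverse-map array is carved out of the same allocation as the host structure, via rmap = (unsigned int *)(host + 1). A minimal standalone sketch of that trailing-array idiom (toy names, not kernel code):

```c
#include <stdio.h>
#include <stdlib.h>

/* The revmap array lives immediately after the struct in one
 * allocation, so (h + 1) points at its first element. */
struct toy_host {
	unsigned int revmap_size;
	unsigned int *revmap;
};

static struct toy_host *toy_alloc_host(unsigned int nr_hwirqs)
{
	struct toy_host *h;
	unsigned int i;

	h = calloc(1, sizeof(*h) + nr_hwirqs * sizeof(unsigned int));
	if (!h)
		return NULL;
	h->revmap_size = nr_hwirqs;
	h->revmap = (unsigned int *)(h + 1);	/* trailing array */
	for (i = 0; i < nr_hwirqs; i++)
		h->revmap[i] = ~0u;		/* stand-in for NO_IRQ */
	return h;
}

int main(void)
{
	struct toy_host *h = toy_alloc_host(16);

	if (h)
		printf("revmap[0] = 0x%x\n", h->revmap[0]);
	free(h);
	return 0;
}
```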
| 564 | struct irq_host *irq_find_host(struct device_node *node) | 570 | struct irq_host *irq_find_host(struct device_node *node) |
| 565 | { | 571 | { |
| 566 | struct irq_host *h, *found = NULL; | 572 | struct irq_host *h, *found = NULL; |
| 567 | unsigned long flags; | 573 | unsigned long flags; |
| 568 | 574 | ||
| 569 | /* We might want to match the legacy controller last since | 575 | /* We might want to match the legacy controller last since |
| 570 | * it might potentially be set to match all interrupts in | 576 | * it might potentially be set to match all interrupts in |
| 571 | * the absence of a device node. This isn't a problem so far, | 577 | * the absence of a device node. This isn't a problem so far, |
| 572 | * though... | 578 | * though... |
| 573 | */ | 579 | */ |
| 574 | spin_lock_irqsave(&irq_big_lock, flags); | 580 | spin_lock_irqsave(&irq_big_lock, flags); |
| 575 | list_for_each_entry(h, &irq_hosts, link) | 581 | list_for_each_entry(h, &irq_hosts, link) |
| 576 | if (h->ops->match(h, node)) { | 582 | if (h->ops->match(h, node)) { |
| 577 | found = h; | 583 | found = h; |
| 578 | break; | 584 | break; |
| 579 | } | 585 | } |
| 580 | spin_unlock_irqrestore(&irq_big_lock, flags); | 586 | spin_unlock_irqrestore(&irq_big_lock, flags); |
| 581 | return found; | 587 | return found; |
| 582 | } | 588 | } |
| 583 | EXPORT_SYMBOL_GPL(irq_find_host); | 589 | EXPORT_SYMBOL_GPL(irq_find_host); |
| 584 | 590 | ||
| 585 | void irq_set_default_host(struct irq_host *host) | 591 | void irq_set_default_host(struct irq_host *host) |
| 586 | { | 592 | { |
| 587 | pr_debug("irq: Default host set to @0x%p\n", host); | 593 | pr_debug("irq: Default host set to @0x%p\n", host); |
| 588 | 594 | ||
| 589 | irq_default_host = host; | 595 | irq_default_host = host; |
| 590 | } | 596 | } |
| 591 | 597 | ||
| 592 | void irq_set_virq_count(unsigned int count) | 598 | void irq_set_virq_count(unsigned int count) |
| 593 | { | 599 | { |
| 594 | pr_debug("irq: Trying to set virq count to %d\n", count); | 600 | pr_debug("irq: Trying to set virq count to %d\n", count); |
| 595 | 601 | ||
| 596 | BUG_ON(count < NUM_ISA_INTERRUPTS); | 602 | BUG_ON(count < NUM_ISA_INTERRUPTS); |
| 597 | if (count < NR_IRQS) | 603 | if (count < NR_IRQS) |
| 598 | irq_virq_count = count; | 604 | irq_virq_count = count; |
| 599 | } | 605 | } |
| 600 | 606 | ||
| 601 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, | 607 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, |
| 602 | irq_hw_number_t hwirq) | 608 | irq_hw_number_t hwirq) |
| 603 | { | 609 | { |
| 604 | /* Clear IRQ_NOREQUEST flag */ | 610 | /* Clear IRQ_NOREQUEST flag */ |
| 605 | get_irq_desc(virq)->status &= ~IRQ_NOREQUEST; | 611 | get_irq_desc(virq)->status &= ~IRQ_NOREQUEST; |
| 606 | 612 | ||
| 607 | /* map it */ | 613 | /* map it */ |
| 608 | smp_wmb(); | 614 | smp_wmb(); |
| 609 | irq_map[virq].hwirq = hwirq; | 615 | irq_map[virq].hwirq = hwirq; |
| 610 | smp_mb(); | 616 | smp_mb(); |
| 611 | 617 | ||
| 612 | if (host->ops->map(host, virq, hwirq)) { | 618 | if (host->ops->map(host, virq, hwirq)) { |
| 613 | pr_debug("irq: -> mapping failed, freeing\n"); | 619 | pr_debug("irq: -> mapping failed, freeing\n"); |
| 614 | irq_free_virt(virq, 1); | 620 | irq_free_virt(virq, 1); |
| 615 | return -1; | 621 | return -1; |
| 616 | } | 622 | } |
| 617 | 623 | ||
| 618 | return 0; | 624 | return 0; |
| 619 | } | 625 | } |
| 620 | 626 | ||
| 621 | unsigned int irq_create_direct_mapping(struct irq_host *host) | 627 | unsigned int irq_create_direct_mapping(struct irq_host *host) |
| 622 | { | 628 | { |
| 623 | unsigned int virq; | 629 | unsigned int virq; |
| 624 | 630 | ||
| 625 | if (host == NULL) | 631 | if (host == NULL) |
| 626 | host = irq_default_host; | 632 | host = irq_default_host; |
| 627 | 633 | ||
| 628 | BUG_ON(host == NULL); | 634 | BUG_ON(host == NULL); |
| 629 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); | 635 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); |
| 630 | 636 | ||
| 631 | virq = irq_alloc_virt(host, 1, 0); | 637 | virq = irq_alloc_virt(host, 1, 0); |
| 632 | if (virq == NO_IRQ) { | 638 | if (virq == NO_IRQ) { |
| 633 | pr_debug("irq: create_direct virq allocation failed\n"); | 639 | pr_debug("irq: create_direct virq allocation failed\n"); |
| 634 | return NO_IRQ; | 640 | return NO_IRQ; |
| 635 | } | 641 | } |
| 636 | 642 | ||
| 637 | pr_debug("irq: create_direct obtained virq %d\n", virq); | 643 | pr_debug("irq: create_direct obtained virq %d\n", virq); |
| 638 | 644 | ||
| 639 | if (irq_setup_virq(host, virq, virq)) | 645 | if (irq_setup_virq(host, virq, virq)) |
| 640 | return NO_IRQ; | 646 | return NO_IRQ; |
| 641 | 647 | ||
| 642 | return virq; | 648 | return virq; |
| 643 | } | 649 | } |
| 644 | 650 | ||
| 645 | unsigned int irq_create_mapping(struct irq_host *host, | 651 | unsigned int irq_create_mapping(struct irq_host *host, |
| 646 | irq_hw_number_t hwirq) | 652 | irq_hw_number_t hwirq) |
| 647 | { | 653 | { |
| 648 | unsigned int virq, hint; | 654 | unsigned int virq, hint; |
| 649 | 655 | ||
| 650 | pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); | 656 | pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); |
| 651 | 657 | ||
| 652 | /* Look for default host if necessary */ | 658 | /* Look for default host if necessary */ |
| 653 | if (host == NULL) | 659 | if (host == NULL) |
| 654 | host = irq_default_host; | 660 | host = irq_default_host; |
| 655 | if (host == NULL) { | 661 | if (host == NULL) { |
| 656 | printk(KERN_WARNING "irq_create_mapping called for" | 662 | printk(KERN_WARNING "irq_create_mapping called for" |
| 657 | " NULL host, hwirq=%lx\n", hwirq); | 663 | " NULL host, hwirq=%lx\n", hwirq); |
| 658 | WARN_ON(1); | 664 | WARN_ON(1); |
| 659 | return NO_IRQ; | 665 | return NO_IRQ; |
| 660 | } | 666 | } |
| 661 | pr_debug("irq: -> using host @%p\n", host); | 667 | pr_debug("irq: -> using host @%p\n", host); |
| 662 | 668 | ||
| 663 | /* Check if mapping already exists; if it does, call | 669 | /* Check if mapping already exists; if it does, call |
| 664 | * host->ops->map() to update the flags | 670 | * host->ops->map() to update the flags |
| 665 | */ | 671 | */ |
| 666 | virq = irq_find_mapping(host, hwirq); | 672 | virq = irq_find_mapping(host, hwirq); |
| 667 | if (virq != NO_IRQ) { | 673 | if (virq != NO_IRQ) { |
| 668 | if (host->ops->remap) | 674 | if (host->ops->remap) |
| 669 | host->ops->remap(host, virq, hwirq); | 675 | host->ops->remap(host, virq, hwirq); |
| 670 | pr_debug("irq: -> existing mapping on virq %d\n", virq); | 676 | pr_debug("irq: -> existing mapping on virq %d\n", virq); |
| 671 | return virq; | 677 | return virq; |
| 672 | } | 678 | } |
| 673 | 679 | ||
| 674 | /* Get a virtual interrupt number */ | 680 | /* Get a virtual interrupt number */ |
| 675 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { | 681 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { |
| 676 | /* Handle legacy */ | 682 | /* Handle legacy */ |
| 677 | virq = (unsigned int)hwirq; | 683 | virq = (unsigned int)hwirq; |
| 678 | if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) | 684 | if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) |
| 679 | return NO_IRQ; | 685 | return NO_IRQ; |
| 680 | return virq; | 686 | return virq; |
| 681 | } else { | 687 | } else { |
| 682 | /* Allocate a virtual interrupt number */ | 688 | /* Allocate a virtual interrupt number */ |
| 683 | hint = hwirq % irq_virq_count; | 689 | hint = hwirq % irq_virq_count; |
| 684 | virq = irq_alloc_virt(host, 1, hint); | 690 | virq = irq_alloc_virt(host, 1, hint); |
| 685 | if (virq == NO_IRQ) { | 691 | if (virq == NO_IRQ) { |
| 686 | pr_debug("irq: -> virq allocation failed\n"); | 692 | pr_debug("irq: -> virq allocation failed\n"); |
| 687 | return NO_IRQ; | 693 | return NO_IRQ; |
| 688 | } | 694 | } |
| 689 | } | 695 | } |
| 690 | 696 | ||
| 691 | if (irq_setup_virq(host, virq, hwirq)) | 697 | if (irq_setup_virq(host, virq, hwirq)) |
| 692 | return NO_IRQ; | 698 | return NO_IRQ; |
| 693 | 699 | ||
| 694 | printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n", | 700 | printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n", |
| 695 | hwirq, host->of_node ? host->of_node->full_name : "null", virq); | 701 | hwirq, host->of_node ? host->of_node->full_name : "null", virq); |
| 696 | 702 | ||
| 697 | return virq; | 703 | return virq; |
| 698 | } | 704 | } |
| 699 | EXPORT_SYMBOL_GPL(irq_create_mapping); | 705 | EXPORT_SYMBOL_GPL(irq_create_mapping); |
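A minimal usage sketch for irq_create_mapping(), assuming a hypothetical PIC driver with an already-allocated host my_pic_host and a hardware interrupt number MY_PIC_HWIRQ_TIMER (both names are placeholders, not from this commit):

    unsigned int virq = irq_create_mapping(my_pic_host, MY_PIC_HWIRQ_TIMER);
    if (virq == NO_IRQ)
            return -ENODEV;  /* no free virq, or host->ops->map() failed */
    /* virq is now an ordinary Linux interrupt number, usable with request_irq() */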
| 700 | 706 | ||
| 701 | unsigned int irq_create_of_mapping(struct device_node *controller, | 707 | unsigned int irq_create_of_mapping(struct device_node *controller, |
| 702 | u32 *intspec, unsigned int intsize) | 708 | u32 *intspec, unsigned int intsize) |
| 703 | { | 709 | { |
| 704 | struct irq_host *host; | 710 | struct irq_host *host; |
| 705 | irq_hw_number_t hwirq; | 711 | irq_hw_number_t hwirq; |
| 706 | unsigned int type = IRQ_TYPE_NONE; | 712 | unsigned int type = IRQ_TYPE_NONE; |
| 707 | unsigned int virq; | 713 | unsigned int virq; |
| 708 | 714 | ||
| 709 | if (controller == NULL) | 715 | if (controller == NULL) |
| 710 | host = irq_default_host; | 716 | host = irq_default_host; |
| 711 | else | 717 | else |
| 712 | host = irq_find_host(controller); | 718 | host = irq_find_host(controller); |
| 713 | if (host == NULL) { | 719 | if (host == NULL) { |
| 714 | printk(KERN_WARNING "irq: no irq host found for %s !\n", | 720 | printk(KERN_WARNING "irq: no irq host found for %s !\n", |
| 715 | controller->full_name); | 721 | controller->full_name); |
| 716 | return NO_IRQ; | 722 | return NO_IRQ; |
| 717 | } | 723 | } |
| 718 | 724 | ||
| 719 | /* If host has no translation, then we assume interrupt line */ | 725 | /* If host has no translation, then we assume interrupt line */ |
| 720 | if (host->ops->xlate == NULL) | 726 | if (host->ops->xlate == NULL) |
| 721 | hwirq = intspec[0]; | 727 | hwirq = intspec[0]; |
| 722 | else { | 728 | else { |
| 723 | if (host->ops->xlate(host, controller, intspec, intsize, | 729 | if (host->ops->xlate(host, controller, intspec, intsize, |
| 724 | &hwirq, &type)) | 730 | &hwirq, &type)) |
| 725 | return NO_IRQ; | 731 | return NO_IRQ; |
| 726 | } | 732 | } |
| 727 | 733 | ||
| 728 | /* Create mapping */ | 734 | /* Create mapping */ |
| 729 | virq = irq_create_mapping(host, hwirq); | 735 | virq = irq_create_mapping(host, hwirq); |
| 730 | if (virq == NO_IRQ) | 736 | if (virq == NO_IRQ) |
| 731 | return virq; | 737 | return virq; |
| 732 | 738 | ||
| 733 | /* Set type if specified and different from the current one */ | 739 | /* Set type if specified and different from the current one */ |
| 734 | if (type != IRQ_TYPE_NONE && | 740 | if (type != IRQ_TYPE_NONE && |
| 735 | type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK)) | 741 | type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK)) |
| 736 | set_irq_type(virq, type); | 742 | set_irq_type(virq, type); |
| 737 | return virq; | 743 | return virq; |
| 738 | } | 744 | } |
| 739 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | 745 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); |
| 740 | 746 | ||
| 741 | unsigned int irq_of_parse_and_map(struct device_node *dev, int index) | 747 | unsigned int irq_of_parse_and_map(struct device_node *dev, int index) |
| 742 | { | 748 | { |
| 743 | struct of_irq oirq; | 749 | struct of_irq oirq; |
| 744 | 750 | ||
| 745 | if (of_irq_map_one(dev, index, &oirq)) | 751 | if (of_irq_map_one(dev, index, &oirq)) |
| 746 | return NO_IRQ; | 752 | return NO_IRQ; |
| 747 | 753 | ||
| 748 | return irq_create_of_mapping(oirq.controller, oirq.specifier, | 754 | return irq_create_of_mapping(oirq.controller, oirq.specifier, |
| 749 | oirq.size); | 755 | oirq.size); |
| 750 | } | 756 | } |
| 751 | EXPORT_SYMBOL_GPL(irq_of_parse_and_map); | 757 | EXPORT_SYMBOL_GPL(irq_of_parse_and_map); |
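A minimal probe-path sketch for irq_of_parse_and_map(), assuming a hypothetical device node np and irq handler my_handler; index 0 selects the first entry of the node's "interrupts" property:

    unsigned int virq = irq_of_parse_and_map(np, 0);
    if (virq == NO_IRQ)
            return -EINVAL;  /* node has no usable interrupt 0 */
    ret = request_irq(virq, my_handler, 0, "mydev", NULL);
    /* teardown order: free_irq(virq, NULL); then irq_dispose_mapping(virq); */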
| 752 | 758 | ||
| 753 | void irq_dispose_mapping(unsigned int virq) | 759 | void irq_dispose_mapping(unsigned int virq) |
| 754 | { | 760 | { |
| 755 | struct irq_host *host; | 761 | struct irq_host *host; |
| 756 | irq_hw_number_t hwirq; | 762 | irq_hw_number_t hwirq; |
| 757 | 763 | ||
| 758 | if (virq == NO_IRQ) | 764 | if (virq == NO_IRQ) |
| 759 | return; | 765 | return; |
| 760 | 766 | ||
| 761 | host = irq_map[virq].host; | 767 | host = irq_map[virq].host; |
| 762 | WARN_ON(host == NULL); | 768 | WARN_ON(host == NULL); |
| 763 | if (host == NULL) | 769 | if (host == NULL) |
| 764 | return; | 770 | return; |
| 765 | 771 | ||
| 766 | /* Never unmap legacy interrupts */ | 772 | /* Never unmap legacy interrupts */ |
| 767 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) | 773 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) |
| 768 | return; | 774 | return; |
| 769 | 775 | ||
| 770 | /* remove chip and handler */ | 776 | /* remove chip and handler */ |
| 771 | set_irq_chip_and_handler(virq, NULL, NULL); | 777 | set_irq_chip_and_handler(virq, NULL, NULL); |
| 772 | 778 | ||
| 773 | /* Make sure it's completed */ | 779 | /* Make sure it's completed */ |
| 774 | synchronize_irq(virq); | 780 | synchronize_irq(virq); |
| 775 | 781 | ||
| 776 | /* Tell the PIC about it */ | 782 | /* Tell the PIC about it */ |
| 777 | if (host->ops->unmap) | 783 | if (host->ops->unmap) |
| 778 | host->ops->unmap(host, virq); | 784 | host->ops->unmap(host, virq); |
| 779 | smp_mb(); | 785 | smp_mb(); |
| 780 | 786 | ||
| 781 | /* Clear reverse map */ | 787 | /* Clear reverse map */ |
| 782 | hwirq = irq_map[virq].hwirq; | 788 | hwirq = irq_map[virq].hwirq; |
| 783 | switch (host->revmap_type) { | 789 | switch (host->revmap_type) { |
| 784 | case IRQ_HOST_MAP_LINEAR: | 790 | case IRQ_HOST_MAP_LINEAR: |
| 785 | if (hwirq < host->revmap_data.linear.size) | 791 | if (hwirq < host->revmap_data.linear.size) |
| 786 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; | 792 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; |
| 787 | break; | 793 | break; |
| 788 | case IRQ_HOST_MAP_TREE: | 794 | case IRQ_HOST_MAP_TREE: |
| 789 | /* | 795 | /* |
| 790 | * Check if the radix tree has been allocated yet; if not, | 796 | * Check if the radix tree has been allocated yet; if not, |
| 791 | * there is nothing to remove. | 797 | * there is nothing to remove. |
| 792 | */ | 798 | */ |
| 793 | smp_rmb(); | 799 | smp_rmb(); |
| 794 | if (revmap_trees_allocated < 1) | 800 | if (revmap_trees_allocated < 1) |
| 795 | break; | 801 | break; |
| 796 | mutex_lock(&revmap_trees_mutex); | 802 | mutex_lock(&revmap_trees_mutex); |
| 797 | radix_tree_delete(&host->revmap_data.tree, hwirq); | 803 | radix_tree_delete(&host->revmap_data.tree, hwirq); |
| 798 | mutex_unlock(&revmap_trees_mutex); | 804 | mutex_unlock(&revmap_trees_mutex); |
| 799 | break; | 805 | break; |
| 800 | } | 806 | } |
| 801 | 807 | ||
| 802 | /* Destroy map */ | 808 | /* Destroy map */ |
| 803 | smp_mb(); | 809 | smp_mb(); |
| 804 | irq_map[virq].hwirq = host->inval_irq; | 810 | irq_map[virq].hwirq = host->inval_irq; |
| 805 | 811 | ||
| 806 | /* Set some flags */ | 812 | /* Set some flags */ |
| 807 | get_irq_desc(virq)->status |= IRQ_NOREQUEST; | 813 | get_irq_desc(virq)->status |= IRQ_NOREQUEST; |
| 808 | 814 | ||
| 809 | /* Free it */ | 815 | /* Free it */ |
| 810 | irq_free_virt(virq, 1); | 816 | irq_free_virt(virq, 1); |
| 811 | } | 817 | } |
| 812 | EXPORT_SYMBOL_GPL(irq_dispose_mapping); | 818 | EXPORT_SYMBOL_GPL(irq_dispose_mapping); |
| 813 | 819 | ||
| 814 | unsigned int irq_find_mapping(struct irq_host *host, | 820 | unsigned int irq_find_mapping(struct irq_host *host, |
| 815 | irq_hw_number_t hwirq) | 821 | irq_hw_number_t hwirq) |
| 816 | { | 822 | { |
| 817 | unsigned int i; | 823 | unsigned int i; |
| 818 | unsigned int hint = hwirq % irq_virq_count; | 824 | unsigned int hint = hwirq % irq_virq_count; |
| 819 | 825 | ||
| 820 | /* Look for default host if necessary */ | 826 | /* Look for default host if necessary */ |
| 821 | if (host == NULL) | 827 | if (host == NULL) |
| 822 | host = irq_default_host; | 828 | host = irq_default_host; |
| 823 | if (host == NULL) | 829 | if (host == NULL) |
| 824 | return NO_IRQ; | 830 | return NO_IRQ; |
| 825 | 831 | ||
| 826 | /* legacy -> bail early */ | 832 | /* legacy -> bail early */ |
| 827 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) | 833 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) |
| 828 | return hwirq; | 834 | return hwirq; |
| 829 | 835 | ||
| 830 | /* Slow path does a linear search of the map */ | 836 | /* Slow path does a linear search of the map */ |
| 831 | if (hint < NUM_ISA_INTERRUPTS) | 837 | if (hint < NUM_ISA_INTERRUPTS) |
| 832 | hint = NUM_ISA_INTERRUPTS; | 838 | hint = NUM_ISA_INTERRUPTS; |
| 833 | i = hint; | 839 | i = hint; |
| 834 | do { | 840 | do { |
| 835 | if (irq_map[i].host == host && | 841 | if (irq_map[i].host == host && |
| 836 | irq_map[i].hwirq == hwirq) | 842 | irq_map[i].hwirq == hwirq) |
| 837 | return i; | 843 | return i; |
| 838 | i++; | 844 | i++; |
| 839 | if (i >= irq_virq_count) | 845 | if (i >= irq_virq_count) |
| 840 | i = NUM_ISA_INTERRUPTS; | 846 | i = NUM_ISA_INTERRUPTS; |
| 841 | } while (i != hint); | 847 | } while (i != hint); |
| 842 | return NO_IRQ; | 848 | return NO_IRQ; |
| 843 | } | 849 | } |
| 844 | EXPORT_SYMBOL_GPL(irq_find_mapping); | 850 | EXPORT_SYMBOL_GPL(irq_find_mapping); |
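A worked example of the slow-path wraparound, with assumed values irq_virq_count = 512 and NUM_ISA_INTERRUPTS = 16: hwirq = 600 gives hint = 600 % 512 = 88, so the scan covers 88..511, wraps back to 16, continues through 16..87, and gives up when i returns to 88. Slots 0..15 are reserved for legacy ISA interrupts and are never matched here.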
| 845 | 851 | ||
| 846 | 852 | ||
| 847 | unsigned int irq_radix_revmap_lookup(struct irq_host *host, | 853 | unsigned int irq_radix_revmap_lookup(struct irq_host *host, |
| 848 | irq_hw_number_t hwirq) | 854 | irq_hw_number_t hwirq) |
| 849 | { | 855 | { |
| 850 | struct irq_map_entry *ptr; | 856 | struct irq_map_entry *ptr; |
| 851 | unsigned int virq; | 857 | unsigned int virq; |
| 852 | 858 | ||
| 853 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | 859 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); |
| 854 | 860 | ||
| 855 | /* | 861 | /* |
| 856 | * Check if the radix tree exists and has been initialized. | 862 | * Check if the radix tree exists and has been initialized. |
| 857 | * If not, we fall back to the slow path. | 863 | * If not, we fall back to the slow path. |
| 858 | */ | 864 | */ |
| 859 | if (revmap_trees_allocated < 2) | 865 | if (revmap_trees_allocated < 2) |
| 860 | return irq_find_mapping(host, hwirq); | 866 | return irq_find_mapping(host, hwirq); |
| 861 | 867 | ||
| 862 | /* Now try to resolve */ | 868 | /* Now try to resolve */ |
| 863 | /* | 869 | /* |
| 864 | * No rcu_read_lock(ing) needed, the ptr returned can't go under us | 870 | * No rcu_read_lock(ing) needed, the ptr returned can't go under us |
| 865 | * as it's referencing an entry in the static irq_map table. | 871 | * as it's referencing an entry in the static irq_map table. |
| 866 | */ | 872 | */ |
| 867 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); | 873 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); |
| 868 | 874 | ||
| 869 | /* | 875 | /* |
| 870 | * If found in radix tree, then fine. | 876 | * If found in radix tree, then fine. |
| 871 | * Else fall back to linear lookup - this should not happen in practice | 877 | * Else fall back to linear lookup - this should not happen in practice |
| 872 | * as it means that we failed to insert the node in the radix tree. | 878 | * as it means that we failed to insert the node in the radix tree. |
| 873 | */ | 879 | */ |
| 874 | if (ptr) | 880 | if (ptr) |
| 875 | virq = ptr - irq_map; | 881 | virq = ptr - irq_map; |
| 876 | else | 882 | else |
| 877 | virq = irq_find_mapping(host, hwirq); | 883 | virq = irq_find_mapping(host, hwirq); |
| 878 | 884 | ||
| 879 | return virq; | 885 | return virq; |
| 880 | } | 886 | } |
| 881 | 887 | ||
| 882 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, | 888 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, |
| 883 | irq_hw_number_t hwirq) | 889 | irq_hw_number_t hwirq) |
| 884 | { | 890 | { |
| 885 | 891 | ||
| 886 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | 892 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); |
| 887 | 893 | ||
| 888 | /* | 894 | /* |
| 889 | * Check if the radix tree exists yet. | 895 | * Check if the radix tree exists yet. |
| 890 | * If not, then the irq will be inserted into the tree when it gets | 896 | * If not, then the irq will be inserted into the tree when it gets |
| 891 | * initialized. | 897 | * initialized. |
| 892 | */ | 898 | */ |
| 893 | smp_rmb(); | 899 | smp_rmb(); |
| 894 | if (revmap_trees_allocated < 1) | 900 | if (revmap_trees_allocated < 1) |
| 895 | return; | 901 | return; |
| 896 | 902 | ||
| 897 | if (virq != NO_IRQ) { | 903 | if (virq != NO_IRQ) { |
| 898 | mutex_lock(&revmap_trees_mutex); | 904 | mutex_lock(&revmap_trees_mutex); |
| 899 | radix_tree_insert(&host->revmap_data.tree, hwirq, | 905 | radix_tree_insert(&host->revmap_data.tree, hwirq, |
| 900 | &irq_map[virq]); | 906 | &irq_map[virq]); |
| 901 | mutex_unlock(&revmap_trees_mutex); | 907 | mutex_unlock(&revmap_trees_mutex); |
| 902 | } | 908 | } |
| 903 | } | 909 | } |
| 904 | 910 | ||
| 905 | unsigned int irq_linear_revmap(struct irq_host *host, | 911 | unsigned int irq_linear_revmap(struct irq_host *host, |
| 906 | irq_hw_number_t hwirq) | 912 | irq_hw_number_t hwirq) |
| 907 | { | 913 | { |
| 908 | unsigned int *revmap; | 914 | unsigned int *revmap; |
| 909 | 915 | ||
| 910 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); | 916 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); |
| 911 | 917 | ||
| 912 | /* Check revmap bounds */ | 918 | /* Check revmap bounds */ |
| 913 | if (unlikely(hwirq >= host->revmap_data.linear.size)) | 919 | if (unlikely(hwirq >= host->revmap_data.linear.size)) |
| 914 | return irq_find_mapping(host, hwirq); | 920 | return irq_find_mapping(host, hwirq); |
| 915 | 921 | ||
| 916 | /* Check if revmap was allocated */ | 922 | /* Check if revmap was allocated */ |
| 917 | revmap = host->revmap_data.linear.revmap; | 923 | revmap = host->revmap_data.linear.revmap; |
| 918 | if (unlikely(revmap == NULL)) | 924 | if (unlikely(revmap == NULL)) |
| 919 | return irq_find_mapping(host, hwirq); | 925 | return irq_find_mapping(host, hwirq); |
| 920 | 926 | ||
| 921 | /* Fill up revmap with slow path if no mapping found */ | 927 | /* Fill up revmap with slow path if no mapping found */ |
| 922 | if (unlikely(revmap[hwirq] == NO_IRQ)) | 928 | if (unlikely(revmap[hwirq] == NO_IRQ)) |
| 923 | revmap[hwirq] = irq_find_mapping(host, hwirq); | 929 | revmap[hwirq] = irq_find_mapping(host, hwirq); |
| 924 | 930 | ||
| 925 | return revmap[hwirq]; | 931 | return revmap[hwirq]; |
| 926 | } | 932 | } |
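A sketch of the intended O(1) fast path, assuming a hypothetical cascaded controller with host my_pic_host and a my_pic_read_pending() helper that returns the pending hardware number:

    static void my_pic_cascade(unsigned int irq, struct irq_desc *desc)
    {
            irq_hw_number_t hw = my_pic_read_pending();  /* assumed helper */
            unsigned int virq = irq_linear_revmap(my_pic_host, hw);

            if (virq != NO_IRQ)
                    generic_handle_irq(virq);
            desc->chip->eoi(irq);                        /* chip-specific ack */
    }

Note that the first lookup for a given hwirq takes the irq_find_mapping() slow path and caches the result in revmap[], so subsequent interrupts resolve with a single bounds check and array read.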
| 927 | 933 | ||
| 928 | unsigned int irq_alloc_virt(struct irq_host *host, | 934 | unsigned int irq_alloc_virt(struct irq_host *host, |
| 929 | unsigned int count, | 935 | unsigned int count, |
| 930 | unsigned int hint) | 936 | unsigned int hint) |
| 931 | { | 937 | { |
| 932 | unsigned long flags; | 938 | unsigned long flags; |
| 933 | unsigned int i, j, found = NO_IRQ; | 939 | unsigned int i, j, found = NO_IRQ; |
| 934 | 940 | ||
| 935 | if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) | 941 | if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) |
| 936 | return NO_IRQ; | 942 | return NO_IRQ; |
| 937 | 943 | ||
| 938 | spin_lock_irqsave(&irq_big_lock, flags); | 944 | spin_lock_irqsave(&irq_big_lock, flags); |
| 939 | 945 | ||
| 940 | /* Use hint for 1 interrupt if any */ | 946 | /* Use hint for 1 interrupt if any */ |
| 941 | if (count == 1 && hint >= NUM_ISA_INTERRUPTS && | 947 | if (count == 1 && hint >= NUM_ISA_INTERRUPTS && |
| 942 | hint < irq_virq_count && irq_map[hint].host == NULL) { | 948 | hint < irq_virq_count && irq_map[hint].host == NULL) { |
| 943 | found = hint; | 949 | found = hint; |
| 944 | goto hint_found; | 950 | goto hint_found; |
| 945 | } | 951 | } |
| 946 | 952 | ||
| 947 | /* Look for count consecutive numbers in the allocatable | 953 | /* Look for count consecutive numbers in the allocatable |
| 948 | * (non-legacy) space | 954 | * (non-legacy) space |
| 949 | */ | 955 | */ |
| 950 | for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { | 956 | for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { |
| 951 | if (irq_map[i].host != NULL) | 957 | if (irq_map[i].host != NULL) |
| 952 | j = 0; | 958 | j = 0; |
| 953 | else | 959 | else |
| 954 | j++; | 960 | j++; |
| 955 | 961 | ||
| 956 | if (j == count) { | 962 | if (j == count) { |
| 957 | found = i - count + 1; | 963 | found = i - count + 1; |
| 958 | break; | 964 | break; |
| 959 | } | 965 | } |
| 960 | } | 966 | } |
| 961 | if (found == NO_IRQ) { | 967 | if (found == NO_IRQ) { |
| 962 | spin_unlock_irqrestore(&irq_big_lock, flags); | 968 | spin_unlock_irqrestore(&irq_big_lock, flags); |
| 963 | return NO_IRQ; | 969 | return NO_IRQ; |
| 964 | } | 970 | } |
| 965 | hint_found: | 971 | hint_found: |
| 966 | for (i = found; i < (found + count); i++) { | 972 | for (i = found; i < (found + count); i++) { |
| 967 | irq_map[i].hwirq = host->inval_irq; | 973 | irq_map[i].hwirq = host->inval_irq; |
| 968 | smp_wmb(); | 974 | smp_wmb(); |
| 969 | irq_map[i].host = host; | 975 | irq_map[i].host = host; |
| 970 | } | 976 | } |
| 971 | spin_unlock_irqrestore(&irq_big_lock, flags); | 977 | spin_unlock_irqrestore(&irq_big_lock, flags); |
| 972 | return found; | 978 | return found; |
| 973 | } | 979 | } |
| 974 | 980 | ||
| 975 | void irq_free_virt(unsigned int virq, unsigned int count) | 981 | void irq_free_virt(unsigned int virq, unsigned int count) |
| 976 | { | 982 | { |
| 977 | unsigned long flags; | 983 | unsigned long flags; |
| 978 | unsigned int i; | 984 | unsigned int i; |
| 979 | 985 | ||
| 980 | WARN_ON(virq < NUM_ISA_INTERRUPTS); | 986 | WARN_ON(virq < NUM_ISA_INTERRUPTS); |
| 981 | WARN_ON(count == 0 || (virq + count) > irq_virq_count); | 987 | WARN_ON(count == 0 || (virq + count) > irq_virq_count); |
| 982 | 988 | ||
| 983 | spin_lock_irqsave(&irq_big_lock, flags); | 989 | spin_lock_irqsave(&irq_big_lock, flags); |
| 984 | for (i = virq; i < (virq + count); i++) { | 990 | for (i = virq; i < (virq + count); i++) { |
| 985 | struct irq_host *host; | 991 | struct irq_host *host; |
| 986 | 992 | ||
| 987 | if (i < NUM_ISA_INTERRUPTS || | 993 | if (i < NUM_ISA_INTERRUPTS || |
| 988 | i >= irq_virq_count) | 994 | i >= irq_virq_count) |
| 989 | continue; | 995 | continue; |
| 990 | 996 | ||
| 991 | host = irq_map[i].host; | 997 | host = irq_map[i].host; |
| 992 | irq_map[i].hwirq = host->inval_irq; | 998 | irq_map[i].hwirq = host->inval_irq; |
| 993 | smp_wmb(); | 999 | smp_wmb(); |
| 994 | irq_map[i].host = NULL; | 1000 | irq_map[i].host = NULL; |
| 995 | } | 1001 | } |
| 996 | spin_unlock_irqrestore(&irq_big_lock, flags); | 1002 | spin_unlock_irqrestore(&irq_big_lock, flags); |
| 997 | } | 1003 | } |
| 998 | 1004 | ||
| 999 | void irq_early_init(void) | 1005 | void irq_early_init(void) |
| 1000 | { | 1006 | { |
| 1001 | unsigned int i; | 1007 | unsigned int i; |
| 1002 | 1008 | ||
| 1003 | for (i = 0; i < NR_IRQS; i++) | 1009 | for (i = 0; i < NR_IRQS; i++) |
| 1004 | get_irq_desc(i)->status |= IRQ_NOREQUEST; | 1010 | get_irq_desc(i)->status |= IRQ_NOREQUEST; |
| 1005 | } | 1011 | } |
| 1006 | 1012 | ||
| 1007 | /* We need to create the radix trees late */ | 1013 | /* We need to create the radix trees late */ |
| 1008 | static int irq_late_init(void) | 1014 | static int irq_late_init(void) |
| 1009 | { | 1015 | { |
| 1010 | struct irq_host *h; | 1016 | struct irq_host *h; |
| 1011 | unsigned int i; | 1017 | unsigned int i; |
| 1012 | 1018 | ||
| 1013 | /* | 1019 | /* |
| 1014 | * No mutual exclusion with respect to accessors of the tree is needed | 1020 | * No mutual exclusion with respect to accessors of the tree is needed |
| 1015 | * here as the synchronization is done via the state variable | 1021 | * here as the synchronization is done via the state variable |
| 1016 | * revmap_trees_allocated. | 1022 | * revmap_trees_allocated. |
| 1017 | */ | 1023 | */ |
| 1018 | list_for_each_entry(h, &irq_hosts, link) { | 1024 | list_for_each_entry(h, &irq_hosts, link) { |
| 1019 | if (h->revmap_type == IRQ_HOST_MAP_TREE) | 1025 | if (h->revmap_type == IRQ_HOST_MAP_TREE) |
| 1020 | INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); | 1026 | INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); |
| 1021 | } | 1027 | } |
| 1022 | 1028 | ||
| 1023 | /* | 1029 | /* |
| 1024 | * Make sure the radix trees inits are visible before setting | 1030 | * Make sure the radix trees inits are visible before setting |
| 1025 | * the flag | 1031 | * the flag |
| 1026 | */ | 1032 | */ |
| 1027 | smp_wmb(); | 1033 | smp_wmb(); |
| 1028 | revmap_trees_allocated = 1; | 1034 | revmap_trees_allocated = 1; |
| 1029 | 1035 | ||
| 1030 | /* | 1036 | /* |
| 1031 | * Insert the reverse mapping for those interrupts already present | 1037 | * Insert the reverse mapping for those interrupts already present |
| 1032 | * in irq_map[]. | 1038 | * in irq_map[]. |
| 1033 | */ | 1039 | */ |
| 1034 | mutex_lock(&revmap_trees_mutex); | 1040 | mutex_lock(&revmap_trees_mutex); |
| 1035 | for (i = 0; i < irq_virq_count; i++) { | 1041 | for (i = 0; i < irq_virq_count; i++) { |
| 1036 | if (irq_map[i].host && | 1042 | if (irq_map[i].host && |
| 1037 | (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) | 1043 | (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) |
| 1038 | radix_tree_insert(&irq_map[i].host->revmap_data.tree, | 1044 | radix_tree_insert(&irq_map[i].host->revmap_data.tree, |
| 1039 | irq_map[i].hwirq, &irq_map[i]); | 1045 | irq_map[i].hwirq, &irq_map[i]); |
| 1040 | } | 1046 | } |
| 1041 | mutex_unlock(&revmap_trees_mutex); | 1047 | mutex_unlock(&revmap_trees_mutex); |
| 1042 | 1048 | ||
| 1043 | /* | 1049 | /* |
| 1044 | * Make sure the radix trees insertions are visible before setting | 1050 | * Make sure the radix trees insertions are visible before setting |
| 1045 | * the flag | 1051 | * the flag |
| 1046 | */ | 1052 | */ |
| 1047 | smp_wmb(); | 1053 | smp_wmb(); |
| 1048 | revmap_trees_allocated = 2; | 1054 | revmap_trees_allocated = 2; |
| 1049 | 1055 | ||
| 1050 | return 0; | 1056 | return 0; |
| 1051 | } | 1057 | } |
| 1052 | arch_initcall(irq_late_init); | 1058 | arch_initcall(irq_late_init); |
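To summarize the synchronization protocol built around revmap_trees_allocated (a reading of the code above; the smp_wmb()/smp_rmb() pairs make each transition visible before it is acted upon):

    0: trees uninitialized - inserts are skipped, lookups use irq_find_mapping()
    1: INIT_RADIX_TREE() done - irq_radix_revmap_insert() may touch the tree
    2: pre-existing irq_map[] entries inserted - irq_radix_revmap_lookup()
       trusts the tree as the fast path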
| 1053 | 1059 | ||
| 1054 | #ifdef CONFIG_VIRQ_DEBUG | 1060 | #ifdef CONFIG_VIRQ_DEBUG |
| 1055 | static int virq_debug_show(struct seq_file *m, void *private) | 1061 | static int virq_debug_show(struct seq_file *m, void *private) |
| 1056 | { | 1062 | { |
| 1057 | unsigned long flags; | 1063 | unsigned long flags; |
| 1058 | struct irq_desc *desc; | 1064 | struct irq_desc *desc; |
| 1059 | const char *p; | 1065 | const char *p; |
| 1060 | char none[] = "none"; | 1066 | char none[] = "none"; |
| 1061 | int i; | 1067 | int i; |
| 1062 | 1068 | ||
| 1063 | seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", | 1069 | seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", |
| 1064 | "chip name", "host name"); | 1070 | "chip name", "host name"); |
| 1065 | 1071 | ||
| 1066 | for (i = 1; i < NR_IRQS; i++) { | 1072 | for (i = 1; i < NR_IRQS; i++) { |
| 1067 | desc = get_irq_desc(i); | 1073 | desc = get_irq_desc(i); |
| 1068 | spin_lock_irqsave(&desc->lock, flags); | 1074 | spin_lock_irqsave(&desc->lock, flags); |
| 1069 | 1075 | ||
| 1070 | if (desc->action && desc->action->handler) { | 1076 | if (desc->action && desc->action->handler) { |
| 1071 | seq_printf(m, "%5d ", i); | 1077 | seq_printf(m, "%5d ", i); |
| 1072 | seq_printf(m, "0x%05lx ", virq_to_hw(i)); | 1078 | seq_printf(m, "0x%05lx ", virq_to_hw(i)); |
| 1073 | 1079 | ||
| 1074 | if (desc->chip && desc->chip->typename) | 1080 | if (desc->chip && desc->chip->typename) |
| 1075 | p = desc->chip->typename; | 1081 | p = desc->chip->typename; |
| 1076 | else | 1082 | else |
| 1077 | p = none; | 1083 | p = none; |
| 1078 | seq_printf(m, "%-15s ", p); | 1084 | seq_printf(m, "%-15s ", p); |
| 1079 | 1085 | ||
| 1080 | if (irq_map[i].host && irq_map[i].host->of_node) | 1086 | if (irq_map[i].host && irq_map[i].host->of_node) |
| 1081 | p = irq_map[i].host->of_node->full_name; | 1087 | p = irq_map[i].host->of_node->full_name; |
| 1082 | else | 1088 | else |
| 1083 | p = none; | 1089 | p = none; |
| 1084 | seq_printf(m, "%s\n", p); | 1090 | seq_printf(m, "%s\n", p); |
| 1085 | } | 1091 | } |
| 1086 | 1092 | ||
| 1087 | spin_unlock_irqrestore(&desc->lock, flags); | 1093 | spin_unlock_irqrestore(&desc->lock, flags); |
| 1088 | } | 1094 | } |
| 1089 | 1095 | ||
| 1090 | return 0; | 1096 | return 0; |
| 1091 | } | 1097 | } |
| 1092 | 1098 | ||
| 1093 | static int virq_debug_open(struct inode *inode, struct file *file) | 1099 | static int virq_debug_open(struct inode *inode, struct file *file) |
| 1094 | { | 1100 | { |
| 1095 | return single_open(file, virq_debug_show, inode->i_private); | 1101 | return single_open(file, virq_debug_show, inode->i_private); |
| 1096 | } | 1102 | } |
| 1097 | 1103 | ||
| 1098 | static const struct file_operations virq_debug_fops = { | 1104 | static const struct file_operations virq_debug_fops = { |
| 1099 | .open = virq_debug_open, | 1105 | .open = virq_debug_open, |
| 1100 | .read = seq_read, | 1106 | .read = seq_read, |
| 1101 | .llseek = seq_lseek, | 1107 | .llseek = seq_lseek, |
| 1102 | .release = single_release, | 1108 | .release = single_release, |
| 1103 | }; | 1109 | }; |
| 1104 | 1110 | ||
| 1105 | static int __init irq_debugfs_init(void) | 1111 | static int __init irq_debugfs_init(void) |
| 1106 | { | 1112 | { |
| 1107 | if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, | 1113 | if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, |
| 1108 | NULL, &virq_debug_fops) == NULL) | 1114 | NULL, &virq_debug_fops) == NULL) |
| 1109 | return -ENOMEM; | 1115 | return -ENOMEM; |
| 1110 | 1116 | ||
| 1111 | return 0; | 1117 | return 0; |
| 1112 | } | 1118 | } |
| 1113 | __initcall(irq_debugfs_init); | 1119 | __initcall(irq_debugfs_init); |
| 1114 | #endif /* CONFIG_VIRQ_DEBUG */ | 1120 | #endif /* CONFIG_VIRQ_DEBUG */ |
| 1115 | 1121 | ||
| 1116 | #ifdef CONFIG_PPC64 | 1122 | #ifdef CONFIG_PPC64 |
| 1117 | static int __init setup_noirqdistrib(char *str) | 1123 | static int __init setup_noirqdistrib(char *str) |
| 1118 | { | 1124 | { |
| 1119 | distribute_irqs = 0; | 1125 | distribute_irqs = 0; |
| 1120 | return 1; | 1126 | return 1; |
| 1121 | } | 1127 | } |
| 1122 | 1128 | ||
| 1123 | __setup("noirqdistrib", setup_noirqdistrib); | 1129 | __setup("noirqdistrib", setup_noirqdistrib); |
| 1124 | #endif /* CONFIG_PPC64 */ | 1130 | #endif /* CONFIG_PPC64 */ |
| 1125 | 1131 |
arch/powerpc/kernel/perf_event.c
| 1 | /* | 1 | /* |
| 2 | * Performance event support - powerpc architecture code | 2 | * Performance event support - powerpc architecture code |
| 3 | * | 3 | * |
| 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
| 10 | */ | 10 | */ |
| 11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
| 12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
| 13 | #include <linux/perf_event.h> | 13 | #include <linux/perf_event.h> |
| 14 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
| 15 | #include <linux/hardirq.h> | 15 | #include <linux/hardirq.h> |
| 16 | #include <asm/reg.h> | 16 | #include <asm/reg.h> |
| 17 | #include <asm/pmc.h> | 17 | #include <asm/pmc.h> |
| 18 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
| 19 | #include <asm/firmware.h> | 19 | #include <asm/firmware.h> |
| 20 | #include <asm/ptrace.h> | 20 | #include <asm/ptrace.h> |
| 21 | 21 | ||
| 22 | struct cpu_hw_events { | 22 | struct cpu_hw_events { |
| 23 | int n_events; | 23 | int n_events; |
| 24 | int n_percpu; | 24 | int n_percpu; |
| 25 | int disabled; | 25 | int disabled; |
| 26 | int n_added; | 26 | int n_added; |
| 27 | int n_limited; | 27 | int n_limited; |
| 28 | u8 pmcs_enabled; | 28 | u8 pmcs_enabled; |
| 29 | struct perf_event *event[MAX_HWEVENTS]; | 29 | struct perf_event *event[MAX_HWEVENTS]; |
| 30 | u64 events[MAX_HWEVENTS]; | 30 | u64 events[MAX_HWEVENTS]; |
| 31 | unsigned int flags[MAX_HWEVENTS]; | 31 | unsigned int flags[MAX_HWEVENTS]; |
| 32 | unsigned long mmcr[3]; | 32 | unsigned long mmcr[3]; |
| 33 | struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; | 33 | struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; |
| 34 | u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; | 34 | u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; |
| 35 | u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | 35 | u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; |
| 36 | unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | 36 | unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; |
| 37 | unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; | 37 | unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; |
| 38 | }; | 38 | }; |
| 39 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | 39 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); |
| 40 | 40 | ||
| 41 | struct power_pmu *ppmu; | 41 | struct power_pmu *ppmu; |
| 42 | 42 | ||
| 43 | /* | 43 | /* |
| 44 | * Normally, to ignore kernel events we set the FCS (freeze counters | 44 | * Normally, to ignore kernel events we set the FCS (freeze counters |
| 45 | * in supervisor mode) bit in MMCR0, but if the kernel runs with the | 45 | * in supervisor mode) bit in MMCR0, but if the kernel runs with the |
| 46 | * hypervisor bit set in the MSR, or if we are running on a processor | 46 | * hypervisor bit set in the MSR, or if we are running on a processor |
| 47 | * where the hypervisor bit is forced to 1 (as on Apple G5 processors), | 47 | * where the hypervisor bit is forced to 1 (as on Apple G5 processors), |
| 48 | * then we need to use the FCHV bit to ignore kernel events. | 48 | * then we need to use the FCHV bit to ignore kernel events. |
| 49 | */ | 49 | */ |
| 50 | static unsigned int freeze_events_kernel = MMCR0_FCS; | 50 | static unsigned int freeze_events_kernel = MMCR0_FCS; |
| 51 | 51 | ||
| 52 | /* | 52 | /* |
| 53 | * 32-bit doesn't have MMCRA but does have an MMCR2, | 53 | * 32-bit doesn't have MMCRA but does have an MMCR2, |
| 54 | * and a few other names are different. | 54 | * and a few other names are different. |
| 55 | */ | 55 | */ |
| 56 | #ifdef CONFIG_PPC32 | 56 | #ifdef CONFIG_PPC32 |
| 57 | 57 | ||
| 58 | #define MMCR0_FCHV 0 | 58 | #define MMCR0_FCHV 0 |
| 59 | #define MMCR0_PMCjCE MMCR0_PMCnCE | 59 | #define MMCR0_PMCjCE MMCR0_PMCnCE |
| 60 | 60 | ||
| 61 | #define SPRN_MMCRA SPRN_MMCR2 | 61 | #define SPRN_MMCRA SPRN_MMCR2 |
| 62 | #define MMCRA_SAMPLE_ENABLE 0 | 62 | #define MMCRA_SAMPLE_ENABLE 0 |
| 63 | 63 | ||
| 64 | static inline unsigned long perf_ip_adjust(struct pt_regs *regs) | 64 | static inline unsigned long perf_ip_adjust(struct pt_regs *regs) |
| 65 | { | 65 | { |
| 66 | return 0; | 66 | return 0; |
| 67 | } | 67 | } |
| 68 | static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { } | 68 | static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { } |
| 69 | static inline u32 perf_get_misc_flags(struct pt_regs *regs) | 69 | static inline u32 perf_get_misc_flags(struct pt_regs *regs) |
| 70 | { | 70 | { |
| 71 | return 0; | 71 | return 0; |
| 72 | } | 72 | } |
| 73 | static inline void perf_read_regs(struct pt_regs *regs) { } | 73 | static inline void perf_read_regs(struct pt_regs *regs) { } |
| 74 | static inline int perf_intr_is_nmi(struct pt_regs *regs) | 74 | static inline int perf_intr_is_nmi(struct pt_regs *regs) |
| 75 | { | 75 | { |
| 76 | return 0; | 76 | return 0; |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | #endif /* CONFIG_PPC32 */ | 79 | #endif /* CONFIG_PPC32 */ |
| 80 | 80 | ||
| 81 | /* | 81 | /* |
| 82 | * Things that are specific to 64-bit implementations. | 82 | * Things that are specific to 64-bit implementations. |
| 83 | */ | 83 | */ |
| 84 | #ifdef CONFIG_PPC64 | 84 | #ifdef CONFIG_PPC64 |
| 85 | 85 | ||
| 86 | static inline unsigned long perf_ip_adjust(struct pt_regs *regs) | 86 | static inline unsigned long perf_ip_adjust(struct pt_regs *regs) |
| 87 | { | 87 | { |
| 88 | unsigned long mmcra = regs->dsisr; | 88 | unsigned long mmcra = regs->dsisr; |
| 89 | 89 | ||
| 90 | if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { | 90 | if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { |
| 91 | unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; | 91 | unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; |
| 92 | if (slot > 1) | 92 | if (slot > 1) |
| 93 | return 4 * (slot - 1); | 93 | return 4 * (slot - 1); |
| 94 | } | 94 | } |
| 95 | return 0; | 95 | return 0; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | /* | 98 | /* |
| 99 | * The user wants a data address recorded. | 99 | * The user wants a data address recorded. |
| 100 | * If we're not doing instruction sampling, give them the SDAR | 100 | * If we're not doing instruction sampling, give them the SDAR |
| 101 | * (sampled data address). If we are doing instruction sampling, then | 101 | * (sampled data address). If we are doing instruction sampling, then |
| 102 | * only give them the SDAR if it corresponds to the instruction | 102 | * only give them the SDAR if it corresponds to the instruction |
| 103 | * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC | 103 | * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC |
| 104 | * bit in MMCRA. | 104 | * bit in MMCRA. |
| 105 | */ | 105 | */ |
| 106 | static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) | 106 | static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) |
| 107 | { | 107 | { |
| 108 | unsigned long mmcra = regs->dsisr; | 108 | unsigned long mmcra = regs->dsisr; |
| 109 | unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? | 109 | unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? |
| 110 | POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; | 110 | POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; |
| 111 | 111 | ||
| 112 | if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) | 112 | if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) |
| 113 | *addrp = mfspr(SPRN_SDAR); | 113 | *addrp = mfspr(SPRN_SDAR); |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | static inline u32 perf_get_misc_flags(struct pt_regs *regs) | 116 | static inline u32 perf_get_misc_flags(struct pt_regs *regs) |
| 117 | { | 117 | { |
| 118 | unsigned long mmcra = regs->dsisr; | 118 | unsigned long mmcra = regs->dsisr; |
| 119 | unsigned long sihv = MMCRA_SIHV; | 119 | unsigned long sihv = MMCRA_SIHV; |
| 120 | unsigned long sipr = MMCRA_SIPR; | 120 | unsigned long sipr = MMCRA_SIPR; |
| 121 | 121 | ||
| 122 | if (TRAP(regs) != 0xf00) | 122 | if (TRAP(regs) != 0xf00) |
| 123 | return 0; /* not a PMU interrupt */ | 123 | return 0; /* not a PMU interrupt */ |
| 124 | 124 | ||
| 125 | if (ppmu->flags & PPMU_ALT_SIPR) { | 125 | if (ppmu->flags & PPMU_ALT_SIPR) { |
| 126 | sihv = POWER6_MMCRA_SIHV; | 126 | sihv = POWER6_MMCRA_SIHV; |
| 127 | sipr = POWER6_MMCRA_SIPR; | 127 | sipr = POWER6_MMCRA_SIPR; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | /* PR has priority over HV, so order below is important */ | 130 | /* PR has priority over HV, so order below is important */ |
| 131 | if (mmcra & sipr) | 131 | if (mmcra & sipr) |
| 132 | return PERF_RECORD_MISC_USER; | 132 | return PERF_RECORD_MISC_USER; |
| 133 | if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV)) | 133 | if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV)) |
| 134 | return PERF_RECORD_MISC_HYPERVISOR; | 134 | return PERF_RECORD_MISC_HYPERVISOR; |
| 135 | return PERF_RECORD_MISC_KERNEL; | 135 | return PERF_RECORD_MISC_KERNEL; |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | /* | 138 | /* |
| 139 | * Overload regs->dsisr to store MMCRA so we only need to read it once | 139 | * Overload regs->dsisr to store MMCRA so we only need to read it once |
| 140 | * on each interrupt. | 140 | * on each interrupt. |
| 141 | */ | 141 | */ |
| 142 | static inline void perf_read_regs(struct pt_regs *regs) | 142 | static inline void perf_read_regs(struct pt_regs *regs) |
| 143 | { | 143 | { |
| 144 | regs->dsisr = mfspr(SPRN_MMCRA); | 144 | regs->dsisr = mfspr(SPRN_MMCRA); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | /* | 147 | /* |
| 148 | * If interrupts were soft-disabled when a PMU interrupt occurs, treat | 148 | * If interrupts were soft-disabled when a PMU interrupt occurs, treat |
| 149 | * it as an NMI. | 149 | * it as an NMI. |
| 150 | */ | 150 | */ |
| 151 | static inline int perf_intr_is_nmi(struct pt_regs *regs) | 151 | static inline int perf_intr_is_nmi(struct pt_regs *regs) |
| 152 | { | 152 | { |
| 153 | return !regs->softe; | 153 | return !regs->softe; |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | #endif /* CONFIG_PPC64 */ | 156 | #endif /* CONFIG_PPC64 */ |
| 157 | 157 | ||
| 158 | static void perf_event_interrupt(struct pt_regs *regs); | 158 | static void perf_event_interrupt(struct pt_regs *regs); |
| 159 | 159 | ||
| 160 | void perf_event_print_debug(void) | 160 | void perf_event_print_debug(void) |
| 161 | { | 161 | { |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | /* | 164 | /* |
| 165 | * Read one performance monitor counter (PMC). | 165 | * Read one performance monitor counter (PMC). |
| 166 | */ | 166 | */ |
| 167 | static unsigned long read_pmc(int idx) | 167 | static unsigned long read_pmc(int idx) |
| 168 | { | 168 | { |
| 169 | unsigned long val; | 169 | unsigned long val; |
| 170 | 170 | ||
| 171 | switch (idx) { | 171 | switch (idx) { |
| 172 | case 1: | 172 | case 1: |
| 173 | val = mfspr(SPRN_PMC1); | 173 | val = mfspr(SPRN_PMC1); |
| 174 | break; | 174 | break; |
| 175 | case 2: | 175 | case 2: |
| 176 | val = mfspr(SPRN_PMC2); | 176 | val = mfspr(SPRN_PMC2); |
| 177 | break; | 177 | break; |
| 178 | case 3: | 178 | case 3: |
| 179 | val = mfspr(SPRN_PMC3); | 179 | val = mfspr(SPRN_PMC3); |
| 180 | break; | 180 | break; |
| 181 | case 4: | 181 | case 4: |
| 182 | val = mfspr(SPRN_PMC4); | 182 | val = mfspr(SPRN_PMC4); |
| 183 | break; | 183 | break; |
| 184 | case 5: | 184 | case 5: |
| 185 | val = mfspr(SPRN_PMC5); | 185 | val = mfspr(SPRN_PMC5); |
| 186 | break; | 186 | break; |
| 187 | case 6: | 187 | case 6: |
| 188 | val = mfspr(SPRN_PMC6); | 188 | val = mfspr(SPRN_PMC6); |
| 189 | break; | 189 | break; |
| 190 | #ifdef CONFIG_PPC64 | 190 | #ifdef CONFIG_PPC64 |
| 191 | case 7: | 191 | case 7: |
| 192 | val = mfspr(SPRN_PMC7); | 192 | val = mfspr(SPRN_PMC7); |
| 193 | break; | 193 | break; |
| 194 | case 8: | 194 | case 8: |
| 195 | val = mfspr(SPRN_PMC8); | 195 | val = mfspr(SPRN_PMC8); |
| 196 | break; | 196 | break; |
| 197 | #endif /* CONFIG_PPC64 */ | 197 | #endif /* CONFIG_PPC64 */ |
| 198 | default: | 198 | default: |
| 199 | printk(KERN_ERR "oops trying to read PMC%d\n", idx); | 199 | printk(KERN_ERR "oops trying to read PMC%d\n", idx); |
| 200 | val = 0; | 200 | val = 0; |
| 201 | } | 201 | } |
| 202 | return val; | 202 | return val; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | /* | 205 | /* |
| 206 | * Write one PMC. | 206 | * Write one PMC. |
| 207 | */ | 207 | */ |
| 208 | static void write_pmc(int idx, unsigned long val) | 208 | static void write_pmc(int idx, unsigned long val) |
| 209 | { | 209 | { |
| 210 | switch (idx) { | 210 | switch (idx) { |
| 211 | case 1: | 211 | case 1: |
| 212 | mtspr(SPRN_PMC1, val); | 212 | mtspr(SPRN_PMC1, val); |
| 213 | break; | 213 | break; |
| 214 | case 2: | 214 | case 2: |
| 215 | mtspr(SPRN_PMC2, val); | 215 | mtspr(SPRN_PMC2, val); |
| 216 | break; | 216 | break; |
| 217 | case 3: | 217 | case 3: |
| 218 | mtspr(SPRN_PMC3, val); | 218 | mtspr(SPRN_PMC3, val); |
| 219 | break; | 219 | break; |
| 220 | case 4: | 220 | case 4: |
| 221 | mtspr(SPRN_PMC4, val); | 221 | mtspr(SPRN_PMC4, val); |
| 222 | break; | 222 | break; |
| 223 | case 5: | 223 | case 5: |
| 224 | mtspr(SPRN_PMC5, val); | 224 | mtspr(SPRN_PMC5, val); |
| 225 | break; | 225 | break; |
| 226 | case 6: | 226 | case 6: |
| 227 | mtspr(SPRN_PMC6, val); | 227 | mtspr(SPRN_PMC6, val); |
| 228 | break; | 228 | break; |
| 229 | #ifdef CONFIG_PPC64 | 229 | #ifdef CONFIG_PPC64 |
| 230 | case 7: | 230 | case 7: |
| 231 | mtspr(SPRN_PMC7, val); | 231 | mtspr(SPRN_PMC7, val); |
| 232 | break; | 232 | break; |
| 233 | case 8: | 233 | case 8: |
| 234 | mtspr(SPRN_PMC8, val); | 234 | mtspr(SPRN_PMC8, val); |
| 235 | break; | 235 | break; |
| 236 | #endif /* CONFIG_PPC64 */ | 236 | #endif /* CONFIG_PPC64 */ |
| 237 | default: | 237 | default: |
| 238 | printk(KERN_ERR "oops trying to write PMC%d\n", idx); | 238 | printk(KERN_ERR "oops trying to write PMC%d\n", idx); |
| 239 | } | 239 | } |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | /* | 242 | /* |
| 243 | * Check if a set of events can all go on the PMU at once. | 243 | * Check if a set of events can all go on the PMU at once. |
| 244 | * If they can't, this will look at alternative codes for the events | 244 | * If they can't, this will look at alternative codes for the events |
| 245 | * and see if any combination of alternative codes is feasible. | 245 | * and see if any combination of alternative codes is feasible. |
| 246 | * The feasible set is returned in event_id[]. | 246 | * The feasible set is returned in event_id[]. |
| 247 | */ | 247 | */ |
| 248 | static int power_check_constraints(struct cpu_hw_events *cpuhw, | 248 | static int power_check_constraints(struct cpu_hw_events *cpuhw, |
| 249 | u64 event_id[], unsigned int cflags[], | 249 | u64 event_id[], unsigned int cflags[], |
| 250 | int n_ev) | 250 | int n_ev) |
| 251 | { | 251 | { |
| 252 | unsigned long mask, value, nv; | 252 | unsigned long mask, value, nv; |
| 253 | unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; | 253 | unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; |
| 254 | int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS]; | 254 | int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS]; |
| 255 | int i, j; | 255 | int i, j; |
| 256 | unsigned long addf = ppmu->add_fields; | 256 | unsigned long addf = ppmu->add_fields; |
| 257 | unsigned long tadd = ppmu->test_adder; | 257 | unsigned long tadd = ppmu->test_adder; |
| 258 | 258 | ||
| 259 | if (n_ev > ppmu->n_counter) | 259 | if (n_ev > ppmu->n_counter) |
| 260 | return -1; | 260 | return -1; |
| 261 | 261 | ||
| 262 | /* First see if the events will go on as-is */ | 262 | /* First see if the events will go on as-is */ |
| 263 | for (i = 0; i < n_ev; ++i) { | 263 | for (i = 0; i < n_ev; ++i) { |
| 264 | if ((cflags[i] & PPMU_LIMITED_PMC_REQD) | 264 | if ((cflags[i] & PPMU_LIMITED_PMC_REQD) |
| 265 | && !ppmu->limited_pmc_event(event_id[i])) { | 265 | && !ppmu->limited_pmc_event(event_id[i])) { |
| 266 | ppmu->get_alternatives(event_id[i], cflags[i], | 266 | ppmu->get_alternatives(event_id[i], cflags[i], |
| 267 | cpuhw->alternatives[i]); | 267 | cpuhw->alternatives[i]); |
| 268 | event_id[i] = cpuhw->alternatives[i][0]; | 268 | event_id[i] = cpuhw->alternatives[i][0]; |
| 269 | } | 269 | } |
| 270 | if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], | 270 | if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], |
| 271 | &cpuhw->avalues[i][0])) | 271 | &cpuhw->avalues[i][0])) |
| 272 | return -1; | 272 | return -1; |
| 273 | } | 273 | } |
| 274 | value = mask = 0; | 274 | value = mask = 0; |
| 275 | for (i = 0; i < n_ev; ++i) { | 275 | for (i = 0; i < n_ev; ++i) { |
| 276 | nv = (value | cpuhw->avalues[i][0]) + | 276 | nv = (value | cpuhw->avalues[i][0]) + |
| 277 | (value & cpuhw->avalues[i][0] & addf); | 277 | (value & cpuhw->avalues[i][0] & addf); |
| 278 | if ((((nv + tadd) ^ value) & mask) != 0 || | 278 | if ((((nv + tadd) ^ value) & mask) != 0 || |
| 279 | (((nv + tadd) ^ cpuhw->avalues[i][0]) & | 279 | (((nv + tadd) ^ cpuhw->avalues[i][0]) & |
| 280 | cpuhw->amasks[i][0]) != 0) | 280 | cpuhw->amasks[i][0]) != 0) |
| 281 | break; | 281 | break; |
| 282 | value = nv; | 282 | value = nv; |
| 283 | mask |= cpuhw->amasks[i][0]; | 283 | mask |= cpuhw->amasks[i][0]; |
| 284 | } | 284 | } |
| 285 | if (i == n_ev) | 285 | if (i == n_ev) |
| 286 | return 0; /* all OK */ | 286 | return 0; /* all OK */ |
| 287 | 287 | ||
| 288 | /* doesn't work, gather alternatives... */ | 288 | /* doesn't work, gather alternatives... */ |
| 289 | if (!ppmu->get_alternatives) | 289 | if (!ppmu->get_alternatives) |
| 290 | return -1; | 290 | return -1; |
| 291 | for (i = 0; i < n_ev; ++i) { | 291 | for (i = 0; i < n_ev; ++i) { |
| 292 | choice[i] = 0; | 292 | choice[i] = 0; |
| 293 | n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i], | 293 | n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i], |
| 294 | cpuhw->alternatives[i]); | 294 | cpuhw->alternatives[i]); |
| 295 | for (j = 1; j < n_alt[i]; ++j) | 295 | for (j = 1; j < n_alt[i]; ++j) |
| 296 | ppmu->get_constraint(cpuhw->alternatives[i][j], | 296 | ppmu->get_constraint(cpuhw->alternatives[i][j], |
| 297 | &cpuhw->amasks[i][j], | 297 | &cpuhw->amasks[i][j], |
| 298 | &cpuhw->avalues[i][j]); | 298 | &cpuhw->avalues[i][j]); |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | /* enumerate all possibilities and see if any will work */ | 301 | /* enumerate all possibilities and see if any will work */ |
| 302 | i = 0; | 302 | i = 0; |
| 303 | j = -1; | 303 | j = -1; |
| 304 | value = mask = nv = 0; | 304 | value = mask = nv = 0; |
| 305 | while (i < n_ev) { | 305 | while (i < n_ev) { |
| 306 | if (j >= 0) { | 306 | if (j >= 0) { |
| 307 | /* we're backtracking, restore context */ | 307 | /* we're backtracking, restore context */ |
| 308 | value = svalues[i]; | 308 | value = svalues[i]; |
| 309 | mask = smasks[i]; | 309 | mask = smasks[i]; |
| 310 | j = choice[i]; | 310 | j = choice[i]; |
| 311 | } | 311 | } |
| 312 | /* | 312 | /* |
| 313 | * See if any alternative k for event_id i, | 313 | * See if any alternative k for event_id i, |
| 314 | * where k > j, will satisfy the constraints. | 314 | * where k > j, will satisfy the constraints. |
| 315 | */ | 315 | */ |
| 316 | while (++j < n_alt[i]) { | 316 | while (++j < n_alt[i]) { |
| 317 | nv = (value | cpuhw->avalues[i][j]) + | 317 | nv = (value | cpuhw->avalues[i][j]) + |
| 318 | (value & cpuhw->avalues[i][j] & addf); | 318 | (value & cpuhw->avalues[i][j] & addf); |
| 319 | if ((((nv + tadd) ^ value) & mask) == 0 && | 319 | if ((((nv + tadd) ^ value) & mask) == 0 && |
| 320 | (((nv + tadd) ^ cpuhw->avalues[i][j]) | 320 | (((nv + tadd) ^ cpuhw->avalues[i][j]) |
| 321 | & cpuhw->amasks[i][j]) == 0) | 321 | & cpuhw->amasks[i][j]) == 0) |
| 322 | break; | 322 | break; |
| 323 | } | 323 | } |
| 324 | if (j >= n_alt[i]) { | 324 | if (j >= n_alt[i]) { |
| 325 | /* | 325 | /* |
| 326 | * No feasible alternative, backtrack | 326 | * No feasible alternative, backtrack |
| 327 | * to event_id i-1 and continue enumerating its | 327 | * to event_id i-1 and continue enumerating its |
| 328 | * alternatives from where we got up to. | 328 | * alternatives from where we got up to. |
| 329 | */ | 329 | */ |
| 330 | if (--i < 0) | 330 | if (--i < 0) |
| 331 | return -1; | 331 | return -1; |
| 332 | } else { | 332 | } else { |
| 333 | /* | 333 | /* |
| 334 | * Found a feasible alternative for event_id i, | 334 | * Found a feasible alternative for event_id i, |
| 335 | * remember where we got up to with this event_id, | 335 | * remember where we got up to with this event_id, |
| 336 | * go on to the next event_id, and start with | 336 | * go on to the next event_id, and start with |
| 337 | * the first alternative for it. | 337 | * the first alternative for it. |
| 338 | */ | 338 | */ |
| 339 | choice[i] = j; | 339 | choice[i] = j; |
| 340 | svalues[i] = value; | 340 | svalues[i] = value; |
| 341 | smasks[i] = mask; | 341 | smasks[i] = mask; |
| 342 | value = nv; | 342 | value = nv; |
| 343 | mask |= cpuhw->amasks[i][j]; | 343 | mask |= cpuhw->amasks[i][j]; |
| 344 | ++i; | 344 | ++i; |
| 345 | j = -1; | 345 | j = -1; |
| 346 | } | 346 | } |
| 347 | } | 347 | } |
| 348 | 348 | ||
| 349 | /* OK, we have a feasible combination, tell the caller the solution */ | 349 | /* OK, we have a feasible combination, tell the caller the solution */ |
| 350 | for (i = 0; i < n_ev; ++i) | 350 | for (i = 0; i < n_ev; ++i) |
| 351 | event_id[i] = cpuhw->alternatives[i][choice[i]]; | 351 | event_id[i] = cpuhw->alternatives[i][choice[i]]; |
| 352 | return 0; | 352 | return 0; |
| 353 | } | 353 | } |
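The feasibility test above reduces to a miniature for the common case; the sketch below assumes addf == 0 and tadd == 0 (no adder/counting fields), which is enough to show how two events that claim the same select field conflict:

    static int fits(unsigned long *value, unsigned long *mask,
                    unsigned long av, unsigned long am)
    {
            unsigned long nv = *value | av;              /* addf == 0 */

            if (((nv ^ *value) & *mask) || ((nv ^ av) & am))
                    return 0;    /* an already-claimed field would change */
            *value = nv;
            *mask |= am;
            return 1;
    }

For example, if event A claims bits 0-3 with value 0x2 (leaving value = 0x2, mask = 0xf) and event B claims the same bits with value 0x5, then nv = 0x7 and (0x7 ^ 0x2) & 0xf != 0, so the set is rejected and the backtracking loop above tries alternative event codes instead.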
| 354 | 354 | ||
| 355 | /* | 355 | /* |
| 356 | * Check if newly-added events have consistent settings for | 356 | * Check if newly-added events have consistent settings for |
| 357 | * exclude_{user,kernel,hv} with each other and any previously | 357 | * exclude_{user,kernel,hv} with each other and any previously |
| 358 | * added events. | 358 | * added events. |
| 359 | */ | 359 | */ |
| 360 | static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], | 360 | static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], |
| 361 | int n_prev, int n_new) | 361 | int n_prev, int n_new) |
| 362 | { | 362 | { |
| 363 | int eu = 0, ek = 0, eh = 0; | 363 | int eu = 0, ek = 0, eh = 0; |
| 364 | int i, n, first; | 364 | int i, n, first; |
| 365 | struct perf_event *event; | 365 | struct perf_event *event; |
| 366 | 366 | ||
| 367 | n = n_prev + n_new; | 367 | n = n_prev + n_new; |
| 368 | if (n <= 1) | 368 | if (n <= 1) |
| 369 | return 0; | 369 | return 0; |
| 370 | 370 | ||
| 371 | first = 1; | 371 | first = 1; |
| 372 | for (i = 0; i < n; ++i) { | 372 | for (i = 0; i < n; ++i) { |
| 373 | if (cflags[i] & PPMU_LIMITED_PMC_OK) { | 373 | if (cflags[i] & PPMU_LIMITED_PMC_OK) { |
| 374 | cflags[i] &= ~PPMU_LIMITED_PMC_REQD; | 374 | cflags[i] &= ~PPMU_LIMITED_PMC_REQD; |
| 375 | continue; | 375 | continue; |
| 376 | } | 376 | } |
| 377 | event = ctrs[i]; | 377 | event = ctrs[i]; |
| 378 | if (first) { | 378 | if (first) { |
| 379 | eu = event->attr.exclude_user; | 379 | eu = event->attr.exclude_user; |
| 380 | ek = event->attr.exclude_kernel; | 380 | ek = event->attr.exclude_kernel; |
| 381 | eh = event->attr.exclude_hv; | 381 | eh = event->attr.exclude_hv; |
| 382 | first = 0; | 382 | first = 0; |
| 383 | } else if (event->attr.exclude_user != eu || | 383 | } else if (event->attr.exclude_user != eu || |
| 384 | event->attr.exclude_kernel != ek || | 384 | event->attr.exclude_kernel != ek || |
| 385 | event->attr.exclude_hv != eh) { | 385 | event->attr.exclude_hv != eh) { |
| 386 | return -EAGAIN; | 386 | return -EAGAIN; |
| 387 | } | 387 | } |
| 388 | } | 388 | } |
| 389 | 389 | ||
| 390 | if (eu || ek || eh) | 390 | if (eu || ek || eh) |
| 391 | for (i = 0; i < n; ++i) | 391 | for (i = 0; i < n; ++i) |
| 392 | if (cflags[i] & PPMU_LIMITED_PMC_OK) | 392 | if (cflags[i] & PPMU_LIMITED_PMC_OK) |
| 393 | cflags[i] |= PPMU_LIMITED_PMC_REQD; | 393 | cflags[i] |= PPMU_LIMITED_PMC_REQD; |
| 394 | 394 | ||
| 395 | return 0; | 395 | return 0; |
| 396 | } | 396 | } |
| 397 | 397 | ||
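The check above exists because the exclude_{user,kernel,hv} bits are applied through global MMCR0 freeze bits, so they cannot differ between events sharing one PMU programming. A minimal userspace sketch of that consistency rule (the struct and helper below are illustrative stand-ins, not kernel API):

    #include <stdio.h>

    /* Stand-in for the three attr bits that check_excludes() compares. */
    struct excludes { int user, kernel, hv; };

    static int consistent(struct excludes a, struct excludes b)
    {
        return a.user == b.user && a.kernel == b.kernel && a.hv == b.hv;
    }

    int main(void)
    {
        struct excludes cyc  = { .kernel = 1 };  /* user-only counting */
        struct excludes inst = { 0 };            /* count everywhere */

        /* Mirrors the -EAGAIN case above: these cannot share one PMU. */
        printf("%s\n", consistent(cyc, inst) ? "ok" : "-EAGAIN");
        return 0;
    }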
| 398 | static void power_pmu_read(struct perf_event *event) | 398 | static void power_pmu_read(struct perf_event *event) |
| 399 | { | 399 | { |
| 400 | s64 val, delta, prev; | 400 | s64 val, delta, prev; |
| 401 | 401 | ||
| 402 | if (!event->hw.idx) | 402 | if (!event->hw.idx) |
| 403 | return; | 403 | return; |
| 404 | /* | 404 | /* |
| 405 | * Performance monitor interrupts come even when interrupts | 405 | * Performance monitor interrupts come even when interrupts |
| 406 | * are soft-disabled, as long as interrupts are hard-enabled. | 406 | * are soft-disabled, as long as interrupts are hard-enabled. |
| 407 | * Therefore we treat them like NMIs. | 407 | * Therefore we treat them like NMIs. |
| 408 | */ | 408 | */ |
| 409 | do { | 409 | do { |
| 410 | prev = atomic64_read(&event->hw.prev_count); | 410 | prev = atomic64_read(&event->hw.prev_count); |
| 411 | barrier(); | 411 | barrier(); |
| 412 | val = read_pmc(event->hw.idx); | 412 | val = read_pmc(event->hw.idx); |
| 413 | } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev); | 413 | } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev); |
| 414 | 414 | ||
| 415 | /* The counters are only 32 bits wide */ | 415 | /* The counters are only 32 bits wide */ |
| 416 | delta = (val - prev) & 0xfffffffful; | 416 | delta = (val - prev) & 0xfffffffful; |
| 417 | atomic64_add(delta, &event->count); | 417 | atomic64_add(delta, &event->count); |
| 418 | atomic64_sub(delta, &event->hw.period_left); | 418 | atomic64_sub(delta, &event->hw.period_left); |
| 419 | } | 419 | } |
| 420 | 420 | ||
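Two details in power_pmu_read() are worth spelling out: the cmpxchg loop lets it race safely against the interrupt handler updating prev_count, and the 32-bit mask makes the delta come out right even when the PMC wraps. A standalone check of the wraparound arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long prev = 0xfffffff0ul; /* snapshot just before the PMC wraps */
        unsigned long val  = 0x00000010ul; /* next read, after wrapping past 2^32 */

        /* Same expression as above: modulo-2^32 subtraction absorbs the wrap. */
        unsigned long delta = (val - prev) & 0xfffffffful;

        printf("delta = %lu\n", delta); /* prints 32 */
        return 0;
    }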
| 421 | /* | 421 | /* |
| 422 | * On some machines, PMC5 and PMC6 can't be written, don't respect | 422 | * On some machines, PMC5 and PMC6 can't be written, don't respect |
| 423 | * the freeze conditions, and don't generate interrupts. This tells | 423 | * the freeze conditions, and don't generate interrupts. This tells |
| 424 | * us if `event' is using such a PMC. | 424 | * us if `event' is using such a PMC. |
| 425 | */ | 425 | */ |
| 426 | static int is_limited_pmc(int pmcnum) | 426 | static int is_limited_pmc(int pmcnum) |
| 427 | { | 427 | { |
| 428 | return (ppmu->flags & PPMU_LIMITED_PMC5_6) | 428 | return (ppmu->flags & PPMU_LIMITED_PMC5_6) |
| 429 | && (pmcnum == 5 || pmcnum == 6); | 429 | && (pmcnum == 5 || pmcnum == 6); |
| 430 | } | 430 | } |
| 431 | 431 | ||
| 432 | static void freeze_limited_counters(struct cpu_hw_events *cpuhw, | 432 | static void freeze_limited_counters(struct cpu_hw_events *cpuhw, |
| 433 | unsigned long pmc5, unsigned long pmc6) | 433 | unsigned long pmc5, unsigned long pmc6) |
| 434 | { | 434 | { |
| 435 | struct perf_event *event; | 435 | struct perf_event *event; |
| 436 | u64 val, prev, delta; | 436 | u64 val, prev, delta; |
| 437 | int i; | 437 | int i; |
| 438 | 438 | ||
| 439 | for (i = 0; i < cpuhw->n_limited; ++i) { | 439 | for (i = 0; i < cpuhw->n_limited; ++i) { |
| 440 | event = cpuhw->limited_counter[i]; | 440 | event = cpuhw->limited_counter[i]; |
| 441 | if (!event->hw.idx) | 441 | if (!event->hw.idx) |
| 442 | continue; | 442 | continue; |
| 443 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | 443 | val = (event->hw.idx == 5) ? pmc5 : pmc6; |
| 444 | prev = atomic64_read(&event->hw.prev_count); | 444 | prev = atomic64_read(&event->hw.prev_count); |
| 445 | event->hw.idx = 0; | 445 | event->hw.idx = 0; |
| 446 | delta = (val - prev) & 0xfffffffful; | 446 | delta = (val - prev) & 0xfffffffful; |
| 447 | atomic64_add(delta, &event->count); | 447 | atomic64_add(delta, &event->count); |
| 448 | } | 448 | } |
| 449 | } | 449 | } |
| 450 | 450 | ||
| 451 | static void thaw_limited_counters(struct cpu_hw_events *cpuhw, | 451 | static void thaw_limited_counters(struct cpu_hw_events *cpuhw, |
| 452 | unsigned long pmc5, unsigned long pmc6) | 452 | unsigned long pmc5, unsigned long pmc6) |
| 453 | { | 453 | { |
| 454 | struct perf_event *event; | 454 | struct perf_event *event; |
| 455 | u64 val; | 455 | u64 val; |
| 456 | int i; | 456 | int i; |
| 457 | 457 | ||
| 458 | for (i = 0; i < cpuhw->n_limited; ++i) { | 458 | for (i = 0; i < cpuhw->n_limited; ++i) { |
| 459 | event = cpuhw->limited_counter[i]; | 459 | event = cpuhw->limited_counter[i]; |
| 460 | event->hw.idx = cpuhw->limited_hwidx[i]; | 460 | event->hw.idx = cpuhw->limited_hwidx[i]; |
| 461 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | 461 | val = (event->hw.idx == 5) ? pmc5 : pmc6; |
| 462 | atomic64_set(&event->hw.prev_count, val); | 462 | atomic64_set(&event->hw.prev_count, val); |
| 463 | perf_event_update_userpage(event); | 463 | perf_event_update_userpage(event); |
| 464 | } | 464 | } |
| 465 | } | 465 | } |
| 466 | 466 | ||
| 467 | /* | 467 | /* |
| 468 | * Since limited events don't respect the freeze conditions, we | 468 | * Since limited events don't respect the freeze conditions, we |
| 469 | * have to read them immediately after freezing or unfreezing the | 469 | * have to read them immediately after freezing or unfreezing the |
| 470 | * other events. We try to keep the values from the limited | 470 | * other events. We try to keep the values from the limited |
| 471 | * events as consistent as possible by keeping the delay (in | 471 | * events as consistent as possible by keeping the delay (in |
| 472 | * cycles and instructions) between freezing/unfreezing and reading | 472 | * cycles and instructions) between freezing/unfreezing and reading |
| 473 | * the limited events as small and consistent as possible. | 473 | * the limited events as small and consistent as possible. |
| 474 | * Therefore, if any limited events are in use, we read them | 474 | * Therefore, if any limited events are in use, we read them |
| 475 | * both, and always in the same order, to minimize variability, | 475 | * both, and always in the same order, to minimize variability, |
| 476 | * and do it inside the same asm that writes MMCR0. | 476 | * and do it inside the same asm that writes MMCR0. |
| 477 | */ | 477 | */ |
| 478 | static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) | 478 | static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) |
| 479 | { | 479 | { |
| 480 | unsigned long pmc5, pmc6; | 480 | unsigned long pmc5, pmc6; |
| 481 | 481 | ||
| 482 | if (!cpuhw->n_limited) { | 482 | if (!cpuhw->n_limited) { |
| 483 | mtspr(SPRN_MMCR0, mmcr0); | 483 | mtspr(SPRN_MMCR0, mmcr0); |
| 484 | return; | 484 | return; |
| 485 | } | 485 | } |
| 486 | 486 | ||
| 487 | /* | 487 | /* |
| 488 | * Write MMCR0, then read PMC5 and PMC6 immediately. | 488 | * Write MMCR0, then read PMC5 and PMC6 immediately. |
| 489 | * To ensure we don't get a performance monitor interrupt | 489 | * To ensure we don't get a performance monitor interrupt |
| 490 | * between writing MMCR0 and freezing/thawing the limited | 490 | * between writing MMCR0 and freezing/thawing the limited |
| 491 | * events, we first write MMCR0 with the event overflow | 491 | * events, we first write MMCR0 with the event overflow |
| 492 | * interrupt enable bits turned off. | 492 | * interrupt enable bits turned off. |
| 493 | */ | 493 | */ |
| 494 | asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" | 494 | asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" |
| 495 | : "=&r" (pmc5), "=&r" (pmc6) | 495 | : "=&r" (pmc5), "=&r" (pmc6) |
| 496 | : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), | 496 | : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), |
| 497 | "i" (SPRN_MMCR0), | 497 | "i" (SPRN_MMCR0), |
| 498 | "i" (SPRN_PMC5), "i" (SPRN_PMC6)); | 498 | "i" (SPRN_PMC5), "i" (SPRN_PMC6)); |
| 499 | 499 | ||
| 500 | if (mmcr0 & MMCR0_FC) | 500 | if (mmcr0 & MMCR0_FC) |
| 501 | freeze_limited_counters(cpuhw, pmc5, pmc6); | 501 | freeze_limited_counters(cpuhw, pmc5, pmc6); |
| 502 | else | 502 | else |
| 503 | thaw_limited_counters(cpuhw, pmc5, pmc6); | 503 | thaw_limited_counters(cpuhw, pmc5, pmc6); |
| 504 | 504 | ||
| 505 | /* | 505 | /* |
| 506 | * Write the full MMCR0 including the event overflow interrupt | 506 | * Write the full MMCR0 including the event overflow interrupt |
| 507 | * enable bits, if necessary. | 507 | * enable bits, if necessary. |
| 508 | */ | 508 | */ |
| 509 | if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) | 509 | if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) |
| 510 | mtspr(SPRN_MMCR0, mmcr0); | 510 | mtspr(SPRN_MMCR0, mmcr0); |
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | /* | 513 | /* |
| 514 | * Disable all events to prevent PMU interrupts and to allow | 514 | * Disable all events to prevent PMU interrupts and to allow |
| 515 | * events to be added or removed. | 515 | * events to be added or removed. |
| 516 | */ | 516 | */ |
| 517 | void hw_perf_disable(void) | 517 | void hw_perf_disable(void) |
| 518 | { | 518 | { |
| 519 | struct cpu_hw_events *cpuhw; | 519 | struct cpu_hw_events *cpuhw; |
| 520 | unsigned long flags; | 520 | unsigned long flags; |
| 521 | 521 | ||
| 522 | if (!ppmu) | 522 | if (!ppmu) |
| 523 | return; | 523 | return; |
| 524 | local_irq_save(flags); | 524 | local_irq_save(flags); |
| 525 | cpuhw = &__get_cpu_var(cpu_hw_events); | 525 | cpuhw = &__get_cpu_var(cpu_hw_events); |
| 526 | 526 | ||
| 527 | if (!cpuhw->disabled) { | 527 | if (!cpuhw->disabled) { |
| 528 | cpuhw->disabled = 1; | 528 | cpuhw->disabled = 1; |
| 529 | cpuhw->n_added = 0; | 529 | cpuhw->n_added = 0; |
| 530 | 530 | ||
| 531 | /* | 531 | /* |
| 532 | * Check if we ever enabled the PMU on this cpu. | 532 | * Check if we ever enabled the PMU on this cpu. |
| 533 | */ | 533 | */ |
| 534 | if (!cpuhw->pmcs_enabled) { | 534 | if (!cpuhw->pmcs_enabled) { |
| 535 | ppc_enable_pmcs(); | 535 | ppc_enable_pmcs(); |
| 536 | cpuhw->pmcs_enabled = 1; | 536 | cpuhw->pmcs_enabled = 1; |
| 537 | } | 537 | } |
| 538 | 538 | ||
| 539 | /* | 539 | /* |
| 540 | * Disable instruction sampling if it was enabled | 540 | * Disable instruction sampling if it was enabled |
| 541 | */ | 541 | */ |
| 542 | if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { | 542 | if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { |
| 543 | mtspr(SPRN_MMCRA, | 543 | mtspr(SPRN_MMCRA, |
| 544 | cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); | 544 | cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); |
| 545 | mb(); | 545 | mb(); |
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | /* | 548 | /* |
| 549 | * Set the 'freeze counters' bit. | 549 | * Set the 'freeze counters' bit. |
| 550 | * The barrier is to make sure the mtspr has been | 550 | * The barrier is to make sure the mtspr has been |
| 551 | * executed and the PMU has frozen the events | 551 | * executed and the PMU has frozen the events |
| 552 | * before we return. | 552 | * before we return. |
| 553 | */ | 553 | */ |
| 554 | write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); | 554 | write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); |
| 555 | mb(); | 555 | mb(); |
| 556 | } | 556 | } |
| 557 | local_irq_restore(flags); | 557 | local_irq_restore(flags); |
| 558 | } | 558 | } |
| 559 | 559 | ||
| 560 | /* | 560 | /* |
| 561 | * Re-enable all events, undoing a preceding hw_perf_disable. | 561 | * Re-enable all events, undoing a preceding hw_perf_disable. |
| 562 | * If we were previously disabled and events were added, then | 562 | * If we were previously disabled and events were added, then |
| 563 | * put the new config on the PMU. | 563 | * put the new config on the PMU. |
| 564 | */ | 564 | */ |
| 565 | void hw_perf_enable(void) | 565 | void hw_perf_enable(void) |
| 566 | { | 566 | { |
| 567 | struct perf_event *event; | 567 | struct perf_event *event; |
| 568 | struct cpu_hw_events *cpuhw; | 568 | struct cpu_hw_events *cpuhw; |
| 569 | unsigned long flags; | 569 | unsigned long flags; |
| 570 | long i; | 570 | long i; |
| 571 | unsigned long val; | 571 | unsigned long val; |
| 572 | s64 left; | 572 | s64 left; |
| 573 | unsigned int hwc_index[MAX_HWEVENTS]; | 573 | unsigned int hwc_index[MAX_HWEVENTS]; |
| 574 | int n_lim; | 574 | int n_lim; |
| 575 | int idx; | 575 | int idx; |
| 576 | 576 | ||
| 577 | if (!ppmu) | 577 | if (!ppmu) |
| 578 | return; | 578 | return; |
| 579 | local_irq_save(flags); | 579 | local_irq_save(flags); |
| 580 | cpuhw = &__get_cpu_var(cpu_hw_events); | 580 | cpuhw = &__get_cpu_var(cpu_hw_events); |
| 581 | if (!cpuhw->disabled) { | 581 | if (!cpuhw->disabled) { |
| 582 | local_irq_restore(flags); | 582 | local_irq_restore(flags); |
| 583 | return; | 583 | return; |
| 584 | } | 584 | } |
| 585 | cpuhw->disabled = 0; | 585 | cpuhw->disabled = 0; |
| 586 | 586 | ||
| 587 | /* | 587 | /* |
| 588 | * If we didn't change anything, or only removed events, | 588 | * If we didn't change anything, or only removed events, |
| 589 | * no need to recalculate MMCR* settings and reset the PMCs. | 589 | * no need to recalculate MMCR* settings and reset the PMCs. |
| 590 | * Just reenable the PMU with the current MMCR* settings | 590 | * Just reenable the PMU with the current MMCR* settings |
| 591 | * (possibly updated for removal of events). | 591 | * (possibly updated for removal of events). |
| 592 | */ | 592 | */ |
| 593 | if (!cpuhw->n_added) { | 593 | if (!cpuhw->n_added) { |
| 594 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); | 594 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); |
| 595 | mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); | 595 | mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); |
| 596 | if (cpuhw->n_events == 0) | 596 | if (cpuhw->n_events == 0) |
| 597 | ppc_set_pmu_inuse(0); | 597 | ppc_set_pmu_inuse(0); |
| 598 | goto out_enable; | 598 | goto out_enable; |
| 599 | } | 599 | } |
| 600 | 600 | ||
| 601 | /* | 601 | /* |
| 602 | * Compute MMCR* values for the new set of events | 602 | * Compute MMCR* values for the new set of events |
| 603 | */ | 603 | */ |
| 604 | if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, | 604 | if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, |
| 605 | cpuhw->mmcr)) { | 605 | cpuhw->mmcr)) { |
| 606 | /* shouldn't ever get here */ | 606 | /* shouldn't ever get here */ |
| 607 | printk(KERN_ERR "oops compute_mmcr failed\n"); | 607 | printk(KERN_ERR "oops compute_mmcr failed\n"); |
| 608 | goto out; | 608 | goto out; |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | /* | 611 | /* |
| 612 | * Add in MMCR0 freeze bits corresponding to the | 612 | * Add in MMCR0 freeze bits corresponding to the |
| 613 | * attr.exclude_* bits for the first event. | 613 | * attr.exclude_* bits for the first event. |
| 614 | * We have already checked that all events have the | 614 | * We have already checked that all events have the |
| 615 | * same values for these bits as the first event. | 615 | * same values for these bits as the first event. |
| 616 | */ | 616 | */ |
| 617 | event = cpuhw->event[0]; | 617 | event = cpuhw->event[0]; |
| 618 | if (event->attr.exclude_user) | 618 | if (event->attr.exclude_user) |
| 619 | cpuhw->mmcr[0] |= MMCR0_FCP; | 619 | cpuhw->mmcr[0] |= MMCR0_FCP; |
| 620 | if (event->attr.exclude_kernel) | 620 | if (event->attr.exclude_kernel) |
| 621 | cpuhw->mmcr[0] |= freeze_events_kernel; | 621 | cpuhw->mmcr[0] |= freeze_events_kernel; |
| 622 | if (event->attr.exclude_hv) | 622 | if (event->attr.exclude_hv) |
| 623 | cpuhw->mmcr[0] |= MMCR0_FCHV; | 623 | cpuhw->mmcr[0] |= MMCR0_FCHV; |
| 624 | 624 | ||
| 625 | /* | 625 | /* |
| 626 | * Write the new configuration to MMCR* with the freeze | 626 | * Write the new configuration to MMCR* with the freeze |
| 627 | * bit set and set the hardware events to their initial values. | 627 | * bit set and set the hardware events to their initial values. |
| 628 | * Then unfreeze the events. | 628 | * Then unfreeze the events. |
| 629 | */ | 629 | */ |
| 630 | ppc_set_pmu_inuse(1); | 630 | ppc_set_pmu_inuse(1); |
| 631 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); | 631 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); |
| 632 | mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); | 632 | mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); |
| 633 | mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | 633 | mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) |
| 634 | | MMCR0_FC); | 634 | | MMCR0_FC); |
| 635 | 635 | ||
| 636 | /* | 636 | /* |
| 637 | * Read off any pre-existing events that need to move | 637 | * Read off any pre-existing events that need to move |
| 638 | * to another PMC. | 638 | * to another PMC. |
| 639 | */ | 639 | */ |
| 640 | for (i = 0; i < cpuhw->n_events; ++i) { | 640 | for (i = 0; i < cpuhw->n_events; ++i) { |
| 641 | event = cpuhw->event[i]; | 641 | event = cpuhw->event[i]; |
| 642 | if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { | 642 | if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { |
| 643 | power_pmu_read(event); | 643 | power_pmu_read(event); |
| 644 | write_pmc(event->hw.idx, 0); | 644 | write_pmc(event->hw.idx, 0); |
| 645 | event->hw.idx = 0; | 645 | event->hw.idx = 0; |
| 646 | } | 646 | } |
| 647 | } | 647 | } |
| 648 | 648 | ||
| 649 | /* | 649 | /* |
| 650 | * Initialize the PMCs for all the new and moved events. | 650 | * Initialize the PMCs for all the new and moved events. |
| 651 | */ | 651 | */ |
| 652 | cpuhw->n_limited = n_lim = 0; | 652 | cpuhw->n_limited = n_lim = 0; |
| 653 | for (i = 0; i < cpuhw->n_events; ++i) { | 653 | for (i = 0; i < cpuhw->n_events; ++i) { |
| 654 | event = cpuhw->event[i]; | 654 | event = cpuhw->event[i]; |
| 655 | if (event->hw.idx) | 655 | if (event->hw.idx) |
| 656 | continue; | 656 | continue; |
| 657 | idx = hwc_index[i] + 1; | 657 | idx = hwc_index[i] + 1; |
| 658 | if (is_limited_pmc(idx)) { | 658 | if (is_limited_pmc(idx)) { |
| 659 | cpuhw->limited_counter[n_lim] = event; | 659 | cpuhw->limited_counter[n_lim] = event; |
| 660 | cpuhw->limited_hwidx[n_lim] = idx; | 660 | cpuhw->limited_hwidx[n_lim] = idx; |
| 661 | ++n_lim; | 661 | ++n_lim; |
| 662 | continue; | 662 | continue; |
| 663 | } | 663 | } |
| 664 | val = 0; | 664 | val = 0; |
| 665 | if (event->hw.sample_period) { | 665 | if (event->hw.sample_period) { |
| 666 | left = atomic64_read(&event->hw.period_left); | 666 | left = atomic64_read(&event->hw.period_left); |
| 667 | if (left < 0x80000000L) | 667 | if (left < 0x80000000L) |
| 668 | val = 0x80000000L - left; | 668 | val = 0x80000000L - left; |
| 669 | } | 669 | } |
| 670 | atomic64_set(&event->hw.prev_count, val); | 670 | atomic64_set(&event->hw.prev_count, val); |
| 671 | event->hw.idx = idx; | 671 | event->hw.idx = idx; |
| 672 | write_pmc(idx, val); | 672 | write_pmc(idx, val); |
| 673 | perf_event_update_userpage(event); | 673 | perf_event_update_userpage(event); |
| 674 | } | 674 | } |
| 675 | cpuhw->n_limited = n_lim; | 675 | cpuhw->n_limited = n_lim; |
| 676 | cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; | 676 | cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; |
| 677 | 677 | ||
| 678 | out_enable: | 678 | out_enable: |
| 679 | mb(); | 679 | mb(); |
| 680 | write_mmcr0(cpuhw, cpuhw->mmcr[0]); | 680 | write_mmcr0(cpuhw, cpuhw->mmcr[0]); |
| 681 | 681 | ||
| 682 | /* | 682 | /* |
| 683 | * Enable instruction sampling if necessary | 683 | * Enable instruction sampling if necessary |
| 684 | */ | 684 | */ |
| 685 | if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { | 685 | if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { |
| 686 | mb(); | 686 | mb(); |
| 687 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); | 687 | mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); |
| 688 | } | 688 | } |
| 689 | 689 | ||
| 690 | out: | 690 | out: |
| 691 | local_irq_restore(flags); | 691 | local_irq_restore(flags); |
| 692 | } | 692 | } |
| 693 | 693 | ||
| 694 | static int collect_events(struct perf_event *group, int max_count, | 694 | static int collect_events(struct perf_event *group, int max_count, |
| 695 | struct perf_event *ctrs[], u64 *events, | 695 | struct perf_event *ctrs[], u64 *events, |
| 696 | unsigned int *flags) | 696 | unsigned int *flags) |
| 697 | { | 697 | { |
| 698 | int n = 0; | 698 | int n = 0; |
| 699 | struct perf_event *event; | 699 | struct perf_event *event; |
| 700 | 700 | ||
| 701 | if (!is_software_event(group)) { | 701 | if (!is_software_event(group)) { |
| 702 | if (n >= max_count) | 702 | if (n >= max_count) |
| 703 | return -1; | 703 | return -1; |
| 704 | ctrs[n] = group; | 704 | ctrs[n] = group; |
| 705 | flags[n] = group->hw.event_base; | 705 | flags[n] = group->hw.event_base; |
| 706 | events[n++] = group->hw.config; | 706 | events[n++] = group->hw.config; |
| 707 | } | 707 | } |
| 708 | list_for_each_entry(event, &group->sibling_list, group_entry) { | 708 | list_for_each_entry(event, &group->sibling_list, group_entry) { |
| 709 | if (!is_software_event(event) && | 709 | if (!is_software_event(event) && |
| 710 | event->state != PERF_EVENT_STATE_OFF) { | 710 | event->state != PERF_EVENT_STATE_OFF) { |
| 711 | if (n >= max_count) | 711 | if (n >= max_count) |
| 712 | return -1; | 712 | return -1; |
| 713 | ctrs[n] = event; | 713 | ctrs[n] = event; |
| 714 | flags[n] = event->hw.event_base; | 714 | flags[n] = event->hw.event_base; |
| 715 | events[n++] = event->hw.config; | 715 | events[n++] = event->hw.config; |
| 716 | } | 716 | } |
| 717 | } | 717 | } |
| 718 | return n; | 718 | return n; |
| 719 | } | 719 | } |
| 720 | 720 | ||
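collect_events() flattens a group into parallel arrays: the leader first, then every sibling that is a hardware event and not in the OFF state, failing the whole group if it cannot fit. A simplified userspace sketch of that walk, with a plain linked list standing in for the kernel's sibling_list and the software-event filtering dropped for brevity:

    #include <stdio.h>

    struct ev { const char *name; int off; struct ev *sibling; };

    static int collect(struct ev *leader, int max, const char *out[])
    {
        int n = 0;
        struct ev *e;

        if (n >= max)
            return -1;
        out[n++] = leader->name;              /* leader goes first */
        for (e = leader->sibling; e; e = e->sibling) {
            if (e->off)                       /* skip events in the OFF state */
                continue;
            if (n >= max)                     /* group too big: fail it whole */
                return -1;
            out[n++] = e->name;
        }
        return n;
    }

    int main(void)
    {
        struct ev s2     = { "branches", 1, NULL };  /* OFF: skipped */
        struct ev s1     = { "instructions", 0, &s2 };
        struct ev leader = { "cycles", 0, &s1 };
        const char *out[4];
        int i, n = collect(&leader, 4, out);

        for (i = 0; i < n; i++)
            printf("%s\n", out[i]);           /* cycles, instructions */
        return 0;
    }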
| 721 | static void event_sched_in(struct perf_event *event, int cpu) | 721 | static void event_sched_in(struct perf_event *event, int cpu) |
| 722 | { | 722 | { |
| 723 | event->state = PERF_EVENT_STATE_ACTIVE; | 723 | event->state = PERF_EVENT_STATE_ACTIVE; |
| 724 | event->oncpu = cpu; | 724 | event->oncpu = cpu; |
| 725 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; | 725 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; |
| 726 | if (is_software_event(event)) | 726 | if (is_software_event(event)) |
| 727 | event->pmu->enable(event); | 727 | event->pmu->enable(event); |
| 728 | } | 728 | } |
| 729 | 729 | ||
| 730 | /* | 730 | /* |
| 731 | * Called to enable a whole group of events. | 731 | * Called to enable a whole group of events. |
| 732 | * Returns 1 if the group was enabled, or -EAGAIN if it could not be. | 732 | * Returns 1 if the group was enabled, or -EAGAIN if it could not be. |
| 733 | * Assumes the caller has disabled interrupts and has | 733 | * Assumes the caller has disabled interrupts and has |
| 734 | * frozen the PMU with hw_perf_disable. | 734 | * frozen the PMU with hw_perf_disable. |
| 735 | */ | 735 | */ |
| 736 | int hw_perf_group_sched_in(struct perf_event *group_leader, | 736 | int hw_perf_group_sched_in(struct perf_event *group_leader, |
| 737 | struct perf_cpu_context *cpuctx, | 737 | struct perf_cpu_context *cpuctx, |
| 738 | struct perf_event_context *ctx, int cpu) | 738 | struct perf_event_context *ctx, int cpu) |
| 739 | { | 739 | { |
| 740 | struct cpu_hw_events *cpuhw; | 740 | struct cpu_hw_events *cpuhw; |
| 741 | long i, n, n0; | 741 | long i, n, n0; |
| 742 | struct perf_event *sub; | 742 | struct perf_event *sub; |
| 743 | 743 | ||
| 744 | if (!ppmu) | 744 | if (!ppmu) |
| 745 | return 0; | 745 | return 0; |
| 746 | cpuhw = &__get_cpu_var(cpu_hw_events); | 746 | cpuhw = &__get_cpu_var(cpu_hw_events); |
| 747 | n0 = cpuhw->n_events; | 747 | n0 = cpuhw->n_events; |
| 748 | n = collect_events(group_leader, ppmu->n_counter - n0, | 748 | n = collect_events(group_leader, ppmu->n_counter - n0, |
| 749 | &cpuhw->event[n0], &cpuhw->events[n0], | 749 | &cpuhw->event[n0], &cpuhw->events[n0], |
| 750 | &cpuhw->flags[n0]); | 750 | &cpuhw->flags[n0]); |
| 751 | if (n < 0) | 751 | if (n < 0) |
| 752 | return -EAGAIN; | 752 | return -EAGAIN; |
| 753 | if (check_excludes(cpuhw->event, cpuhw->flags, n0, n)) | 753 | if (check_excludes(cpuhw->event, cpuhw->flags, n0, n)) |
| 754 | return -EAGAIN; | 754 | return -EAGAIN; |
| 755 | i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0); | 755 | i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0); |
| 756 | if (i < 0) | 756 | if (i < 0) |
| 757 | return -EAGAIN; | 757 | return -EAGAIN; |
| 758 | cpuhw->n_events = n0 + n; | 758 | cpuhw->n_events = n0 + n; |
| 759 | cpuhw->n_added += n; | 759 | cpuhw->n_added += n; |
| 760 | 760 | ||
| 761 | /* | 761 | /* |
| 762 | * OK, this group can go on; update event states etc., | 762 | * OK, this group can go on; update event states etc., |
| 763 | * and enable any software events | 763 | * and enable any software events |
| 764 | */ | 764 | */ |
| 765 | for (i = n0; i < n0 + n; ++i) | 765 | for (i = n0; i < n0 + n; ++i) |
| 766 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | 766 | cpuhw->event[i]->hw.config = cpuhw->events[i]; |
| 767 | cpuctx->active_oncpu += n; | 767 | cpuctx->active_oncpu += n; |
| 768 | n = 1; | 768 | n = 1; |
| 769 | event_sched_in(group_leader, cpu); | 769 | event_sched_in(group_leader, cpu); |
| 770 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { | 770 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { |
| 771 | if (sub->state != PERF_EVENT_STATE_OFF) { | 771 | if (sub->state != PERF_EVENT_STATE_OFF) { |
| 772 | event_sched_in(sub, cpu); | 772 | event_sched_in(sub, cpu); |
| 773 | ++n; | 773 | ++n; |
| 774 | } | 774 | } |
| 775 | } | 775 | } |
| 776 | ctx->nr_active += n; | 776 | ctx->nr_active += n; |
| 777 | 777 | ||
| 778 | return 1; | 778 | return 1; |
| 779 | } | 779 | } |
| 780 | 780 | ||
| 781 | /* | 781 | /* |
| 782 | * Add an event to the PMU. | 782 | * Add an event to the PMU. |
| 783 | * If all events are not already frozen, then we disable and | 783 | * If all events are not already frozen, then we disable and |
| 784 | * re-enable the PMU in order to get hw_perf_enable to do the | 784 | * re-enable the PMU in order to get hw_perf_enable to do the |
| 785 | * actual work of reconfiguring the PMU. | 785 | * actual work of reconfiguring the PMU. |
| 786 | */ | 786 | */ |
| 787 | static int power_pmu_enable(struct perf_event *event) | 787 | static int power_pmu_enable(struct perf_event *event) |
| 788 | { | 788 | { |
| 789 | struct cpu_hw_events *cpuhw; | 789 | struct cpu_hw_events *cpuhw; |
| 790 | unsigned long flags; | 790 | unsigned long flags; |
| 791 | int n0; | 791 | int n0; |
| 792 | int ret = -EAGAIN; | 792 | int ret = -EAGAIN; |
| 793 | 793 | ||
| 794 | local_irq_save(flags); | 794 | local_irq_save(flags); |
| 795 | perf_disable(); | 795 | perf_disable(); |
| 796 | 796 | ||
| 797 | /* | 797 | /* |
| 798 | * Add the event to the list (if there is room) | 798 | * Add the event to the list (if there is room) |
| 799 | * and check whether the total set is still feasible. | 799 | * and check whether the total set is still feasible. |
| 800 | */ | 800 | */ |
| 801 | cpuhw = &__get_cpu_var(cpu_hw_events); | 801 | cpuhw = &__get_cpu_var(cpu_hw_events); |
| 802 | n0 = cpuhw->n_events; | 802 | n0 = cpuhw->n_events; |
| 803 | if (n0 >= ppmu->n_counter) | 803 | if (n0 >= ppmu->n_counter) |
| 804 | goto out; | 804 | goto out; |
| 805 | cpuhw->event[n0] = event; | 805 | cpuhw->event[n0] = event; |
| 806 | cpuhw->events[n0] = event->hw.config; | 806 | cpuhw->events[n0] = event->hw.config; |
| 807 | cpuhw->flags[n0] = event->hw.event_base; | 807 | cpuhw->flags[n0] = event->hw.event_base; |
| 808 | if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) | 808 | if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) |
| 809 | goto out; | 809 | goto out; |
| 810 | if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) | 810 | if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) |
| 811 | goto out; | 811 | goto out; |
| 812 | 812 | ||
| 813 | event->hw.config = cpuhw->events[n0]; | 813 | event->hw.config = cpuhw->events[n0]; |
| 814 | ++cpuhw->n_events; | 814 | ++cpuhw->n_events; |
| 815 | ++cpuhw->n_added; | 815 | ++cpuhw->n_added; |
| 816 | 816 | ||
| 817 | ret = 0; | 817 | ret = 0; |
| 818 | out: | 818 | out: |
| 819 | perf_enable(); | 819 | perf_enable(); |
| 820 | local_irq_restore(flags); | 820 | local_irq_restore(flags); |
| 821 | return ret; | 821 | return ret; |
| 822 | } | 822 | } |
| 823 | 823 | ||
| 824 | /* | 824 | /* |
| 825 | * Remove an event from the PMU. | 825 | * Remove an event from the PMU. |
| 826 | */ | 826 | */ |
| 827 | static void power_pmu_disable(struct perf_event *event) | 827 | static void power_pmu_disable(struct perf_event *event) |
| 828 | { | 828 | { |
| 829 | struct cpu_hw_events *cpuhw; | 829 | struct cpu_hw_events *cpuhw; |
| 830 | long i; | 830 | long i; |
| 831 | unsigned long flags; | 831 | unsigned long flags; |
| 832 | 832 | ||
| 833 | local_irq_save(flags); | 833 | local_irq_save(flags); |
| 834 | perf_disable(); | 834 | perf_disable(); |
| 835 | 835 | ||
| 836 | power_pmu_read(event); | 836 | power_pmu_read(event); |
| 837 | 837 | ||
| 838 | cpuhw = &__get_cpu_var(cpu_hw_events); | 838 | cpuhw = &__get_cpu_var(cpu_hw_events); |
| 839 | for (i = 0; i < cpuhw->n_events; ++i) { | 839 | for (i = 0; i < cpuhw->n_events; ++i) { |
| 840 | if (event == cpuhw->event[i]) { | 840 | if (event == cpuhw->event[i]) { |
| 841 | while (++i < cpuhw->n_events) | 841 | while (++i < cpuhw->n_events) |
| 842 | cpuhw->event[i-1] = cpuhw->event[i]; | 842 | cpuhw->event[i-1] = cpuhw->event[i]; |
| 843 | --cpuhw->n_events; | 843 | --cpuhw->n_events; |
| 844 | ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); | 844 | ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); |
| 845 | if (event->hw.idx) { | 845 | if (event->hw.idx) { |
| 846 | write_pmc(event->hw.idx, 0); | 846 | write_pmc(event->hw.idx, 0); |
| 847 | event->hw.idx = 0; | 847 | event->hw.idx = 0; |
| 848 | } | 848 | } |
| 849 | perf_event_update_userpage(event); | 849 | perf_event_update_userpage(event); |
| 850 | break; | 850 | break; |
| 851 | } | 851 | } |
| 852 | } | 852 | } |
| 853 | for (i = 0; i < cpuhw->n_limited; ++i) | 853 | for (i = 0; i < cpuhw->n_limited; ++i) |
| 854 | if (event == cpuhw->limited_counter[i]) | 854 | if (event == cpuhw->limited_counter[i]) |
| 855 | break; | 855 | break; |
| 856 | if (i < cpuhw->n_limited) { | 856 | if (i < cpuhw->n_limited) { |
| 857 | while (++i < cpuhw->n_limited) { | 857 | while (++i < cpuhw->n_limited) { |
| 858 | cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; | 858 | cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; |
| 859 | cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; | 859 | cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; |
| 860 | } | 860 | } |
| 861 | --cpuhw->n_limited; | 861 | --cpuhw->n_limited; |
| 862 | } | 862 | } |
| 863 | if (cpuhw->n_events == 0) { | 863 | if (cpuhw->n_events == 0) { |
| 864 | /* disable exceptions if no events are running */ | 864 | /* disable exceptions if no events are running */ |
| 865 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); | 865 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); |
| 866 | } | 866 | } |
| 867 | 867 | ||
| 868 | perf_enable(); | 868 | perf_enable(); |
| 869 | local_irq_restore(flags); | 869 | local_irq_restore(flags); |
| 870 | } | 870 | } |
| 871 | 871 | ||
| 872 | /* | 872 | /* |
| 873 | * Re-enable interrupts on an event after they were throttled | 873 | * Re-enable interrupts on an event after they were throttled |
| 874 | * because they were coming too fast. | 874 | * because they were coming too fast. |
| 875 | */ | 875 | */ |
| 876 | static void power_pmu_unthrottle(struct perf_event *event) | 876 | static void power_pmu_unthrottle(struct perf_event *event) |
| 877 | { | 877 | { |
| 878 | s64 val, left; | 878 | s64 val, left; |
| 879 | unsigned long flags; | 879 | unsigned long flags; |
| 880 | 880 | ||
| 881 | if (!event->hw.idx || !event->hw.sample_period) | 881 | if (!event->hw.idx || !event->hw.sample_period) |
| 882 | return; | 882 | return; |
| 883 | local_irq_save(flags); | 883 | local_irq_save(flags); |
| 884 | perf_disable(); | 884 | perf_disable(); |
| 885 | power_pmu_read(event); | 885 | power_pmu_read(event); |
| 886 | left = event->hw.sample_period; | 886 | left = event->hw.sample_period; |
| 887 | event->hw.last_period = left; | 887 | event->hw.last_period = left; |
| 888 | val = 0; | 888 | val = 0; |
| 889 | if (left < 0x80000000L) | 889 | if (left < 0x80000000L) |
| 890 | val = 0x80000000L - left; | 890 | val = 0x80000000L - left; |
| 891 | write_pmc(event->hw.idx, val); | 891 | write_pmc(event->hw.idx, val); |
| 892 | atomic64_set(&event->hw.prev_count, val); | 892 | atomic64_set(&event->hw.prev_count, val); |
| 893 | atomic64_set(&event->hw.period_left, left); | 893 | atomic64_set(&event->hw.period_left, left); |
| 894 | perf_event_update_userpage(event); | 894 | perf_event_update_userpage(event); |
| 895 | perf_enable(); | 895 | perf_enable(); |
| 896 | local_irq_restore(flags); | 896 | local_irq_restore(flags); |
| 897 | } | 897 | } |
| 898 | 898 | ||
| 899 | struct pmu power_pmu = { | 899 | struct pmu power_pmu = { |
| 900 | .enable = power_pmu_enable, | 900 | .enable = power_pmu_enable, |
| 901 | .disable = power_pmu_disable, | 901 | .disable = power_pmu_disable, |
| 902 | .read = power_pmu_read, | 902 | .read = power_pmu_read, |
| 903 | .unthrottle = power_pmu_unthrottle, | 903 | .unthrottle = power_pmu_unthrottle, |
| 904 | }; | 904 | }; |
| 905 | 905 | ||
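struct pmu is a plain ops table: the generic perf core never calls the powerpc functions directly, only through these pointers, which is what lets each architecture plug in its own implementation. A minimal illustration of the pattern (all names below are invented for the demo):

    #include <stdio.h>

    struct ops {
        int  (*enable)(int id);
        void (*disable)(int id);
    };

    static int  demo_enable(int id)  { printf("enable %d\n", id); return 0; }
    static void demo_disable(int id) { printf("disable %d\n", id); }

    static const struct ops demo_pmu = {
        .enable  = demo_enable,
        .disable = demo_disable,
    };

    int main(void)
    {
        /* The caller only ever sees the table, never the callees. */
        if (demo_pmu.enable(1) == 0)
            demo_pmu.disable(1);
        return 0;
    }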
| 906 | /* | 906 | /* |
| 907 | * Return 1 if we might be able to put the event on a limited PMC, | 907 | * Return 1 if we might be able to put the event on a limited PMC, |
| 908 | * or 0 if not. | 908 | * or 0 if not. |
| 909 | * An event can only go on a limited PMC if it counts something | 909 | * An event can only go on a limited PMC if it counts something |
| 910 | * that a limited PMC can count, doesn't require interrupts, and | 910 | * that a limited PMC can count, doesn't require interrupts, and |
| 911 | * doesn't exclude any processor mode. | 911 | * doesn't exclude any processor mode. |
| 912 | */ | 912 | */ |
| 913 | static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, | 913 | static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, |
| 914 | unsigned int flags) | 914 | unsigned int flags) |
| 915 | { | 915 | { |
| 916 | int n; | 916 | int n; |
| 917 | u64 alt[MAX_EVENT_ALTERNATIVES]; | 917 | u64 alt[MAX_EVENT_ALTERNATIVES]; |
| 918 | 918 | ||
| 919 | if (event->attr.exclude_user | 919 | if (event->attr.exclude_user |
| 920 | || event->attr.exclude_kernel | 920 | || event->attr.exclude_kernel |
| 921 | || event->attr.exclude_hv | 921 | || event->attr.exclude_hv |
| 922 | || event->attr.sample_period) | 922 | || event->attr.sample_period) |
| 923 | return 0; | 923 | return 0; |
| 924 | 924 | ||
| 925 | if (ppmu->limited_pmc_event(ev)) | 925 | if (ppmu->limited_pmc_event(ev)) |
| 926 | return 1; | 926 | return 1; |
| 927 | 927 | ||
| 928 | /* | 928 | /* |
| 929 | * The requested event_id isn't on a limited PMC already; | 929 | * The requested event_id isn't on a limited PMC already; |
| 930 | * see if any alternative code goes on a limited PMC. | 930 | * see if any alternative code goes on a limited PMC. |
| 931 | */ | 931 | */ |
| 932 | if (!ppmu->get_alternatives) | 932 | if (!ppmu->get_alternatives) |
| 933 | return 0; | 933 | return 0; |
| 934 | 934 | ||
| 935 | flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; | 935 | flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; |
| 936 | n = ppmu->get_alternatives(ev, flags, alt); | 936 | n = ppmu->get_alternatives(ev, flags, alt); |
| 937 | 937 | ||
| 938 | return n > 0; | 938 | return n > 0; |
| 939 | } | 939 | } |
| 940 | 940 | ||
| 941 | /* | 941 | /* |
| 942 | * Find an alternative event_id that goes on a normal PMC, if possible, | 942 | * Find an alternative event_id that goes on a normal PMC, if possible, |
| 943 | * and return the event_id code, or 0 if there is no such alternative. | 943 | * and return the event_id code, or 0 if there is no such alternative. |
| 944 | * (Note: event_id code 0 is "don't count" on all machines.) | 944 | * (Note: event_id code 0 is "don't count" on all machines.) |
| 945 | */ | 945 | */ |
| 946 | static u64 normal_pmc_alternative(u64 ev, unsigned long flags) | 946 | static u64 normal_pmc_alternative(u64 ev, unsigned long flags) |
| 947 | { | 947 | { |
| 948 | u64 alt[MAX_EVENT_ALTERNATIVES]; | 948 | u64 alt[MAX_EVENT_ALTERNATIVES]; |
| 949 | int n; | 949 | int n; |
| 950 | 950 | ||
| 951 | flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); | 951 | flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); |
| 952 | n = ppmu->get_alternatives(ev, flags, alt); | 952 | n = ppmu->get_alternatives(ev, flags, alt); |
| 953 | if (!n) | 953 | if (!n) |
| 954 | return 0; | 954 | return 0; |
| 955 | return alt[0]; | 955 | return alt[0]; |
| 956 | } | 956 | } |
| 957 | 957 | ||
| 958 | /* Number of perf_events counting hardware events */ | 958 | /* Number of perf_events counting hardware events */ |
| 959 | static atomic_t num_events; | 959 | static atomic_t num_events; |
| 960 | /* Used to avoid races in calling reserve/release_pmc_hardware */ | 960 | /* Used to avoid races in calling reserve/release_pmc_hardware */ |
| 961 | static DEFINE_MUTEX(pmc_reserve_mutex); | 961 | static DEFINE_MUTEX(pmc_reserve_mutex); |
| 962 | 962 | ||
| 963 | /* | 963 | /* |
| 964 | * Release the PMU if this is the last perf_event. | 964 | * Release the PMU if this is the last perf_event. |
| 965 | */ | 965 | */ |
| 966 | static void hw_perf_event_destroy(struct perf_event *event) | 966 | static void hw_perf_event_destroy(struct perf_event *event) |
| 967 | { | 967 | { |
| 968 | if (!atomic_add_unless(&num_events, -1, 1)) { | 968 | if (!atomic_add_unless(&num_events, -1, 1)) { |
| 969 | mutex_lock(&pmc_reserve_mutex); | 969 | mutex_lock(&pmc_reserve_mutex); |
| 970 | if (atomic_dec_return(&num_events) == 0) | 970 | if (atomic_dec_return(&num_events) == 0) |
| 971 | release_pmc_hardware(); | 971 | release_pmc_hardware(); |
| 972 | mutex_unlock(&pmc_reserve_mutex); | 972 | mutex_unlock(&pmc_reserve_mutex); |
| 973 | } | 973 | } |
| 974 | } | 974 | } |
| 975 | 975 | ||
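The atomic_add_unless() in hw_perf_event_destroy() is the classic fast-path/slow-path refcount drop: decrement locklessly unless the count might reach zero, and only then take the mutex so a concurrent initialization cannot re-reserve the hardware mid-release. A userspace sketch of the same shape using C11 atomics and pthreads (an illustration of the pattern, not the kernel primitives themselves):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int num_events;
    static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void put_event(void)
    {
        int old = atomic_load(&num_events);

        /* Fast path: drop the count, but only while it stays above 1. */
        while (old > 1)
            if (atomic_compare_exchange_weak(&num_events, &old, old - 1))
                return;

        /* Slow path: serialize against reserving/releasing the hardware. */
        pthread_mutex_lock(&reserve_mutex);
        if (atomic_fetch_sub(&num_events, 1) == 1) {
            /* release_pmc_hardware() would be called here. */
        }
        pthread_mutex_unlock(&reserve_mutex);
    }

    int main(void)
    {
        atomic_store(&num_events, 2);
        put_event();  /* fast path */
        put_event();  /* slow path: last user releases */
        return 0;
    }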
| 976 | /* | 976 | /* |
| 977 | * Translate a generic cache event_id config to a raw event_id code. | 977 | * Translate a generic cache event_id config to a raw event_id code. |
| 978 | */ | 978 | */ |
| 979 | static int hw_perf_cache_event(u64 config, u64 *eventp) | 979 | static int hw_perf_cache_event(u64 config, u64 *eventp) |
| 980 | { | 980 | { |
| 981 | unsigned long type, op, result; | 981 | unsigned long type, op, result; |
| 982 | int ev; | 982 | int ev; |
| 983 | 983 | ||
| 984 | if (!ppmu->cache_events) | 984 | if (!ppmu->cache_events) |
| 985 | return -EINVAL; | 985 | return -EINVAL; |
| 986 | 986 | ||
| 987 | /* unpack config */ | 987 | /* unpack config */ |
| 988 | type = config & 0xff; | 988 | type = config & 0xff; |
| 989 | op = (config >> 8) & 0xff; | 989 | op = (config >> 8) & 0xff; |
| 990 | result = (config >> 16) & 0xff; | 990 | result = (config >> 16) & 0xff; |
| 991 | 991 | ||
| 992 | if (type >= PERF_COUNT_HW_CACHE_MAX || | 992 | if (type >= PERF_COUNT_HW_CACHE_MAX || |
| 993 | op >= PERF_COUNT_HW_CACHE_OP_MAX || | 993 | op >= PERF_COUNT_HW_CACHE_OP_MAX || |
| 994 | result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | 994 | result >= PERF_COUNT_HW_CACHE_RESULT_MAX) |
| 995 | return -EINVAL; | 995 | return -EINVAL; |
| 996 | 996 | ||
| 997 | ev = (*ppmu->cache_events)[type][op][result]; | 997 | ev = (*ppmu->cache_events)[type][op][result]; |
| 998 | if (ev == 0) | 998 | if (ev == 0) |
| 999 | return -EOPNOTSUPP; | 999 | return -EOPNOTSUPP; |
| 1000 | if (ev == -1) | 1000 | if (ev == -1) |
| 1001 | return -EINVAL; | 1001 | return -EINVAL; |
| 1002 | *eventp = ev; | 1002 | *eventp = ev; |
| 1003 | return 0; | 1003 | return 0; |
| 1004 | } | 1004 | } |
| 1005 | 1005 | ||
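The unpacking above implies the encoding: a generic cache event_id packs the cache type into bits 0-7, the operation into bits 8-15 and the result into bits 16-23 of attr.config. A worked example; the three values mirror PERF_COUNT_HW_CACHE_L1D, _OP_READ and _RESULT_MISS, which are 0, 0 and 1 in the generic ABI:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long config = 0 |        /* type:   L1D  */
                                    (0 << 8) | /* op:     READ */
                                    (1 << 16); /* result: MISS */

        /* Same shifts and masks as hw_perf_cache_event() above. */
        printf("type=%llu op=%llu result=%llu\n",
               config & 0xff, (config >> 8) & 0xff, (config >> 16) & 0xff);
        return 0;
    }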
| 1006 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1006 | const struct pmu *hw_perf_event_init(struct perf_event *event) |
| 1007 | { | 1007 | { |
| 1008 | u64 ev; | 1008 | u64 ev; |
| 1009 | unsigned long flags; | 1009 | unsigned long flags; |
| 1010 | struct perf_event *ctrs[MAX_HWEVENTS]; | 1010 | struct perf_event *ctrs[MAX_HWEVENTS]; |
| 1011 | u64 events[MAX_HWEVENTS]; | 1011 | u64 events[MAX_HWEVENTS]; |
| 1012 | unsigned int cflags[MAX_HWEVENTS]; | 1012 | unsigned int cflags[MAX_HWEVENTS]; |
| 1013 | int n; | 1013 | int n; |
| 1014 | int err; | 1014 | int err; |
| 1015 | struct cpu_hw_events *cpuhw; | 1015 | struct cpu_hw_events *cpuhw; |
| 1016 | 1016 | ||
| 1017 | if (!ppmu) | 1017 | if (!ppmu) |
| 1018 | return ERR_PTR(-ENXIO); | 1018 | return ERR_PTR(-ENXIO); |
| 1019 | switch (event->attr.type) { | 1019 | switch (event->attr.type) { |
| 1020 | case PERF_TYPE_HARDWARE: | 1020 | case PERF_TYPE_HARDWARE: |
| 1021 | ev = event->attr.config; | 1021 | ev = event->attr.config; |
| 1022 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 1022 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
| 1023 | return ERR_PTR(-EOPNOTSUPP); | 1023 | return ERR_PTR(-EOPNOTSUPP); |
| 1024 | ev = ppmu->generic_events[ev]; | 1024 | ev = ppmu->generic_events[ev]; |
| 1025 | break; | 1025 | break; |
| 1026 | case PERF_TYPE_HW_CACHE: | 1026 | case PERF_TYPE_HW_CACHE: |
| 1027 | err = hw_perf_cache_event(event->attr.config, &ev); | 1027 | err = hw_perf_cache_event(event->attr.config, &ev); |
| 1028 | if (err) | 1028 | if (err) |
| 1029 | return ERR_PTR(err); | 1029 | return ERR_PTR(err); |
| 1030 | break; | 1030 | break; |
| 1031 | case PERF_TYPE_RAW: | 1031 | case PERF_TYPE_RAW: |
| 1032 | ev = event->attr.config; | 1032 | ev = event->attr.config; |
| 1033 | break; | 1033 | break; |
| 1034 | default: | 1034 | default: |
| 1035 | return ERR_PTR(-EINVAL); | 1035 | return ERR_PTR(-EINVAL); |
| 1036 | } | 1036 | } |
| 1037 | event->hw.config_base = ev; | 1037 | event->hw.config_base = ev; |
| 1038 | event->hw.idx = 0; | 1038 | event->hw.idx = 0; |
| 1039 | 1039 | ||
| 1040 | /* | 1040 | /* |
| 1041 | * If we are not running on a hypervisor, force the | 1041 | * If we are not running on a hypervisor, force the |
| 1042 | * exclude_hv bit to 0 so that we don't care what | 1042 | * exclude_hv bit to 0 so that we don't care what |
| 1043 | * the user set it to. | 1043 | * the user set it to. |
| 1044 | */ | 1044 | */ |
| 1045 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | 1045 | if (!firmware_has_feature(FW_FEATURE_LPAR)) |
| 1046 | event->attr.exclude_hv = 0; | 1046 | event->attr.exclude_hv = 0; |
| 1047 | 1047 | ||
| 1048 | /* | 1048 | /* |
| 1049 | * If this is a per-task event, then we can use | 1049 | * If this is a per-task event, then we can use |
| 1050 | * PM_RUN_* events interchangeably with their non-RUN_* | 1050 | * PM_RUN_* events interchangeably with their non-RUN_* |
| 1051 | * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. | 1051 | * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. |
| 1052 | * XXX we should check if the task is an idle task. | 1052 | * XXX we should check if the task is an idle task. |
| 1053 | */ | 1053 | */ |
| 1054 | flags = 0; | 1054 | flags = 0; |
| 1055 | if (event->ctx->task) | 1055 | if (event->ctx->task) |
| 1056 | flags |= PPMU_ONLY_COUNT_RUN; | 1056 | flags |= PPMU_ONLY_COUNT_RUN; |
| 1057 | 1057 | ||
| 1058 | /* | 1058 | /* |
| 1059 | * If this machine has limited events, check whether this | 1059 | * If this machine has limited events, check whether this |
| 1060 | * event_id could go on a limited event. | 1060 | * event_id could go on a limited event. |
| 1061 | */ | 1061 | */ |
| 1062 | if (ppmu->flags & PPMU_LIMITED_PMC5_6) { | 1062 | if (ppmu->flags & PPMU_LIMITED_PMC5_6) { |
| 1063 | if (can_go_on_limited_pmc(event, ev, flags)) { | 1063 | if (can_go_on_limited_pmc(event, ev, flags)) { |
| 1064 | flags |= PPMU_LIMITED_PMC_OK; | 1064 | flags |= PPMU_LIMITED_PMC_OK; |
| 1065 | } else if (ppmu->limited_pmc_event(ev)) { | 1065 | } else if (ppmu->limited_pmc_event(ev)) { |
| 1066 | /* | 1066 | /* |
| 1067 | * The requested event_id is on a limited PMC, | 1067 | * The requested event_id is on a limited PMC, |
| 1068 | * but we can't use a limited PMC; see if any | 1068 | * but we can't use a limited PMC; see if any |
| 1069 | * alternative goes on a normal PMC. | 1069 | * alternative goes on a normal PMC. |
| 1070 | */ | 1070 | */ |
| 1071 | ev = normal_pmc_alternative(ev, flags); | 1071 | ev = normal_pmc_alternative(ev, flags); |
| 1072 | if (!ev) | 1072 | if (!ev) |
| 1073 | return ERR_PTR(-EINVAL); | 1073 | return ERR_PTR(-EINVAL); |
| 1074 | } | 1074 | } |
| 1075 | } | 1075 | } |
| 1076 | 1076 | ||
| 1077 | /* | 1077 | /* |
| 1078 | * If this is in a group, check if it can go on with all the | 1078 | * If this is in a group, check if it can go on with all the |
| 1079 | * other hardware events in the group. We assume the event | 1079 | * other hardware events in the group. We assume the event |
| 1080 | * hasn't been linked into its leader's sibling list at this point. | 1080 | * hasn't been linked into its leader's sibling list at this point. |
| 1081 | */ | 1081 | */ |
| 1082 | n = 0; | 1082 | n = 0; |
| 1083 | if (event->group_leader != event) { | 1083 | if (event->group_leader != event) { |
| 1084 | n = collect_events(event->group_leader, ppmu->n_counter - 1, | 1084 | n = collect_events(event->group_leader, ppmu->n_counter - 1, |
| 1085 | ctrs, events, cflags); | 1085 | ctrs, events, cflags); |
| 1086 | if (n < 0) | 1086 | if (n < 0) |
| 1087 | return ERR_PTR(-EINVAL); | 1087 | return ERR_PTR(-EINVAL); |
| 1088 | } | 1088 | } |
| 1089 | events[n] = ev; | 1089 | events[n] = ev; |
| 1090 | ctrs[n] = event; | 1090 | ctrs[n] = event; |
| 1091 | cflags[n] = flags; | 1091 | cflags[n] = flags; |
| 1092 | if (check_excludes(ctrs, cflags, n, 1)) | 1092 | if (check_excludes(ctrs, cflags, n, 1)) |
| 1093 | return ERR_PTR(-EINVAL); | 1093 | return ERR_PTR(-EINVAL); |
| 1094 | 1094 | ||
| 1095 | cpuhw = &get_cpu_var(cpu_hw_events); | 1095 | cpuhw = &get_cpu_var(cpu_hw_events); |
| 1096 | err = power_check_constraints(cpuhw, events, cflags, n + 1); | 1096 | err = power_check_constraints(cpuhw, events, cflags, n + 1); |
| 1097 | put_cpu_var(cpu_hw_events); | 1097 | put_cpu_var(cpu_hw_events); |
| 1098 | if (err) | 1098 | if (err) |
| 1099 | return ERR_PTR(-EINVAL); | 1099 | return ERR_PTR(-EINVAL); |
| 1100 | 1100 | ||
| 1101 | event->hw.config = events[n]; | 1101 | event->hw.config = events[n]; |
| 1102 | event->hw.event_base = cflags[n]; | 1102 | event->hw.event_base = cflags[n]; |
| 1103 | event->hw.last_period = event->hw.sample_period; | 1103 | event->hw.last_period = event->hw.sample_period; |
| 1104 | atomic64_set(&event->hw.period_left, event->hw.last_period); | 1104 | atomic64_set(&event->hw.period_left, event->hw.last_period); |
| 1105 | 1105 | ||
| 1106 | /* | 1106 | /* |
| 1107 | * See if we need to reserve the PMU. | 1107 | * See if we need to reserve the PMU. |
| 1108 | * If no events are currently in use, then we have to take a | 1108 | * If no events are currently in use, then we have to take a |
| 1109 | * mutex to ensure that we don't race with another task doing | 1109 | * mutex to ensure that we don't race with another task doing |
| 1110 | * reserve_pmc_hardware or release_pmc_hardware. | 1110 | * reserve_pmc_hardware or release_pmc_hardware. |
| 1111 | */ | 1111 | */ |
| 1112 | err = 0; | 1112 | err = 0; |
| 1113 | if (!atomic_inc_not_zero(&num_events)) { | 1113 | if (!atomic_inc_not_zero(&num_events)) { |
| 1114 | mutex_lock(&pmc_reserve_mutex); | 1114 | mutex_lock(&pmc_reserve_mutex); |
| 1115 | if (atomic_read(&num_events) == 0 && | 1115 | if (atomic_read(&num_events) == 0 && |
| 1116 | reserve_pmc_hardware(perf_event_interrupt)) | 1116 | reserve_pmc_hardware(perf_event_interrupt)) |
| 1117 | err = -EBUSY; | 1117 | err = -EBUSY; |
| 1118 | else | 1118 | else |
| 1119 | atomic_inc(&num_events); | 1119 | atomic_inc(&num_events); |
| 1120 | mutex_unlock(&pmc_reserve_mutex); | 1120 | mutex_unlock(&pmc_reserve_mutex); |
| 1121 | } | 1121 | } |
| 1122 | event->destroy = hw_perf_event_destroy; | 1122 | event->destroy = hw_perf_event_destroy; |
| 1123 | 1123 | ||
| 1124 | if (err) | 1124 | if (err) |
| 1125 | return ERR_PTR(err); | 1125 | return ERR_PTR(err); |
| 1126 | return &power_pmu; | 1126 | return &power_pmu; |
| 1127 | } | 1127 | } |
| 1128 | 1128 | ||
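This init path is reached from userspace through the perf_event_open() syscall; the PERF_TYPE_RAW case takes attr.config verbatim as the event code. A minimal sketch of how such an event might be opened (the raw code passed in is made up for the example):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    static int open_raw_event(unsigned long long raw_code)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);
        attr.type   = PERF_TYPE_RAW;   /* handled by the PERF_TYPE_RAW case above */
        attr.config = raw_code;        /* becomes 'ev', then hw.config_base */

        /* pid = 0, cpu = -1: count this task on whatever CPU it runs on. */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }

    int main(void)
    {
        int fd = open_raw_event(0x1001ULL);  /* hypothetical raw event code */
        return fd < 0;
    }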
| 1129 | /* | 1129 | /* |
| 1130 | * A counter has overflowed; update its count and record | 1130 | * A counter has overflowed; update its count and record |
| 1131 | * things if requested. Note that interrupts are hard-disabled | 1131 | * things if requested. Note that interrupts are hard-disabled |
| 1132 | * here so there is no possibility of being interrupted. | 1132 | * here so there is no possibility of being interrupted. |
| 1133 | */ | 1133 | */ |
| 1134 | static void record_and_restart(struct perf_event *event, unsigned long val, | 1134 | static void record_and_restart(struct perf_event *event, unsigned long val, |
| 1135 | struct pt_regs *regs, int nmi) | 1135 | struct pt_regs *regs, int nmi) |
| 1136 | { | 1136 | { |
| 1137 | u64 period = event->hw.sample_period; | 1137 | u64 period = event->hw.sample_period; |
| 1138 | s64 prev, delta, left; | 1138 | s64 prev, delta, left; |
| 1139 | int record = 0; | 1139 | int record = 0; |
| 1140 | 1140 | ||
| 1141 | /* we don't have to worry about interrupts here */ | 1141 | /* we don't have to worry about interrupts here */ |
| 1142 | prev = atomic64_read(&event->hw.prev_count); | 1142 | prev = atomic64_read(&event->hw.prev_count); |
| 1143 | delta = (val - prev) & 0xfffffffful; | 1143 | delta = (val - prev) & 0xfffffffful; |
| 1144 | atomic64_add(delta, &event->count); | 1144 | atomic64_add(delta, &event->count); |
| 1145 | 1145 | ||
| 1146 | /* | 1146 | /* |
| 1147 | * See if the total period for this event has expired, | 1147 | * See if the total period for this event has expired, |
| 1148 | * and update for the next period. | 1148 | * and update for the next period. |
| 1149 | */ | 1149 | */ |
| 1150 | val = 0; | 1150 | val = 0; |
| 1151 | left = atomic64_read(&event->hw.period_left) - delta; | 1151 | left = atomic64_read(&event->hw.period_left) - delta; |
| 1152 | if (period) { | 1152 | if (period) { |
| 1153 | if (left <= 0) { | 1153 | if (left <= 0) { |
| 1154 | left += period; | 1154 | left += period; |
| 1155 | if (left <= 0) | 1155 | if (left <= 0) |
| 1156 | left = period; | 1156 | left = period; |
| 1157 | record = 1; | 1157 | record = 1; |
| 1158 | } | 1158 | } |
| 1159 | if (left < 0x80000000LL) | 1159 | if (left < 0x80000000LL) |
| 1160 | val = 0x80000000LL - left; | 1160 | val = 0x80000000LL - left; |
| 1161 | } | 1161 | } |
| 1162 | 1162 | ||
| 1163 | /* | 1163 | /* |
| 1164 | * Finally record data if requested. | 1164 | * Finally record data if requested. |
| 1165 | */ | 1165 | */ |
| 1166 | if (record) { | 1166 | if (record) { |
| 1167 | struct perf_sample_data data = { | 1167 | struct perf_sample_data data = { |
| 1168 | .addr = 0, | 1168 | .addr = ~0ULL, |
| 1169 | .period = event->hw.last_period, | 1169 | .period = event->hw.last_period, |
| 1170 | }; | 1170 | }; |
| 1171 | 1171 | ||
| 1172 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | 1172 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
| 1173 | perf_get_data_addr(regs, &data.addr); | 1173 | perf_get_data_addr(regs, &data.addr); |
| 1174 | 1174 | ||
| 1175 | if (perf_event_overflow(event, nmi, &data, regs)) { | 1175 | if (perf_event_overflow(event, nmi, &data, regs)) { |
| 1176 | /* | 1176 | /* |
| 1177 | * Interrupts are coming too fast - throttle them | 1177 | * Interrupts are coming too fast - throttle them |
| 1178 | * by setting the event to 0, so it will be | 1178 | * by setting the event to 0, so it will be |
| 1179 | * at least 2^30 cycles until the next interrupt | 1179 | * at least 2^30 cycles until the next interrupt |
| 1180 | * (assuming each event counts at most 2 counts | 1180 | * (assuming each event counts at most 2 counts |
| 1181 | * per cycle). | 1181 | * per cycle). |
| 1182 | */ | 1182 | */ |
| 1183 | val = 0; | 1183 | val = 0; |
| 1184 | left = ~0ULL >> 1; | 1184 | left = ~0ULL >> 1; |
| 1185 | } | 1185 | } |
| 1186 | } | 1186 | } |
| 1187 | 1187 | ||
| 1188 | write_pmc(event->hw.idx, val); | 1188 | write_pmc(event->hw.idx, val); |
| 1189 | atomic64_set(&event->hw.prev_count, val); | 1189 | atomic64_set(&event->hw.prev_count, val); |
| 1190 | atomic64_set(&event->hw.period_left, left); | 1190 | atomic64_set(&event->hw.period_left, left); |
| 1191 | perf_event_update_userpage(event); | 1191 | perf_event_update_userpage(event); |
| 1192 | } | 1192 | } |
| 1193 | 1193 | ||
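The preset value is the key trick in record_and_restart(): the PMCs raise an interrupt when their most significant bit goes from 0 to 1, so writing 2^31 - left arranges the next interrupt after exactly 'left' more counts (hence the guard that left stays below 0x80000000). A standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        long long    left = 100000;             /* counts until the next sample */
        unsigned int pmc  = 0x80000000u - left; /* value written to the PMC */

        pmc += left;                            /* hardware counts 'left' events */

        /* Same MSB test as the interrupt handler's "(int)val < 0". */
        printf("overflowed: %s\n", (int)pmc < 0 ? "yes" : "no");
        return 0;
    }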
| 1194 | /* | 1194 | /* |
| 1195 | * Called from generic code to get the misc flags (i.e. processor mode) | 1195 | * Called from generic code to get the misc flags (i.e. processor mode) |
| 1196 | * for an event_id. | 1196 | * for an event_id. |
| 1197 | */ | 1197 | */ |
| 1198 | unsigned long perf_misc_flags(struct pt_regs *regs) | 1198 | unsigned long perf_misc_flags(struct pt_regs *regs) |
| 1199 | { | 1199 | { |
| 1200 | u32 flags = perf_get_misc_flags(regs); | 1200 | u32 flags = perf_get_misc_flags(regs); |
| 1201 | 1201 | ||
| 1202 | if (flags) | 1202 | if (flags) |
| 1203 | return flags; | 1203 | return flags; |
| 1204 | return user_mode(regs) ? PERF_RECORD_MISC_USER : | 1204 | return user_mode(regs) ? PERF_RECORD_MISC_USER : |
| 1205 | PERF_RECORD_MISC_KERNEL; | 1205 | PERF_RECORD_MISC_KERNEL; |
| 1206 | } | 1206 | } |
| 1207 | 1207 | ||
| 1208 | /* | 1208 | /* |
| 1209 | * Called from generic code to get the instruction pointer | 1209 | * Called from generic code to get the instruction pointer |
| 1210 | * for an event_id. | 1210 | * for an event_id. |
| 1211 | */ | 1211 | */ |
| 1212 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | 1212 | unsigned long perf_instruction_pointer(struct pt_regs *regs) |
| 1213 | { | 1213 | { |
| 1214 | unsigned long ip; | 1214 | unsigned long ip; |
| 1215 | 1215 | ||
| 1216 | if (TRAP(regs) != 0xf00) | 1216 | if (TRAP(regs) != 0xf00) |
| 1217 | return regs->nip; /* not a PMU interrupt */ | 1217 | return regs->nip; /* not a PMU interrupt */ |
| 1218 | 1218 | ||
| 1219 | ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); | 1219 | ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); |
| 1220 | return ip; | 1220 | return ip; |
| 1221 | } | 1221 | } |
| 1222 | 1222 | ||
| 1223 | /* | 1223 | /* |
| 1224 | * Performance monitor interrupt stuff | 1224 | * Performance monitor interrupt stuff |
| 1225 | */ | 1225 | */ |
| 1226 | static void perf_event_interrupt(struct pt_regs *regs) | 1226 | static void perf_event_interrupt(struct pt_regs *regs) |
| 1227 | { | 1227 | { |
| 1228 | int i; | 1228 | int i; |
| 1229 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1229 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
| 1230 | struct perf_event *event; | 1230 | struct perf_event *event; |
| 1231 | unsigned long val; | 1231 | unsigned long val; |
| 1232 | int found = 0; | 1232 | int found = 0; |
| 1233 | int nmi; | 1233 | int nmi; |
| 1234 | 1234 | ||
| 1235 | if (cpuhw->n_limited) | 1235 | if (cpuhw->n_limited) |
| 1236 | freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), | 1236 | freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), |
| 1237 | mfspr(SPRN_PMC6)); | 1237 | mfspr(SPRN_PMC6)); |
| 1238 | 1238 | ||
| 1239 | perf_read_regs(regs); | 1239 | perf_read_regs(regs); |
| 1240 | 1240 | ||
| 1241 | nmi = perf_intr_is_nmi(regs); | 1241 | nmi = perf_intr_is_nmi(regs); |
| 1242 | if (nmi) | 1242 | if (nmi) |
| 1243 | nmi_enter(); | 1243 | nmi_enter(); |
| 1244 | else | 1244 | else |
| 1245 | irq_enter(); | 1245 | irq_enter(); |
| 1246 | 1246 | ||
| 1247 | for (i = 0; i < cpuhw->n_events; ++i) { | 1247 | for (i = 0; i < cpuhw->n_events; ++i) { |
| 1248 | event = cpuhw->event[i]; | 1248 | event = cpuhw->event[i]; |
| 1249 | if (!event->hw.idx || is_limited_pmc(event->hw.idx)) | 1249 | if (!event->hw.idx || is_limited_pmc(event->hw.idx)) |
| 1250 | continue; | 1250 | continue; |
| 1251 | val = read_pmc(event->hw.idx); | 1251 | val = read_pmc(event->hw.idx); |
| 1252 | if ((int)val < 0) { | 1252 | if ((int)val < 0) { |
| 1253 | /* event has overflowed */ | 1253 | /* event has overflowed */ |
| 1254 | found = 1; | 1254 | found = 1; |
| 1255 | record_and_restart(event, val, regs, nmi); | 1255 | record_and_restart(event, val, regs, nmi); |
| 1256 | } | 1256 | } |
| 1257 | } | 1257 | } |
| 1258 | 1258 | ||
| 1259 | /* | 1259 | /* |
| 1260 | * In case we didn't find and reset the event that caused | 1260 | * In case we didn't find and reset the event that caused |
| 1261 | * the interrupt, scan all events and reset any that are | 1261 | * the interrupt, scan all events and reset any that are |
| 1262 | * negative, to avoid getting continual interrupts. | 1262 | * negative, to avoid getting continual interrupts. |
| 1263 | * Any that we processed in the previous loop will not be negative. | 1263 | * Any that we processed in the previous loop will not be negative. |
| 1264 | */ | 1264 | */ |
| 1265 | if (!found) { | 1265 | if (!found) { |
| 1266 | for (i = 0; i < ppmu->n_counter; ++i) { | 1266 | for (i = 0; i < ppmu->n_counter; ++i) { |
| 1267 | if (is_limited_pmc(i + 1)) | 1267 | if (is_limited_pmc(i + 1)) |
| 1268 | continue; | 1268 | continue; |
| 1269 | val = read_pmc(i + 1); | 1269 | val = read_pmc(i + 1); |
| 1270 | if ((int)val < 0) | 1270 | if ((int)val < 0) |
| 1271 | write_pmc(i + 1, 0); | 1271 | write_pmc(i + 1, 0); |
| 1272 | } | 1272 | } |
| 1273 | } | 1273 | } |
| 1274 | 1274 | ||
| 1275 | /* | 1275 | /* |
| 1276 | * Reset MMCR0 to its normal value. This will set PMXE and | 1276 | * Reset MMCR0 to its normal value. This will set PMXE and |
| 1277 | * clear FC (freeze counters) and PMAO (perf mon alert occurred) | 1277 | * clear FC (freeze counters) and PMAO (perf mon alert occurred) |
| 1278 | * and thus allow interrupts to occur again. | 1278 | * and thus allow interrupts to occur again. |
| 1279 | * XXX might want to use MSR.PM to keep the events frozen until | 1279 | * XXX might want to use MSR.PM to keep the events frozen until |
| 1280 | * we get back out of this interrupt. | 1280 | * we get back out of this interrupt. |
| 1281 | */ | 1281 | */ |
| 1282 | write_mmcr0(cpuhw, cpuhw->mmcr[0]); | 1282 | write_mmcr0(cpuhw, cpuhw->mmcr[0]); |
| 1283 | 1283 | ||
| 1284 | if (nmi) | 1284 | if (nmi) |
| 1285 | nmi_exit(); | 1285 | nmi_exit(); |
| 1286 | else | 1286 | else |
| 1287 | irq_exit(); | 1287 | irq_exit(); |
| 1288 | } | 1288 | } |
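The (int)val < 0 overflow test relies on the PMCs being 32 bits wide and on events being armed by preloading the counter so that bit 31 flips once the sampling period has elapsed; the signed cast then turns the overflow check into a sign test. A self-contained demo of the idiom, with an assumed period of 100000:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t period = 100000;               /* assumed sampling period */
        uint32_t pmc = 0x80000000u - period;    /* armed counter value */

        pmc += period - 1;      /* one event short of the period */
        printf("overflowed = %d\n", (int32_t)pmc < 0);  /* 0 */

        pmc += 1;               /* the period-th event sets bit 31 */
        printf("overflowed = %d\n", (int32_t)pmc < 0);  /* 1 */
        return 0;
    }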
| 1289 | 1289 | ||
| 1290 | void hw_perf_event_setup(int cpu) | 1290 | void hw_perf_event_setup(int cpu) |
| 1291 | { | 1291 | { |
| 1292 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); | 1292 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); |
| 1293 | 1293 | ||
| 1294 | if (!ppmu) | 1294 | if (!ppmu) |
| 1295 | return; | 1295 | return; |
| 1296 | memset(cpuhw, 0, sizeof(*cpuhw)); | 1296 | memset(cpuhw, 0, sizeof(*cpuhw)); |
| 1297 | cpuhw->mmcr[0] = MMCR0_FC; | 1297 | cpuhw->mmcr[0] = MMCR0_FC; |
| 1298 | } | 1298 | } |
| 1299 | 1299 | ||
| 1300 | int register_power_pmu(struct power_pmu *pmu) | 1300 | int register_power_pmu(struct power_pmu *pmu) |
| 1301 | { | 1301 | { |
| 1302 | if (ppmu) | 1302 | if (ppmu) |
| 1303 | return -EBUSY; /* something's already registered */ | 1303 | return -EBUSY; /* something's already registered */ |
| 1304 | 1304 | ||
| 1305 | ppmu = pmu; | 1305 | ppmu = pmu; |
| 1306 | pr_info("%s performance monitor hardware support registered\n", | 1306 | pr_info("%s performance monitor hardware support registered\n", |
| 1307 | pmu->name); | 1307 | pmu->name); |
| 1308 | 1308 | ||
| 1309 | #ifdef MSR_HV | 1309 | #ifdef MSR_HV |
| 1310 | /* | 1310 | /* |
| 1311 | * Use FCHV to ignore kernel events if MSR.HV is set. | 1311 | * Use FCHV to ignore kernel events if MSR.HV is set. |
| 1312 | */ | 1312 | */ |
| 1313 | if (mfmsr() & MSR_HV) | 1313 | if (mfmsr() & MSR_HV) |
| 1314 | freeze_events_kernel = MMCR0_FCHV; | 1314 | freeze_events_kernel = MMCR0_FCHV; |
| 1315 | #endif /* MSR_HV */ | 1315 | #endif /* MSR_HV */ |
| 1316 | 1316 | ||
| 1317 | return 0; | 1317 | return 0; |
| 1318 | } | 1318 | } |
| 1319 | 1319 |
arch/powerpc/kernel/power5+-pmu.c
| 1 | /* | 1 | /* |
| 2 | * Performance counter support for POWER5+/++ (not POWER5) processors. | 2 | * Performance counter support for POWER5+/++ (not POWER5) processors. |
| 3 | * | 3 | * |
| 4 | * Copyright 2009 Paul Mackerras, IBM Corporation. | 4 | * Copyright 2009 Paul Mackerras, IBM Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
| 10 | */ | 10 | */ |
| 11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
| 12 | #include <linux/perf_event.h> | 12 | #include <linux/perf_event.h> |
| 13 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| 14 | #include <asm/reg.h> | 14 | #include <asm/reg.h> |
| 15 | #include <asm/cputable.h> | 15 | #include <asm/cputable.h> |
| 16 | 16 | ||
| 17 | /* | 17 | /* |
| 18 | * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3) | 18 | * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3) |
| 19 | */ | 19 | */ |
| 20 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ | 20 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ |
| 21 | #define PM_PMC_MSK 0xf | 21 | #define PM_PMC_MSK 0xf |
| 22 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) | 22 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) |
| 23 | #define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ | 23 | #define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ |
| 24 | #define PM_UNIT_MSK 0xf | 24 | #define PM_UNIT_MSK 0xf |
| 25 | #define PM_BYTE_SH 12 /* Byte number of event bus to use */ | 25 | #define PM_BYTE_SH 12 /* Byte number of event bus to use */ |
| 26 | #define PM_BYTE_MSK 7 | 26 | #define PM_BYTE_MSK 7 |
| 27 | #define PM_GRS_SH 8 /* Storage subsystem mux select */ | 27 | #define PM_GRS_SH 8 /* Storage subsystem mux select */ |
| 28 | #define PM_GRS_MSK 7 | 28 | #define PM_GRS_MSK 7 |
| 29 | #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ | 29 | #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ |
| 30 | #define PM_PMCSEL_MSK 0x7f | 30 | #define PM_PMCSEL_MSK 0x7f |
| 31 | 31 | ||
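The definitions above pack the PMC number, unit select, byte lane, GRS mux and PMCSEL into a single event code. A short user-space decode of 0x1c10a8 (LD_REF_L1, taken from the generic-events table further down), reusing the same shifts and masks:

    #include <stdio.h>
    #include <stdint.h>

    #define PM_PMC_SH       20
    #define PM_PMC_MSK      0xf
    #define PM_UNIT_SH      16
    #define PM_UNIT_MSK     0xf
    #define PM_BYTE_SH      12
    #define PM_BYTE_MSK     7
    #define PM_BUSEVENT_MSK 0x80
    #define PM_PMCSEL_MSK   0x7f

    int main(void)
    {
        uint64_t ev = 0x1c10a8; /* LD_REF_L1, from the generic-events table */

        printf("pmc  = %u\n", (unsigned)((ev >> PM_PMC_SH) & PM_PMC_MSK));   /* 1 */
        printf("unit = %u\n", (unsigned)((ev >> PM_UNIT_SH) & PM_UNIT_MSK)); /* 0xc = PM_LSU1 */
        printf("byte = %u\n", (unsigned)((ev >> PM_BYTE_SH) & PM_BYTE_MSK)); /* 1 */
        printf("bus  = %u\n", (unsigned)((ev & PM_BUSEVENT_MSK) != 0));      /* 1 */
        printf("psel = %#x\n", (unsigned)(ev & PM_PMCSEL_MSK));              /* 0x28 */
        return 0;
    }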
| 32 | /* Values in PM_UNIT field */ | 32 | /* Values in PM_UNIT field */ |
| 33 | #define PM_FPU 0 | 33 | #define PM_FPU 0 |
| 34 | #define PM_ISU0 1 | 34 | #define PM_ISU0 1 |
| 35 | #define PM_IFU 2 | 35 | #define PM_IFU 2 |
| 36 | #define PM_ISU1 3 | 36 | #define PM_ISU1 3 |
| 37 | #define PM_IDU 4 | 37 | #define PM_IDU 4 |
| 38 | #define PM_ISU0_ALT 6 | 38 | #define PM_ISU0_ALT 6 |
| 39 | #define PM_GRS 7 | 39 | #define PM_GRS 7 |
| 40 | #define PM_LSU0 8 | 40 | #define PM_LSU0 8 |
| 41 | #define PM_LSU1 0xc | 41 | #define PM_LSU1 0xc |
| 42 | #define PM_LASTUNIT 0xc | 42 | #define PM_LASTUNIT 0xc |
| 43 | 43 | ||
| 44 | /* | 44 | /* |
| 45 | * Bits in MMCR1 for POWER5+ | 45 | * Bits in MMCR1 for POWER5+ |
| 46 | */ | 46 | */ |
| 47 | #define MMCR1_TTM0SEL_SH 62 | 47 | #define MMCR1_TTM0SEL_SH 62 |
| 48 | #define MMCR1_TTM1SEL_SH 60 | 48 | #define MMCR1_TTM1SEL_SH 60 |
| 49 | #define MMCR1_TTM2SEL_SH 58 | 49 | #define MMCR1_TTM2SEL_SH 58 |
| 50 | #define MMCR1_TTM3SEL_SH 56 | 50 | #define MMCR1_TTM3SEL_SH 56 |
| 51 | #define MMCR1_TTMSEL_MSK 3 | 51 | #define MMCR1_TTMSEL_MSK 3 |
| 52 | #define MMCR1_TD_CP_DBG0SEL_SH 54 | 52 | #define MMCR1_TD_CP_DBG0SEL_SH 54 |
| 53 | #define MMCR1_TD_CP_DBG1SEL_SH 52 | 53 | #define MMCR1_TD_CP_DBG1SEL_SH 52 |
| 54 | #define MMCR1_TD_CP_DBG2SEL_SH 50 | 54 | #define MMCR1_TD_CP_DBG2SEL_SH 50 |
| 55 | #define MMCR1_TD_CP_DBG3SEL_SH 48 | 55 | #define MMCR1_TD_CP_DBG3SEL_SH 48 |
| 56 | #define MMCR1_GRS_L2SEL_SH 46 | 56 | #define MMCR1_GRS_L2SEL_SH 46 |
| 57 | #define MMCR1_GRS_L2SEL_MSK 3 | 57 | #define MMCR1_GRS_L2SEL_MSK 3 |
| 58 | #define MMCR1_GRS_L3SEL_SH 44 | 58 | #define MMCR1_GRS_L3SEL_SH 44 |
| 59 | #define MMCR1_GRS_L3SEL_MSK 3 | 59 | #define MMCR1_GRS_L3SEL_MSK 3 |
| 60 | #define MMCR1_GRS_MCSEL_SH 41 | 60 | #define MMCR1_GRS_MCSEL_SH 41 |
| 61 | #define MMCR1_GRS_MCSEL_MSK 7 | 61 | #define MMCR1_GRS_MCSEL_MSK 7 |
| 62 | #define MMCR1_GRS_FABSEL_SH 39 | 62 | #define MMCR1_GRS_FABSEL_SH 39 |
| 63 | #define MMCR1_GRS_FABSEL_MSK 3 | 63 | #define MMCR1_GRS_FABSEL_MSK 3 |
| 64 | #define MMCR1_PMC1_ADDER_SEL_SH 35 | 64 | #define MMCR1_PMC1_ADDER_SEL_SH 35 |
| 65 | #define MMCR1_PMC2_ADDER_SEL_SH 34 | 65 | #define MMCR1_PMC2_ADDER_SEL_SH 34 |
| 66 | #define MMCR1_PMC3_ADDER_SEL_SH 33 | 66 | #define MMCR1_PMC3_ADDER_SEL_SH 33 |
| 67 | #define MMCR1_PMC4_ADDER_SEL_SH 32 | 67 | #define MMCR1_PMC4_ADDER_SEL_SH 32 |
| 68 | #define MMCR1_PMC1SEL_SH 25 | 68 | #define MMCR1_PMC1SEL_SH 25 |
| 69 | #define MMCR1_PMC2SEL_SH 17 | 69 | #define MMCR1_PMC2SEL_SH 17 |
| 70 | #define MMCR1_PMC3SEL_SH 9 | 70 | #define MMCR1_PMC3SEL_SH 9 |
| 71 | #define MMCR1_PMC4SEL_SH 1 | 71 | #define MMCR1_PMC4SEL_SH 1 |
| 72 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) | 72 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) |
| 73 | #define MMCR1_PMCSEL_MSK 0x7f | 73 | #define MMCR1_PMCSEL_MSK 0x7f |
| 74 | 74 | ||
| 75 | /* | 75 | /* |
| 76 | * Bits in MMCRA | ||
| 77 | */ | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Layout of constraint bits: | 76 | * Layout of constraint bits: |
| 81 | * 6666555555555544444444443333333333222222222211111111110000000000 | 77 | * 6666555555555544444444443333333333222222222211111111110000000000 |
| 82 | * 3210987654321098765432109876543210987654321098765432109876543210 | 78 | * 3210987654321098765432109876543210987654321098765432109876543210 |
| 83 | * [ ><><>< ><> <><>[ > < >< >< >< ><><><><><><> | 79 | * [ ><><>< ><> <><>[ > < >< >< >< ><><><><><><> |
| 84 | * NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P6P5P4P3P2P1 | 80 | * NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P6P5P4P3P2P1 |
| 85 | * | 81 | * |
| 86 | * NC - number of counters | 82 | * NC - number of counters |
| 87 | * 51: NC error 0x0008_0000_0000_0000 | 83 | * 51: NC error 0x0008_0000_0000_0000 |
| 88 | * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 | 84 | * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 |
| 89 | * | 85 | * |
| 90 | * G0..G3 - GRS mux constraints | 86 | * G0..G3 - GRS mux constraints |
| 91 | * 46-47: GRS_L2SEL value | 87 | * 46-47: GRS_L2SEL value |
| 92 | * 44-45: GRS_L3SEL value | 88 | * 44-45: GRS_L3SEL value |
| 93 | * 41-43: GRS_MCSEL value | 89 | * 41-43: GRS_MCSEL value |
| 94 | * 39-40: GRS_FABSEL value | 90 | * 39-40: GRS_FABSEL value |
| 95 | * Note that these match up with their bit positions in MMCR1 | 91 | * Note that these match up with their bit positions in MMCR1 |
| 96 | * | 92 | * |
| 97 | * T0 - TTM0 constraint | 93 | * T0 - TTM0 constraint |
| 98 | * 36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000 | 94 | * 36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000 |
| 99 | * | 95 | * |
| 100 | * T1 - TTM1 constraint | 96 | * T1 - TTM1 constraint |
| 101 | * 34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000 | 97 | * 34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000 |
| 102 | * | 98 | * |
| 103 | * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS | 99 | * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS |
| 104 | * 33: UC3 error 0x02_0000_0000 | 100 | * 33: UC3 error 0x02_0000_0000 |
| 105 | * 32: FPU|IFU|ISU1 events needed 0x01_0000_0000 | 101 | * 32: FPU|IFU|ISU1 events needed 0x01_0000_0000 |
| 106 | * 31: ISU0 events needed 0x00_8000_0000 | 102 | * 31: ISU0 events needed 0x00_8000_0000 |
| 107 | * 30: IDU|GRS events needed 0x00_4000_0000 | 103 | * 30: IDU|GRS events needed 0x00_4000_0000 |
| 108 | * | 104 | * |
| 109 | * B0 | 105 | * B0 |
| 110 | * 24-27: Byte 0 event source 0x0f00_0000 | 106 | * 24-27: Byte 0 event source 0x0f00_0000 |
| 111 | * Encoding as for the event code | 107 | * Encoding as for the event code |
| 112 | * | 108 | * |
| 113 | * B1, B2, B3 | 109 | * B1, B2, B3 |
| 114 | * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources | 110 | * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources |
| 115 | * | 111 | * |
| 116 | * P6 | 112 | * P6 |
| 117 | * 11: P6 error 0x800 | 113 | * 11: P6 error 0x800 |
| 118 | * 10-11: Count of events needing PMC6 | 114 | * 10-11: Count of events needing PMC6 |
| 119 | * | 115 | * |
| 120 | * P1..P5 | 116 | * P1..P5 |
| 121 | * 0-9: Count of events needing PMC1..PMC5 | 117 | * 0-9: Count of events needing PMC1..PMC5 |
| 122 | */ | 118 | */ |
| 123 | 119 | ||
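The select-type fields in this layout (T0/T1, G0..G3 and the byte lanes B0..B3) permit a fast pairwise test: two events can share the PMU only if their constraint values agree on every bit where both masks are set, while the counting fields (NC, P1..P6) instead carry into an adjacent error bit. A sketch of the select-field test, using the byte-0 lane (bits 24-27) as an assumed example:

    #include <stdio.h>

    /* Two events can coexist iff their constraint values agree wherever
     * both masks claim a select field. */
    static int select_compatible(unsigned long long ma, unsigned long long va,
                                 unsigned long long mb, unsigned long long vb)
    {
        return ((va ^ vb) & (ma & mb)) == 0;
    }

    int main(void)
    {
        /* Both events drive byte lane 0 (constraint bits 24-27) ... */
        unsigned long long mask = 0xfULL << 24;
        unsigned long long from_lsu0 = 0x8ULL << 24;    /* ... one from LSU0 */
        unsigned long long from_fpu  = 0x0ULL << 24;    /* ... one from FPU */

        printf("compatible = %d\n",
               select_compatible(mask, from_lsu0, mask, from_fpu)); /* 0 */
        return 0;
    }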
| 124 | static const int grsel_shift[8] = { | 120 | static const int grsel_shift[8] = { |
| 125 | MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, | 121 | MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, |
| 126 | MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, | 122 | MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, |
| 127 | MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH | 123 | MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH |
| 128 | }; | 124 | }; |
| 129 | 125 | ||
| 130 | /* Masks and values for using events from the various units */ | 126 | /* Masks and values for using events from the various units */ |
| 131 | static unsigned long unit_cons[PM_LASTUNIT+1][2] = { | 127 | static unsigned long unit_cons[PM_LASTUNIT+1][2] = { |
| 132 | [PM_FPU] = { 0x3200000000ul, 0x0100000000ul }, | 128 | [PM_FPU] = { 0x3200000000ul, 0x0100000000ul }, |
| 133 | [PM_ISU0] = { 0x0200000000ul, 0x0080000000ul }, | 129 | [PM_ISU0] = { 0x0200000000ul, 0x0080000000ul }, |
| 134 | [PM_ISU1] = { 0x3200000000ul, 0x3100000000ul }, | 130 | [PM_ISU1] = { 0x3200000000ul, 0x3100000000ul }, |
| 135 | [PM_IFU] = { 0x3200000000ul, 0x2100000000ul }, | 131 | [PM_IFU] = { 0x3200000000ul, 0x2100000000ul }, |
| 136 | [PM_IDU] = { 0x0e00000000ul, 0x0040000000ul }, | 132 | [PM_IDU] = { 0x0e00000000ul, 0x0040000000ul }, |
| 137 | [PM_GRS] = { 0x0e00000000ul, 0x0c40000000ul }, | 133 | [PM_GRS] = { 0x0e00000000ul, 0x0c40000000ul }, |
| 138 | }; | 134 | }; |
| 139 | 135 | ||
| 140 | static int power5p_get_constraint(u64 event, unsigned long *maskp, | 136 | static int power5p_get_constraint(u64 event, unsigned long *maskp, |
| 141 | unsigned long *valp) | 137 | unsigned long *valp) |
| 142 | { | 138 | { |
| 143 | int pmc, byte, unit, sh; | 139 | int pmc, byte, unit, sh; |
| 144 | int bit, fmask; | 140 | int bit, fmask; |
| 145 | unsigned long mask = 0, value = 0; | 141 | unsigned long mask = 0, value = 0; |
| 146 | 142 | ||
| 147 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 143 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 148 | if (pmc) { | 144 | if (pmc) { |
| 149 | if (pmc > 6) | 145 | if (pmc > 6) |
| 150 | return -1; | 146 | return -1; |
| 151 | sh = (pmc - 1) * 2; | 147 | sh = (pmc - 1) * 2; |
| 152 | mask |= 2 << sh; | 148 | mask |= 2 << sh; |
| 153 | value |= 1 << sh; | 149 | value |= 1 << sh; |
| 154 | if (pmc >= 5 && !(event == 0x500009 || event == 0x600005)) | 150 | if (pmc >= 5 && !(event == 0x500009 || event == 0x600005)) |
| 155 | return -1; | 151 | return -1; |
| 156 | } | 152 | } |
| 157 | if (event & PM_BUSEVENT_MSK) { | 153 | if (event & PM_BUSEVENT_MSK) { |
| 158 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | 154 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 159 | if (unit > PM_LASTUNIT) | 155 | if (unit > PM_LASTUNIT) |
| 160 | return -1; | 156 | return -1; |
| 161 | if (unit == PM_ISU0_ALT) | 157 | if (unit == PM_ISU0_ALT) |
| 162 | unit = PM_ISU0; | 158 | unit = PM_ISU0; |
| 163 | mask |= unit_cons[unit][0]; | 159 | mask |= unit_cons[unit][0]; |
| 164 | value |= unit_cons[unit][1]; | 160 | value |= unit_cons[unit][1]; |
| 165 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | 161 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 166 | if (byte >= 4) { | 162 | if (byte >= 4) { |
| 167 | if (unit != PM_LSU1) | 163 | if (unit != PM_LSU1) |
| 168 | return -1; | 164 | return -1; |
| 169 | /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ | 165 | /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ |
| 170 | ++unit; | 166 | ++unit; |
| 171 | byte &= 3; | 167 | byte &= 3; |
| 172 | } | 168 | } |
| 173 | if (unit == PM_GRS) { | 169 | if (unit == PM_GRS) { |
| 174 | bit = event & 7; | 170 | bit = event & 7; |
| 175 | fmask = (bit == 6)? 7: 3; | 171 | fmask = (bit == 6)? 7: 3; |
| 176 | sh = grsel_shift[bit]; | 172 | sh = grsel_shift[bit]; |
| 177 | mask |= (unsigned long)fmask << sh; | 173 | mask |= (unsigned long)fmask << sh; |
| 178 | value |= (unsigned long)((event >> PM_GRS_SH) & fmask) | 174 | value |= (unsigned long)((event >> PM_GRS_SH) & fmask) |
| 179 | << sh; | 175 | << sh; |
| 180 | } | 176 | } |
| 181 | /* Set byte lane select field */ | 177 | /* Set byte lane select field */ |
| 182 | mask |= 0xfUL << (24 - 4 * byte); | 178 | mask |= 0xfUL << (24 - 4 * byte); |
| 183 | value |= (unsigned long)unit << (24 - 4 * byte); | 179 | value |= (unsigned long)unit << (24 - 4 * byte); |
| 184 | } | 180 | } |
| 185 | if (pmc < 5) { | 181 | if (pmc < 5) { |
| 186 | /* need a counter from PMC1-4 set */ | 182 | /* need a counter from PMC1-4 set */ |
| 187 | mask |= 0x8000000000000ul; | 183 | mask |= 0x8000000000000ul; |
| 188 | value |= 0x1000000000000ul; | 184 | value |= 0x1000000000000ul; |
| 189 | } | 185 | } |
| 190 | *maskp = mask; | 186 | *maskp = mask; |
| 191 | *valp = value; | 187 | *valp = value; |
| 192 | return 0; | 188 | return 0; |
| 193 | } | 189 | } |
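For an event pinned to a specific PMC the function sets value bit 1 << sh under mask bit 2 << sh, so the two-bit per-PMC field counts claims and a second claim carries into the masked error bit. A stand-alone demo of that carry trick (not the kernel's full constraint adder):

    #include <stdio.h>

    int main(void)
    {
        int sh = (1 - 1) * 2;                   /* PMC1's two-bit field */
        unsigned long long err = 2ULL << sh;    /* masked error bit */
        unsigned long long claim = 1ULL << sh;  /* one event pinning PMC1 */

        unsigned long long sum = claim + claim; /* two events pin PMC1 */
        printf("conflict = %d\n", (sum & err) != 0);    /* 1 */
        return 0;
    }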
| 194 | 190 | ||
| 195 | static int power5p_limited_pmc_event(u64 event) | 191 | static int power5p_limited_pmc_event(u64 event) |
| 196 | { | 192 | { |
| 197 | int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 193 | int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 198 | 194 | ||
| 199 | return pmc == 5 || pmc == 6; | 195 | return pmc == 5 || pmc == 6; |
| 200 | } | 196 | } |
| 201 | 197 | ||
| 202 | #define MAX_ALT 3 /* at most 3 alternatives for any event */ | 198 | #define MAX_ALT 3 /* at most 3 alternatives for any event */ |
| 203 | 199 | ||
| 204 | static const unsigned int event_alternatives[][MAX_ALT] = { | 200 | static const unsigned int event_alternatives[][MAX_ALT] = { |
| 205 | { 0x100c0, 0x40001f }, /* PM_GCT_FULL_CYC */ | 201 | { 0x100c0, 0x40001f }, /* PM_GCT_FULL_CYC */ |
| 206 | { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ | 202 | { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ |
| 207 | { 0x230e2, 0x323087 }, /* PM_BR_PRED_CR */ | 203 | { 0x230e2, 0x323087 }, /* PM_BR_PRED_CR */ |
| 208 | { 0x230e3, 0x223087, 0x3230a0 }, /* PM_BR_PRED_TA */ | 204 | { 0x230e3, 0x223087, 0x3230a0 }, /* PM_BR_PRED_TA */ |
| 209 | { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ | 205 | { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ |
| 210 | { 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */ | 206 | { 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */ |
| 211 | { 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */ | 207 | { 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */ |
| 212 | { 0x100005, 0x600005 }, /* PM_RUN_CYC */ | 208 | { 0x100005, 0x600005 }, /* PM_RUN_CYC */ |
| 213 | { 0x100009, 0x200009 }, /* PM_INST_CMPL */ | 209 | { 0x100009, 0x200009 }, /* PM_INST_CMPL */ |
| 214 | { 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */ | 210 | { 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */ |
| 215 | { 0x300009, 0x400009 }, /* PM_INST_DISP */ | 211 | { 0x300009, 0x400009 }, /* PM_INST_DISP */ |
| 216 | }; | 212 | }; |
| 217 | 213 | ||
| 218 | /* | 214 | /* |
| 219 | * Scan the alternatives table for a match and return the | 215 | * Scan the alternatives table for a match and return the |
| 220 | * index into the alternatives table if found, else -1. | 216 | * index into the alternatives table if found, else -1. |
| 221 | */ | 217 | */ |
| 222 | static int find_alternative(unsigned int event) | 218 | static int find_alternative(unsigned int event) |
| 223 | { | 219 | { |
| 224 | int i, j; | 220 | int i, j; |
| 225 | 221 | ||
| 226 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { | 222 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { |
| 227 | if (event < event_alternatives[i][0]) | 223 | if (event < event_alternatives[i][0]) |
| 228 | break; | 224 | break; |
| 229 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) | 225 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) |
| 230 | if (event == event_alternatives[i][j]) | 226 | if (event == event_alternatives[i][j]) |
| 231 | return i; | 227 | return i; |
| 232 | } | 228 | } |
| 233 | return -1; | 229 | return -1; |
| 234 | } | 230 | } |
| 235 | 231 | ||
| 236 | static const unsigned char bytedecode_alternatives[4][4] = { | 232 | static const unsigned char bytedecode_alternatives[4][4] = { |
| 237 | /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, | 233 | /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, |
| 238 | /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, | 234 | /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, |
| 239 | /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, | 235 | /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, |
| 240 | /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } | 236 | /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } |
| 241 | }; | 237 | }; |
| 242 | 238 | ||
| 243 | /* | 239 | /* |
| 244 | * Some direct events for decodes of event bus byte 3 have alternative | 240 | * Some direct events for decodes of event bus byte 3 have alternative |
| 245 | * PMCSEL values on other counters. This returns the alternative | 241 | * PMCSEL values on other counters. This returns the alternative |
| 246 | * event code for those that do, or -1 otherwise. This also handles | 242 | * event code for those that do, or -1 otherwise. This also handles |
| 247 | * alternative PMCSEL values for add events. | 243 | * alternative PMCSEL values for add events. |
| 248 | */ | 244 | */ |
| 249 | static s64 find_alternative_bdecode(u64 event) | 245 | static s64 find_alternative_bdecode(u64 event) |
| 250 | { | 246 | { |
| 251 | int pmc, altpmc, pp, j; | 247 | int pmc, altpmc, pp, j; |
| 252 | 248 | ||
| 253 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 249 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 254 | if (pmc == 0 || pmc > 4) | 250 | if (pmc == 0 || pmc > 4) |
| 255 | return -1; | 251 | return -1; |
| 256 | altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ | 252 | altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ |
| 257 | pp = event & PM_PMCSEL_MSK; | 253 | pp = event & PM_PMCSEL_MSK; |
| 258 | for (j = 0; j < 4; ++j) { | 254 | for (j = 0; j < 4; ++j) { |
| 259 | if (bytedecode_alternatives[pmc - 1][j] == pp) { | 255 | if (bytedecode_alternatives[pmc - 1][j] == pp) { |
| 260 | return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | | 256 | return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | |
| 261 | (altpmc << PM_PMC_SH) | | 257 | (altpmc << PM_PMC_SH) | |
| 262 | bytedecode_alternatives[altpmc - 1][j]; | 258 | bytedecode_alternatives[altpmc - 1][j]; |
| 263 | } | 259 | } |
| 264 | } | 260 | } |
| 265 | 261 | ||
| 266 | /* new decode alternatives for power5+ */ | 262 | /* new decode alternatives for power5+ */ |
| 267 | if (pmc == 1 && (pp == 0x0d || pp == 0x0e)) | 263 | if (pmc == 1 && (pp == 0x0d || pp == 0x0e)) |
| 268 | return event + (2 << PM_PMC_SH) + (0x2e - 0x0d); | 264 | return event + (2 << PM_PMC_SH) + (0x2e - 0x0d); |
| 269 | if (pmc == 3 && (pp == 0x2e || pp == 0x2f)) | 265 | if (pmc == 3 && (pp == 0x2e || pp == 0x2f)) |
| 270 | return event - (2 << PM_PMC_SH) - (0x2e - 0x0d); | 266 | return event - (2 << PM_PMC_SH) - (0x2e - 0x0d); |
| 271 | 267 | ||
| 272 | /* alternative add event encodings */ | 268 | /* alternative add event encodings */ |
| 273 | if (pp == 0x10 || pp == 0x28) | 269 | if (pp == 0x10 || pp == 0x28) |
| 274 | return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) | | 270 | return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) | |
| 275 | (altpmc << PM_PMC_SH); | 271 | (altpmc << PM_PMC_SH); |
| 276 | 272 | ||
| 277 | return -1; | 273 | return -1; |
| 278 | } | 274 | } |
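The pairing altpmc = 5 - pmc swaps PMC1 with PMC4 and PMC2 with PMC3, which is why the bytedecode_alternatives rows for PMC1/PMC3 and for PMC2/PMC4 mirror each other; a one-liner confirming the mapping:

    #include <stdio.h>

    int main(void)
    {
        for (int pmc = 1; pmc <= 4; ++pmc)
            printf("PMC%d <-> PMC%d\n", pmc, 5 - pmc);
        return 0;
    }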
| 279 | 275 | ||
| 280 | static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[]) | 276 | static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[]) |
| 281 | { | 277 | { |
| 282 | int i, j, nalt = 1; | 278 | int i, j, nalt = 1; |
| 283 | int nlim; | 279 | int nlim; |
| 284 | s64 ae; | 280 | s64 ae; |
| 285 | 281 | ||
| 286 | alt[0] = event; | 282 | alt[0] = event; |
| 287 | nalt = 1; | 283 | nalt = 1; |
| 288 | nlim = power5p_limited_pmc_event(event); | 284 | nlim = power5p_limited_pmc_event(event); |
| 289 | i = find_alternative(event); | 285 | i = find_alternative(event); |
| 290 | if (i >= 0) { | 286 | if (i >= 0) { |
| 291 | for (j = 0; j < MAX_ALT; ++j) { | 287 | for (j = 0; j < MAX_ALT; ++j) { |
| 292 | ae = event_alternatives[i][j]; | 288 | ae = event_alternatives[i][j]; |
| 293 | if (ae && ae != event) | 289 | if (ae && ae != event) |
| 294 | alt[nalt++] = ae; | 290 | alt[nalt++] = ae; |
| 295 | nlim += power5p_limited_pmc_event(ae); | 291 | nlim += power5p_limited_pmc_event(ae); |
| 296 | } | 292 | } |
| 297 | } else { | 293 | } else { |
| 298 | ae = find_alternative_bdecode(event); | 294 | ae = find_alternative_bdecode(event); |
| 299 | if (ae > 0) | 295 | if (ae > 0) |
| 300 | alt[nalt++] = ae; | 296 | alt[nalt++] = ae; |
| 301 | } | 297 | } |
| 302 | 298 | ||
| 303 | if (flags & PPMU_ONLY_COUNT_RUN) { | 299 | if (flags & PPMU_ONLY_COUNT_RUN) { |
| 304 | /* | 300 | /* |
| 305 | * We're only counting in RUN state, | 301 | * We're only counting in RUN state, |
| 306 | * so PM_CYC is equivalent to PM_RUN_CYC | 302 | * so PM_CYC is equivalent to PM_RUN_CYC |
| 307 | * and PM_INST_CMPL === PM_RUN_INST_CMPL. | 303 | * and PM_INST_CMPL === PM_RUN_INST_CMPL. |
| 308 | * This doesn't include alternatives that don't provide | 304 | * This doesn't include alternatives that don't provide |
| 309 | * any extra flexibility in assigning PMCs (e.g. | 305 | * any extra flexibility in assigning PMCs (e.g. |
| 310 | * 0x100005 for PM_RUN_CYC vs. 0xf for PM_CYC). | 306 | * 0x100005 for PM_RUN_CYC vs. 0xf for PM_CYC). |
| 311 | * Note that even with these additional alternatives | 307 | * Note that even with these additional alternatives |
| 312 | * we never end up with more than 3 alternatives for any event. | 308 | * we never end up with more than 3 alternatives for any event. |
| 313 | */ | 309 | */ |
| 314 | j = nalt; | 310 | j = nalt; |
| 315 | for (i = 0; i < nalt; ++i) { | 311 | for (i = 0; i < nalt; ++i) { |
| 316 | switch (alt[i]) { | 312 | switch (alt[i]) { |
| 317 | case 0xf: /* PM_CYC */ | 313 | case 0xf: /* PM_CYC */ |
| 318 | alt[j++] = 0x600005; /* PM_RUN_CYC */ | 314 | alt[j++] = 0x600005; /* PM_RUN_CYC */ |
| 319 | ++nlim; | 315 | ++nlim; |
| 320 | break; | 316 | break; |
| 321 | case 0x600005: /* PM_RUN_CYC */ | 317 | case 0x600005: /* PM_RUN_CYC */ |
| 322 | alt[j++] = 0xf; | 318 | alt[j++] = 0xf; |
| 323 | break; | 319 | break; |
| 324 | case 0x100009: /* PM_INST_CMPL */ | 320 | case 0x100009: /* PM_INST_CMPL */ |
| 325 | alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */ | 321 | alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */ |
| 326 | ++nlim; | 322 | ++nlim; |
| 327 | break; | 323 | break; |
| 328 | case 0x500009: /* PM_RUN_INST_CMPL */ | 324 | case 0x500009: /* PM_RUN_INST_CMPL */ |
| 329 | alt[j++] = 0x100009; /* PM_INST_CMPL */ | 325 | alt[j++] = 0x100009; /* PM_INST_CMPL */ |
| 330 | alt[j++] = 0x200009; | 326 | alt[j++] = 0x200009; |
| 331 | break; | 327 | break; |
| 332 | } | 328 | } |
| 333 | } | 329 | } |
| 334 | nalt = j; | 330 | nalt = j; |
| 335 | } | 331 | } |
| 336 | 332 | ||
| 337 | if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) { | 333 | if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) { |
| 338 | /* remove the limited PMC events */ | 334 | /* remove the limited PMC events */ |
| 339 | j = 0; | 335 | j = 0; |
| 340 | for (i = 0; i < nalt; ++i) { | 336 | for (i = 0; i < nalt; ++i) { |
| 341 | if (!power5p_limited_pmc_event(alt[i])) { | 337 | if (!power5p_limited_pmc_event(alt[i])) { |
| 342 | alt[j] = alt[i]; | 338 | alt[j] = alt[i]; |
| 343 | ++j; | 339 | ++j; |
| 344 | } | 340 | } |
| 345 | } | 341 | } |
| 346 | nalt = j; | 342 | nalt = j; |
| 347 | } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) { | 343 | } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) { |
| 348 | /* remove all but the limited PMC events */ | 344 | /* remove all but the limited PMC events */ |
| 349 | j = 0; | 345 | j = 0; |
| 350 | for (i = 0; i < nalt; ++i) { | 346 | for (i = 0; i < nalt; ++i) { |
| 351 | if (power5p_limited_pmc_event(alt[i])) { | 347 | if (power5p_limited_pmc_event(alt[i])) { |
| 352 | alt[j] = alt[i]; | 348 | alt[j] = alt[i]; |
| 353 | ++j; | 349 | ++j; |
| 354 | } | 350 | } |
| 355 | } | 351 | } |
| 356 | nalt = j; | 352 | nalt = j; |
| 357 | } | 353 | } |
| 358 | 354 | ||
| 359 | return nalt; | 355 | return nalt; |
| 360 | } | 356 | } |
| 361 | 357 | ||
| 362 | /* | 358 | /* |
| 363 | * Map of which direct events on which PMCs are marked instruction events. | 359 | * Map of which direct events on which PMCs are marked instruction events. |
| 364 | * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event. | 360 | * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event. |
| 365 | * Bit 0 is set if it is marked for all PMCs. | 361 | * Bit 0 is set if it is marked for all PMCs. |
| 366 | * The 0x80 bit indicates a byte decode PMCSEL value. | 362 | * The 0x80 bit indicates a byte decode PMCSEL value. |
| 367 | */ | 363 | */ |
| 368 | static unsigned char direct_event_is_marked[0x28] = { | 364 | static unsigned char direct_event_is_marked[0x28] = { |
| 369 | 0, /* 00 */ | 365 | 0, /* 00 */ |
| 370 | 0x1f, /* 01 PM_IOPS_CMPL */ | 366 | 0x1f, /* 01 PM_IOPS_CMPL */ |
| 371 | 0x2, /* 02 PM_MRK_GRP_DISP */ | 367 | 0x2, /* 02 PM_MRK_GRP_DISP */ |
| 372 | 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ | 368 | 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ |
| 373 | 0, /* 04 */ | 369 | 0, /* 04 */ |
| 374 | 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */ | 370 | 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */ |
| 375 | 0x80, /* 06 */ | 371 | 0x80, /* 06 */ |
| 376 | 0x80, /* 07 */ | 372 | 0x80, /* 07 */ |
| 377 | 0, 0, 0,/* 08 - 0a */ | 373 | 0, 0, 0,/* 08 - 0a */ |
| 378 | 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */ | 374 | 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */ |
| 379 | 0, /* 0c */ | 375 | 0, /* 0c */ |
| 380 | 0x80, /* 0d */ | 376 | 0x80, /* 0d */ |
| 381 | 0x80, /* 0e */ | 377 | 0x80, /* 0e */ |
| 382 | 0, /* 0f */ | 378 | 0, /* 0f */ |
| 383 | 0, /* 10 */ | 379 | 0, /* 10 */ |
| 384 | 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */ | 380 | 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */ |
| 385 | 0, /* 12 */ | 381 | 0, /* 12 */ |
| 386 | 0x10, /* 13 PM_MRK_GRP_CMPL */ | 382 | 0x10, /* 13 PM_MRK_GRP_CMPL */ |
| 387 | 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */ | 383 | 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */ |
| 388 | 0x2, /* 15 PM_MRK_GRP_ISSUED */ | 384 | 0x2, /* 15 PM_MRK_GRP_ISSUED */ |
| 389 | 0x80, /* 16 */ | 385 | 0x80, /* 16 */ |
| 390 | 0x80, /* 17 */ | 386 | 0x80, /* 17 */ |
| 391 | 0, 0, 0, 0, 0, | 387 | 0, 0, 0, 0, 0, |
| 392 | 0x80, /* 1d */ | 388 | 0x80, /* 1d */ |
| 393 | 0x80, /* 1e */ | 389 | 0x80, /* 1e */ |
| 394 | 0, /* 1f */ | 390 | 0, /* 1f */ |
| 395 | 0x80, /* 20 */ | 391 | 0x80, /* 20 */ |
| 396 | 0x80, /* 21 */ | 392 | 0x80, /* 21 */ |
| 397 | 0x80, /* 22 */ | 393 | 0x80, /* 22 */ |
| 398 | 0x80, /* 23 */ | 394 | 0x80, /* 23 */ |
| 399 | 0x80, /* 24 */ | 395 | 0x80, /* 24 */ |
| 400 | 0x80, /* 25 */ | 396 | 0x80, /* 25 */ |
| 401 | 0x80, /* 26 */ | 397 | 0x80, /* 26 */ |
| 402 | 0x80, /* 27 */ | 398 | 0x80, /* 27 */ |
| 403 | }; | 399 | }; |
| 404 | 400 | ||
| 405 | /* | 401 | /* |
| 406 | * Returns 1 if event counts things relating to marked instructions | 402 | * Returns 1 if event counts things relating to marked instructions |
| 407 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. | 403 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. |
| 408 | */ | 404 | */ |
| 409 | static int power5p_marked_instr_event(u64 event) | 405 | static int power5p_marked_instr_event(u64 event) |
| 410 | { | 406 | { |
| 411 | int pmc, psel; | 407 | int pmc, psel; |
| 412 | int bit, byte, unit; | 408 | int bit, byte, unit; |
| 413 | u32 mask; | 409 | u32 mask; |
| 414 | 410 | ||
| 415 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 411 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 416 | psel = event & PM_PMCSEL_MSK; | 412 | psel = event & PM_PMCSEL_MSK; |
| 417 | if (pmc >= 5) | 413 | if (pmc >= 5) |
| 418 | return 0; | 414 | return 0; |
| 419 | 415 | ||
| 420 | bit = -1; | 416 | bit = -1; |
| 421 | if (psel < sizeof(direct_event_is_marked)) { | 417 | if (psel < sizeof(direct_event_is_marked)) { |
| 422 | if (direct_event_is_marked[psel] & (1 << pmc)) | 418 | if (direct_event_is_marked[psel] & (1 << pmc)) |
| 423 | return 1; | 419 | return 1; |
| 424 | if (direct_event_is_marked[psel] & 0x80) | 420 | if (direct_event_is_marked[psel] & 0x80) |
| 425 | bit = 4; | 421 | bit = 4; |
| 426 | else if (psel == 0x08) | 422 | else if (psel == 0x08) |
| 427 | bit = pmc - 1; | 423 | bit = pmc - 1; |
| 428 | else if (psel == 0x10) | 424 | else if (psel == 0x10) |
| 429 | bit = 4 - pmc; | 425 | bit = 4 - pmc; |
| 430 | else if (psel == 0x1b && (pmc == 1 || pmc == 3)) | 426 | else if (psel == 0x1b && (pmc == 1 || pmc == 3)) |
| 431 | bit = 4; | 427 | bit = 4; |
| 432 | } else if ((psel & 0x48) == 0x40) { | 428 | } else if ((psel & 0x48) == 0x40) { |
| 433 | bit = psel & 7; | 429 | bit = psel & 7; |
| 434 | } else if (psel == 0x28) { | 430 | } else if (psel == 0x28) { |
| 435 | bit = pmc - 1; | 431 | bit = pmc - 1; |
| 436 | } else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) { | 432 | } else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) { |
| 437 | bit = 4; | 433 | bit = 4; |
| 438 | } | 434 | } |
| 439 | 435 | ||
| 440 | if (!(event & PM_BUSEVENT_MSK) || bit == -1) | 436 | if (!(event & PM_BUSEVENT_MSK) || bit == -1) |
| 441 | return 0; | 437 | return 0; |
| 442 | 438 | ||
| 443 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | 439 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 444 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | 440 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 445 | if (unit == PM_LSU0) { | 441 | if (unit == PM_LSU0) { |
| 446 | /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */ | 442 | /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */ |
| 447 | mask = 0x5dff00; | 443 | mask = 0x5dff00; |
| 448 | } else if (unit == PM_LSU1 && byte >= 4) { | 444 | } else if (unit == PM_LSU1 && byte >= 4) { |
| 449 | byte -= 4; | 445 | byte -= 4; |
| 450 | /* byte 5 bits 6-7, byte 6 bits 0,4, byte 7 bits 0-4,6 */ | 446 | /* byte 5 bits 6-7, byte 6 bits 0,4, byte 7 bits 0-4,6 */ |
| 451 | mask = 0x5f11c000; | 447 | mask = 0x5f11c000; |
| 452 | } else | 448 | } else |
| 453 | return 0; | 449 | return 0; |
| 454 | 450 | ||
| 455 | return (mask >> (byte * 8 + bit)) & 1; | 451 | return (mask >> (byte * 8 + bit)) & 1; |
| 456 | } | 452 | } |
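The final lookup treats mask as a bitmap over (byte, bit) pairs, so byte * 8 + bit indexes straight into it. A tiny demo using the LSU0 mask from above:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mask = 0x5dff00;   /* LSU0: byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
        int byte = 1, bit = 3;

        printf("marked = %u\n", (mask >> (byte * 8 + bit)) & 1);    /* 1 */
        return 0;
    }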
| 457 | 453 | ||
| 458 | static int power5p_compute_mmcr(u64 event[], int n_ev, | 454 | static int power5p_compute_mmcr(u64 event[], int n_ev, |
| 459 | unsigned int hwc[], unsigned long mmcr[]) | 455 | unsigned int hwc[], unsigned long mmcr[]) |
| 460 | { | 456 | { |
| 461 | unsigned long mmcr1 = 0; | 457 | unsigned long mmcr1 = 0; |
| 462 | unsigned long mmcra = 0; | 458 | unsigned long mmcra = 0; |
| 463 | unsigned int pmc, unit, byte, psel; | 459 | unsigned int pmc, unit, byte, psel; |
| 464 | unsigned int ttm; | 460 | unsigned int ttm; |
| 465 | int i, isbus, bit, grsel; | 461 | int i, isbus, bit, grsel; |
| 466 | unsigned int pmc_inuse = 0; | 462 | unsigned int pmc_inuse = 0; |
| 467 | unsigned char busbyte[4]; | 463 | unsigned char busbyte[4]; |
| 468 | unsigned char unituse[16]; | 464 | unsigned char unituse[16]; |
| 469 | int ttmuse; | 465 | int ttmuse; |
| 470 | 466 | ||
| 471 | if (n_ev > 6) | 467 | if (n_ev > 6) |
| 472 | return -1; | 468 | return -1; |
| 473 | 469 | ||
| 474 | /* First pass to count resource use */ | 470 | /* First pass to count resource use */ |
| 475 | memset(busbyte, 0, sizeof(busbyte)); | 471 | memset(busbyte, 0, sizeof(busbyte)); |
| 476 | memset(unituse, 0, sizeof(unituse)); | 472 | memset(unituse, 0, sizeof(unituse)); |
| 477 | for (i = 0; i < n_ev; ++i) { | 473 | for (i = 0; i < n_ev; ++i) { |
| 478 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 474 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 479 | if (pmc) { | 475 | if (pmc) { |
| 480 | if (pmc > 6) | 476 | if (pmc > 6) |
| 481 | return -1; | 477 | return -1; |
| 482 | if (pmc_inuse & (1 << (pmc - 1))) | 478 | if (pmc_inuse & (1 << (pmc - 1))) |
| 483 | return -1; | 479 | return -1; |
| 484 | pmc_inuse |= 1 << (pmc - 1); | 480 | pmc_inuse |= 1 << (pmc - 1); |
| 485 | } | 481 | } |
| 486 | if (event[i] & PM_BUSEVENT_MSK) { | 482 | if (event[i] & PM_BUSEVENT_MSK) { |
| 487 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | 483 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 488 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | 484 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 489 | if (unit > PM_LASTUNIT) | 485 | if (unit > PM_LASTUNIT) |
| 490 | return -1; | 486 | return -1; |
| 491 | if (unit == PM_ISU0_ALT) | 487 | if (unit == PM_ISU0_ALT) |
| 492 | unit = PM_ISU0; | 488 | unit = PM_ISU0; |
| 493 | if (byte >= 4) { | 489 | if (byte >= 4) { |
| 494 | if (unit != PM_LSU1) | 490 | if (unit != PM_LSU1) |
| 495 | return -1; | 491 | return -1; |
| 496 | ++unit; | 492 | ++unit; |
| 497 | byte &= 3; | 493 | byte &= 3; |
| 498 | } | 494 | } |
| 499 | if (busbyte[byte] && busbyte[byte] != unit) | 495 | if (busbyte[byte] && busbyte[byte] != unit) |
| 500 | return -1; | 496 | return -1; |
| 501 | busbyte[byte] = unit; | 497 | busbyte[byte] = unit; |
| 502 | unituse[unit] = 1; | 498 | unituse[unit] = 1; |
| 503 | } | 499 | } |
| 504 | } | 500 | } |
| 505 | 501 | ||
| 506 | /* | 502 | /* |
| 507 | * Assign resources and set multiplexer selects. | 503 | * Assign resources and set multiplexer selects. |
| 508 | * | 504 | * |
| 509 | * PM_ISU0 can go either on TTM0 or TTM1, but that's the only | 505 | * PM_ISU0 can go either on TTM0 or TTM1, but that's the only |
| 510 | * choice we have to deal with. | 506 | * choice we have to deal with. |
| 511 | */ | 507 | */ |
| 512 | if (unituse[PM_ISU0] & | 508 | if (unituse[PM_ISU0] & |
| 513 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { | 509 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { |
| 514 | unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ | 510 | unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ |
| 515 | unituse[PM_ISU0] = 0; | 511 | unituse[PM_ISU0] = 0; |
| 516 | } | 512 | } |
| 517 | /* Set TTM[01]SEL fields. */ | 513 | /* Set TTM[01]SEL fields. */ |
| 518 | ttmuse = 0; | 514 | ttmuse = 0; |
| 519 | for (i = PM_FPU; i <= PM_ISU1; ++i) { | 515 | for (i = PM_FPU; i <= PM_ISU1; ++i) { |
| 520 | if (!unituse[i]) | 516 | if (!unituse[i]) |
| 521 | continue; | 517 | continue; |
| 522 | if (ttmuse++) | 518 | if (ttmuse++) |
| 523 | return -1; | 519 | return -1; |
| 524 | mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH; | 520 | mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH; |
| 525 | } | 521 | } |
| 526 | ttmuse = 0; | 522 | ttmuse = 0; |
| 527 | for (; i <= PM_GRS; ++i) { | 523 | for (; i <= PM_GRS; ++i) { |
| 528 | if (!unituse[i]) | 524 | if (!unituse[i]) |
| 529 | continue; | 525 | continue; |
| 530 | if (ttmuse++) | 526 | if (ttmuse++) |
| 531 | return -1; | 527 | return -1; |
| 532 | mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH; | 528 | mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH; |
| 533 | } | 529 | } |
| 534 | if (ttmuse > 1) | 530 | if (ttmuse > 1) |
| 535 | return -1; | 531 | return -1; |
| 536 | 532 | ||
| 537 | /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */ | 533 | /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */ |
| 538 | for (byte = 0; byte < 4; ++byte) { | 534 | for (byte = 0; byte < 4; ++byte) { |
| 539 | unit = busbyte[byte]; | 535 | unit = busbyte[byte]; |
| 540 | if (!unit) | 536 | if (!unit) |
| 541 | continue; | 537 | continue; |
| 542 | if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { | 538 | if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { |
| 543 | /* get ISU0 through TTM1 rather than TTM0 */ | 539 | /* get ISU0 through TTM1 rather than TTM0 */ |
| 544 | unit = PM_ISU0_ALT; | 540 | unit = PM_ISU0_ALT; |
| 545 | } else if (unit == PM_LSU1 + 1) { | 541 | } else if (unit == PM_LSU1 + 1) { |
| 546 | /* select lower word of LSU1 for this byte */ | 542 | /* select lower word of LSU1 for this byte */ |
| 547 | mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte); | 543 | mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte); |
| 548 | } | 544 | } |
| 549 | ttm = unit >> 2; | 545 | ttm = unit >> 2; |
| 550 | mmcr1 |= (unsigned long)ttm | 546 | mmcr1 |= (unsigned long)ttm |
| 551 | << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); | 547 | << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); |
| 552 | } | 548 | } |
| 553 | 549 | ||
| 554 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ | 550 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ |
| 555 | for (i = 0; i < n_ev; ++i) { | 551 | for (i = 0; i < n_ev; ++i) { |
| 556 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 552 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 557 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | 553 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 558 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | 554 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 559 | psel = event[i] & PM_PMCSEL_MSK; | 555 | psel = event[i] & PM_PMCSEL_MSK; |
| 560 | isbus = event[i] & PM_BUSEVENT_MSK; | 556 | isbus = event[i] & PM_BUSEVENT_MSK; |
| 561 | if (!pmc) { | 557 | if (!pmc) { |
| 562 | /* Bus event or any-PMC direct event */ | 558 | /* Bus event or any-PMC direct event */ |
| 563 | for (pmc = 0; pmc < 4; ++pmc) { | 559 | for (pmc = 0; pmc < 4; ++pmc) { |
| 564 | if (!(pmc_inuse & (1 << pmc))) | 560 | if (!(pmc_inuse & (1 << pmc))) |
| 565 | break; | 561 | break; |
| 566 | } | 562 | } |
| 567 | if (pmc >= 4) | 563 | if (pmc >= 4) |
| 568 | return -1; | 564 | return -1; |
| 569 | pmc_inuse |= 1 << pmc; | 565 | pmc_inuse |= 1 << pmc; |
| 570 | } else if (pmc <= 4) { | 566 | } else if (pmc <= 4) { |
| 571 | /* Direct event */ | 567 | /* Direct event */ |
| 572 | --pmc; | 568 | --pmc; |
| 573 | if (isbus && (byte & 2) && | 569 | if (isbus && (byte & 2) && |
| 574 | (psel == 8 || psel == 0x10 || psel == 0x28)) | 570 | (psel == 8 || psel == 0x10 || psel == 0x28)) |
| 575 | /* add events on higher-numbered bus */ | 571 | /* add events on higher-numbered bus */ |
| 576 | mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc); | 572 | mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc); |
| 577 | } else { | 573 | } else { |
| 578 | /* Instructions or run cycles on PMC5/6 */ | 574 | /* Instructions or run cycles on PMC5/6 */ |
| 579 | --pmc; | 575 | --pmc; |
| 580 | } | 576 | } |
| 581 | if (isbus && unit == PM_GRS) { | 577 | if (isbus && unit == PM_GRS) { |
| 582 | bit = psel & 7; | 578 | bit = psel & 7; |
| 583 | grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; | 579 | grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; |
| 584 | mmcr1 |= (unsigned long)grsel << grsel_shift[bit]; | 580 | mmcr1 |= (unsigned long)grsel << grsel_shift[bit]; |
| 585 | } | 581 | } |
| 586 | if (power5p_marked_instr_event(event[i])) | 582 | if (power5p_marked_instr_event(event[i])) |
| 587 | mmcra |= MMCRA_SAMPLE_ENABLE; | 583 | mmcra |= MMCRA_SAMPLE_ENABLE; |
| 588 | if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1)) | 584 | if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1)) |
| 589 | /* select alternate byte lane */ | 585 | /* select alternate byte lane */ |
| 590 | psel |= 0x10; | 586 | psel |= 0x10; |
| 591 | if (pmc <= 3) | 587 | if (pmc <= 3) |
| 592 | mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); | 588 | mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); |
| 593 | hwc[i] = pmc; | 589 | hwc[i] = pmc; |
| 594 | } | 590 | } |
| 595 | 591 | ||
| 596 | /* Return MMCRx values */ | 592 | /* Return MMCRx values */ |
| 597 | mmcr[0] = 0; | 593 | mmcr[0] = 0; |
| 598 | if (pmc_inuse & 1) | 594 | if (pmc_inuse & 1) |
| 599 | mmcr[0] = MMCR0_PMC1CE; | 595 | mmcr[0] = MMCR0_PMC1CE; |
| 600 | if (pmc_inuse & 0x3e) | 596 | if (pmc_inuse & 0x3e) |
| 601 | mmcr[0] |= MMCR0_PMCjCE; | 597 | mmcr[0] |= MMCR0_PMCjCE; |
| 602 | mmcr[1] = mmcr1; | 598 | mmcr[1] = mmcr1; |
| 603 | mmcr[2] = mmcra; | 599 | mmcr[2] = mmcra; |
| 604 | return 0; | 600 | return 0; |
| 605 | } | 601 | } |
| 606 | 602 | ||
| 607 | static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | 603 | static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[]) |
| 608 | { | 604 | { |
| 609 | if (pmc <= 3) | 605 | if (pmc <= 3) |
| 610 | mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); | 606 | mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); |
| 611 | } | 607 | } |
| 612 | 608 | ||
| 613 | static int power5p_generic_events[] = { | 609 | static int power5p_generic_events[] = { |
| 614 | [PERF_COUNT_HW_CPU_CYCLES] = 0xf, | 610 | [PERF_COUNT_HW_CPU_CYCLES] = 0xf, |
| 615 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009, | 611 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009, |
| 616 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */ | 612 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */ |
| 617 | [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ | 613 | [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ |
| 618 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ | 614 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ |
| 619 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ | 615 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ |
| 620 | }; | 616 | }; |
| 621 | 617 | ||
| 622 | #define C(x) PERF_COUNT_HW_CACHE_##x | 618 | #define C(x) PERF_COUNT_HW_CACHE_##x |
| 623 | 619 | ||
| 624 | /* | 620 | /* |
| 625 | * Table of generalized cache-related events. | 621 | * Table of generalized cache-related events. |
| 626 | * 0 means not supported, -1 means nonsensical, other values | 622 | * 0 means not supported, -1 means nonsensical, other values |
| 627 | * are event codes. | 623 | * are event codes. |
| 628 | */ | 624 | */ |
| 629 | static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | 625 | static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
| 630 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | 626 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 631 | [C(OP_READ)] = { 0x1c10a8, 0x3c1088 }, | 627 | [C(OP_READ)] = { 0x1c10a8, 0x3c1088 }, |
| 632 | [C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 }, | 628 | [C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 }, |
| 633 | [C(OP_PREFETCH)] = { 0xc70e7, -1 }, | 629 | [C(OP_PREFETCH)] = { 0xc70e7, -1 }, |
| 634 | }, | 630 | }, |
| 635 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ | 631 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 636 | [C(OP_READ)] = { 0, 0 }, | 632 | [C(OP_READ)] = { 0, 0 }, |
| 637 | [C(OP_WRITE)] = { -1, -1 }, | 633 | [C(OP_WRITE)] = { -1, -1 }, |
| 638 | [C(OP_PREFETCH)] = { 0, 0 }, | 634 | [C(OP_PREFETCH)] = { 0, 0 }, |
| 639 | }, | 635 | }, |
| 640 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | 636 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 641 | [C(OP_READ)] = { 0, 0 }, | 637 | [C(OP_READ)] = { 0, 0 }, |
| 642 | [C(OP_WRITE)] = { 0, 0 }, | 638 | [C(OP_WRITE)] = { 0, 0 }, |
| 643 | [C(OP_PREFETCH)] = { 0xc50c3, 0 }, | 639 | [C(OP_PREFETCH)] = { 0xc50c3, 0 }, |
| 644 | }, | 640 | }, |
| 645 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 641 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 646 | [C(OP_READ)] = { 0xc20e4, 0x800c4 }, | 642 | [C(OP_READ)] = { 0xc20e4, 0x800c4 }, |
| 647 | [C(OP_WRITE)] = { -1, -1 }, | 643 | [C(OP_WRITE)] = { -1, -1 }, |
| 648 | [C(OP_PREFETCH)] = { -1, -1 }, | 644 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 649 | }, | 645 | }, |
| 650 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 646 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 651 | [C(OP_READ)] = { 0, 0x800c0 }, | 647 | [C(OP_READ)] = { 0, 0x800c0 }, |
| 652 | [C(OP_WRITE)] = { -1, -1 }, | 648 | [C(OP_WRITE)] = { -1, -1 }, |
| 653 | [C(OP_PREFETCH)] = { -1, -1 }, | 649 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 654 | }, | 650 | }, |
| 655 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ | 651 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 656 | [C(OP_READ)] = { 0x230e4, 0x230e5 }, | 652 | [C(OP_READ)] = { 0x230e4, 0x230e5 }, |
| 657 | [C(OP_WRITE)] = { -1, -1 }, | 653 | [C(OP_WRITE)] = { -1, -1 }, |
| 658 | [C(OP_PREFETCH)] = { -1, -1 }, | 654 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 659 | }, | 655 | }, |
| 660 | }; | 656 | }; |
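Each row above maps a (cache, op, result) triple to an event code, with 0 meaning unsupported and -1 nonsensical. A minimal sketch of indexing such a row, using local stand-in enums for the perf cache constants and the L1D row copied verbatim:

    #include <stdio.h>

    enum { OP_READ, OP_WRITE, OP_PREFETCH, OP_MAX };    /* stand-ins */
    enum { RES_ACCESS, RES_MISS, RES_MAX };

    /* L1D row copied from the table above */
    static const int l1d[OP_MAX][RES_MAX] = {
        [OP_READ]     = { 0x1c10a8, 0x3c1088 },
        [OP_WRITE]    = { 0x2c10a8, 0xc10c3 },
        [OP_PREFETCH] = { 0xc70e7, -1 },
    };

    int main(void)
    {
        printf("L1D read miss event = %#x\n", l1d[OP_READ][RES_MISS]);
        return 0;
    }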
| 661 | 657 | ||
| 662 | static struct power_pmu power5p_pmu = { | 658 | static struct power_pmu power5p_pmu = { |
| 663 | .name = "POWER5+/++", | 659 | .name = "POWER5+/++", |
| 664 | .n_counter = 6, | 660 | .n_counter = 6, |
| 665 | .max_alternatives = MAX_ALT, | 661 | .max_alternatives = MAX_ALT, |
| 666 | .add_fields = 0x7000000000055ul, | 662 | .add_fields = 0x7000000000055ul, |
| 667 | .test_adder = 0x3000040000000ul, | 663 | .test_adder = 0x3000040000000ul, |
| 668 | .compute_mmcr = power5p_compute_mmcr, | 664 | .compute_mmcr = power5p_compute_mmcr, |
| 669 | .get_constraint = power5p_get_constraint, | 665 | .get_constraint = power5p_get_constraint, |
| 670 | .get_alternatives = power5p_get_alternatives, | 666 | .get_alternatives = power5p_get_alternatives, |
| 671 | .disable_pmc = power5p_disable_pmc, | 667 | .disable_pmc = power5p_disable_pmc, |
| 672 | .limited_pmc_event = power5p_limited_pmc_event, | 668 | .limited_pmc_event = power5p_limited_pmc_event, |
| 673 | .flags = PPMU_LIMITED_PMC5_6, | 669 | .flags = PPMU_LIMITED_PMC5_6, |
| 674 | .n_generic = ARRAY_SIZE(power5p_generic_events), | 670 | .n_generic = ARRAY_SIZE(power5p_generic_events), |
| 675 | .generic_events = power5p_generic_events, | 671 | .generic_events = power5p_generic_events, |
| 676 | .cache_events = &power5p_cache_events, | 672 | .cache_events = &power5p_cache_events, |
| 677 | }; | 673 | }; |
| 678 | 674 | ||
| 679 | static int init_power5p_pmu(void) | 675 | static int init_power5p_pmu(void) |
| 680 | { | 676 | { |
| 681 | if (!cur_cpu_spec->oprofile_cpu_type || | 677 | if (!cur_cpu_spec->oprofile_cpu_type || |
| 682 | (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+") | 678 | (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+") |
| 683 | && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++"))) | 679 | && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++"))) |
| 684 | return -ENODEV; | 680 | return -ENODEV; |
| 685 | 681 | ||
| 686 | return register_power_pmu(&power5p_pmu); | 682 | return register_power_pmu(&power5p_pmu); |
| 687 | } | 683 | } |
| 688 | 684 | ||
| 689 | arch_initcall(init_power5p_pmu); | 685 | arch_initcall(init_power5p_pmu); |
| 690 | 686 |
arch/powerpc/kernel/power5-pmu.c
| 1 | /* | 1 | /* |
| 2 | * Performance counter support for POWER5 (not POWER5++) processors. | 2 | * Performance counter support for POWER5 (not POWER5++) processors. |
| 3 | * | 3 | * |
| 4 | * Copyright 2009 Paul Mackerras, IBM Corporation. | 4 | * Copyright 2009 Paul Mackerras, IBM Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
| 10 | */ | 10 | */ |
| 11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
| 12 | #include <linux/perf_event.h> | 12 | #include <linux/perf_event.h> |
| 13 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| 14 | #include <asm/reg.h> | 14 | #include <asm/reg.h> |
| 15 | #include <asm/cputable.h> | 15 | #include <asm/cputable.h> |
| 16 | 16 | ||
| 17 | /* | 17 | /* |
| 18 | * Bits in event code for POWER5 (not POWER5++) | 18 | * Bits in event code for POWER5 (not POWER5++) |
| 19 | */ | 19 | */ |
| 20 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ | 20 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ |
| 21 | #define PM_PMC_MSK 0xf | 21 | #define PM_PMC_MSK 0xf |
| 22 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) | 22 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) |
| 23 | #define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ | 23 | #define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ |
| 24 | #define PM_UNIT_MSK 0xf | 24 | #define PM_UNIT_MSK 0xf |
| 25 | #define PM_BYTE_SH 12 /* Byte number of event bus to use */ | 25 | #define PM_BYTE_SH 12 /* Byte number of event bus to use */ |
| 26 | #define PM_BYTE_MSK 7 | 26 | #define PM_BYTE_MSK 7 |
| 27 | #define PM_GRS_SH 8 /* Storage subsystem mux select */ | 27 | #define PM_GRS_SH 8 /* Storage subsystem mux select */ |
| 28 | #define PM_GRS_MSK 7 | 28 | #define PM_GRS_MSK 7 |
| 29 | #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ | 29 | #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ |
| 30 | #define PM_PMCSEL_MSK 0x7f | 30 | #define PM_PMCSEL_MSK 0x7f |
| 31 | 31 | ||
| 32 | /* Values in PM_UNIT field */ | 32 | /* Values in PM_UNIT field */ |
| 33 | #define PM_FPU 0 | 33 | #define PM_FPU 0 |
| 34 | #define PM_ISU0 1 | 34 | #define PM_ISU0 1 |
| 35 | #define PM_IFU 2 | 35 | #define PM_IFU 2 |
| 36 | #define PM_ISU1 3 | 36 | #define PM_ISU1 3 |
| 37 | #define PM_IDU 4 | 37 | #define PM_IDU 4 |
| 38 | #define PM_ISU0_ALT 6 | 38 | #define PM_ISU0_ALT 6 |
| 39 | #define PM_GRS 7 | 39 | #define PM_GRS 7 |
| 40 | #define PM_LSU0 8 | 40 | #define PM_LSU0 8 |
| 41 | #define PM_LSU1 0xc | 41 | #define PM_LSU1 0xc |
| 42 | #define PM_LASTUNIT 0xc | 42 | #define PM_LASTUNIT 0xc |
| 43 | 43 | ||
| 44 | /* | 44 | /* |
| 45 | * Bits in MMCR1 for POWER5 | 45 | * Bits in MMCR1 for POWER5 |
| 46 | */ | 46 | */ |
| 47 | #define MMCR1_TTM0SEL_SH 62 | 47 | #define MMCR1_TTM0SEL_SH 62 |
| 48 | #define MMCR1_TTM1SEL_SH 60 | 48 | #define MMCR1_TTM1SEL_SH 60 |
| 49 | #define MMCR1_TTM2SEL_SH 58 | 49 | #define MMCR1_TTM2SEL_SH 58 |
| 50 | #define MMCR1_TTM3SEL_SH 56 | 50 | #define MMCR1_TTM3SEL_SH 56 |
| 51 | #define MMCR1_TTMSEL_MSK 3 | 51 | #define MMCR1_TTMSEL_MSK 3 |
| 52 | #define MMCR1_TD_CP_DBG0SEL_SH 54 | 52 | #define MMCR1_TD_CP_DBG0SEL_SH 54 |
| 53 | #define MMCR1_TD_CP_DBG1SEL_SH 52 | 53 | #define MMCR1_TD_CP_DBG1SEL_SH 52 |
| 54 | #define MMCR1_TD_CP_DBG2SEL_SH 50 | 54 | #define MMCR1_TD_CP_DBG2SEL_SH 50 |
| 55 | #define MMCR1_TD_CP_DBG3SEL_SH 48 | 55 | #define MMCR1_TD_CP_DBG3SEL_SH 48 |
| 56 | #define MMCR1_GRS_L2SEL_SH 46 | 56 | #define MMCR1_GRS_L2SEL_SH 46 |
| 57 | #define MMCR1_GRS_L2SEL_MSK 3 | 57 | #define MMCR1_GRS_L2SEL_MSK 3 |
| 58 | #define MMCR1_GRS_L3SEL_SH 44 | 58 | #define MMCR1_GRS_L3SEL_SH 44 |
| 59 | #define MMCR1_GRS_L3SEL_MSK 3 | 59 | #define MMCR1_GRS_L3SEL_MSK 3 |
| 60 | #define MMCR1_GRS_MCSEL_SH 41 | 60 | #define MMCR1_GRS_MCSEL_SH 41 |
| 61 | #define MMCR1_GRS_MCSEL_MSK 7 | 61 | #define MMCR1_GRS_MCSEL_MSK 7 |
| 62 | #define MMCR1_GRS_FABSEL_SH 39 | 62 | #define MMCR1_GRS_FABSEL_SH 39 |
| 63 | #define MMCR1_GRS_FABSEL_MSK 3 | 63 | #define MMCR1_GRS_FABSEL_MSK 3 |
| 64 | #define MMCR1_PMC1_ADDER_SEL_SH 35 | 64 | #define MMCR1_PMC1_ADDER_SEL_SH 35 |
| 65 | #define MMCR1_PMC2_ADDER_SEL_SH 34 | 65 | #define MMCR1_PMC2_ADDER_SEL_SH 34 |
| 66 | #define MMCR1_PMC3_ADDER_SEL_SH 33 | 66 | #define MMCR1_PMC3_ADDER_SEL_SH 33 |
| 67 | #define MMCR1_PMC4_ADDER_SEL_SH 32 | 67 | #define MMCR1_PMC4_ADDER_SEL_SH 32 |
| 68 | #define MMCR1_PMC1SEL_SH 25 | 68 | #define MMCR1_PMC1SEL_SH 25 |
| 69 | #define MMCR1_PMC2SEL_SH 17 | 69 | #define MMCR1_PMC2SEL_SH 17 |
| 70 | #define MMCR1_PMC3SEL_SH 9 | 70 | #define MMCR1_PMC3SEL_SH 9 |
| 71 | #define MMCR1_PMC4SEL_SH 1 | 71 | #define MMCR1_PMC4SEL_SH 1 |
| 72 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) | 72 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) |
| 73 | #define MMCR1_PMCSEL_MSK 0x7f | 73 | #define MMCR1_PMCSEL_MSK 0x7f |
| 74 | 74 | ||
| 75 | /* | 75 | /* |
| 76 | * Bits in MMCRA | ||
| 77 | */ | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Layout of constraint bits: | 76 | * Layout of constraint bits: |
| 81 | * 6666555555555544444444443333333333222222222211111111110000000000 | 77 | * 6666555555555544444444443333333333222222222211111111110000000000 |
| 82 | * 3210987654321098765432109876543210987654321098765432109876543210 | 78 | * 3210987654321098765432109876543210987654321098765432109876543210 |
| 83 | * <><>[ ><><>< ><> [ >[ >[ >< >< >< >< ><><><><><><> | 79 | * <><>[ ><><>< ><> [ >[ >[ >< >< >< >< ><><><><><><> |
| 84 | * T0T1 NC G0G1G2 G3 UC PS1PS2 B0 B1 B2 B3 P6P5P4P3P2P1 | 80 | * T0T1 NC G0G1G2 G3 UC PS1PS2 B0 B1 B2 B3 P6P5P4P3P2P1 |
| 85 | * | 81 | * |
| 86 | * T0 - TTM0 constraint | 82 | * T0 - TTM0 constraint |
| 87 | * 54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000 | 83 | * 54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000 |
| 88 | * | 84 | * |
| 89 | * T1 - TTM1 constraint | 85 | * T1 - TTM1 constraint |
| 90 | * 52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000 | 86 | * 52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000 |
| 91 | * | 87 | * |
| 92 | * NC - number of counters | 88 | * NC - number of counters |
| 93 | * 51: NC error 0x0008_0000_0000_0000 | 89 | * 51: NC error 0x0008_0000_0000_0000 |
| 94 | * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 | 90 | * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 |
| 95 | * | 91 | * |
| 96 | * G0..G3 - GRS mux constraints | 92 | * G0..G3 - GRS mux constraints |
| 97 | * 46-47: GRS_L2SEL value | 93 | * 46-47: GRS_L2SEL value |
| 98 | * 44-45: GRS_L3SEL value | 94 | * 44-45: GRS_L3SEL value |
| 99 | * 41-43: GRS_MCSEL value | 95 | * 41-43: GRS_MCSEL value |
| 100 | * 39-40: GRS_FABSEL value | 96 | * 39-40: GRS_FABSEL value |
| 101 | * Note that these match up with their bit positions in MMCR1 | 97 | * Note that these match up with their bit positions in MMCR1 |
| 102 | * | 98 | * |
| 103 | * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS | 99 | * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS |
| 104 | * 37: UC3 error 0x20_0000_0000 | 100 | * 37: UC3 error 0x20_0000_0000 |
| 105 | * 36: FPU|IFU|ISU1 events needed 0x10_0000_0000 | 101 | * 36: FPU|IFU|ISU1 events needed 0x10_0000_0000 |
| 106 | * 35: ISU0 events needed 0x08_0000_0000 | 102 | * 35: ISU0 events needed 0x08_0000_0000 |
| 107 | * 34: IDU|GRS events needed 0x04_0000_0000 | 103 | * 34: IDU|GRS events needed 0x04_0000_0000 |
| 108 | * | 104 | * |
| 109 | * PS1 | 105 | * PS1 |
| 110 | * 33: PS1 error 0x2_0000_0000 | 106 | * 33: PS1 error 0x2_0000_0000 |
| 111 | * 31-32: count of events needing PMC1/2 0x1_8000_0000 | 107 | * 31-32: count of events needing PMC1/2 0x1_8000_0000 |
| 112 | * | 108 | * |
| 113 | * PS2 | 109 | * PS2 |
| 114 | * 30: PS2 error 0x4000_0000 | 110 | * 30: PS2 error 0x4000_0000 |
| 115 | * 28-29: count of events needing PMC3/4 0x3000_0000 | 111 | * 28-29: count of events needing PMC3/4 0x3000_0000 |
| 116 | * | 112 | * |
| 117 | * B0 | 113 | * B0 |
| 118 | * 24-27: Byte 0 event source 0x0f00_0000 | 114 | * 24-27: Byte 0 event source 0x0f00_0000 |
| 119 | * Encoding as for the event code | 115 | * Encoding as for the event code |
| 120 | * | 116 | * |
| 121 | * B1, B2, B3 | 117 | * B1, B2, B3 |
| 122 | * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources | 118 | * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources |
| 123 | * | 119 | * |
| 124 | * P1..P6 | 120 | * P1..P6 |
| 125 | * 0-11: Count of events needing PMC1..PMC6 | 121 | * 0-11: Count of events needing PMC1..PMC6 |
| 126 | */ | 122 | */ |
| 127 | 123 | ||
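The layout above feeds the generic mask/value constraint scheme in the powerpc perf core: each event contributes a small count into an "add" field, and a dedicated error bit above the field catches overflow when too many events compete for one resource. The core biases the sum with the per-PMU test_adder (0x3000490000000ul for this PMU, see power5_pmu below) so the error bit trips at the real limit rather than at the field's natural width. A minimal standalone sketch of the NC field (bits 48-50, error bit 51) with the bias of 3 taken from test_adder; this is an illustration, not kernel code:

/*
 * Add-field overflow sketch for the NC constraint: bits 48-50 count
 * events needing PMC1-4, bit 51 is the NC error bit.  The bias of 3
 * (from power5_pmu.test_adder) makes the 5th event, one more than
 * the four PMC1-4 counters, set the error bit.
 */
#include <stdio.h>

#define NC_ADD   0x1000000000000ul   /* +1 in bits 48-50 */
#define NC_ERR   0x8000000000000ul   /* bit 51: NC error */
#define NC_BIAS  0x3000000000000ul   /* bias from test_adder */

int main(void)
{
        unsigned long sum = NC_BIAS;
        int i;

        for (i = 1; i <= 5; ++i) {
                sum += NC_ADD;          /* one more event needing PMC1-4 */
                printf("event %d: %s\n", i,
                       (sum & NC_ERR) ? "constraint violated" : "ok");
        }
        return 0;
}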
| 128 | static const int grsel_shift[8] = { | 124 | static const int grsel_shift[8] = { |
| 129 | MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, | 125 | MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, |
| 130 | MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, | 126 | MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, |
| 131 | MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH | 127 | MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH |
| 132 | }; | 128 | }; |
| 133 | 129 | ||
| 134 | /* Masks and values for using events from the various units */ | 130 | /* Masks and values for using events from the various units */ |
| 135 | static unsigned long unit_cons[PM_LASTUNIT+1][2] = { | 131 | static unsigned long unit_cons[PM_LASTUNIT+1][2] = { |
| 136 | [PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul }, | 132 | [PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul }, |
| 137 | [PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul }, | 133 | [PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul }, |
| 138 | [PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul }, | 134 | [PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul }, |
| 139 | [PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul }, | 135 | [PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul }, |
| 140 | [PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul }, | 136 | [PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul }, |
| 141 | [PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul }, | 137 | [PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul }, |
| 142 | }; | 138 | }; |
| 143 | 139 | ||
| 144 | static int power5_get_constraint(u64 event, unsigned long *maskp, | 140 | static int power5_get_constraint(u64 event, unsigned long *maskp, |
| 145 | unsigned long *valp) | 141 | unsigned long *valp) |
| 146 | { | 142 | { |
| 147 | int pmc, byte, unit, sh; | 143 | int pmc, byte, unit, sh; |
| 148 | int bit, fmask; | 144 | int bit, fmask; |
| 149 | unsigned long mask = 0, value = 0; | 145 | unsigned long mask = 0, value = 0; |
| 150 | int grp = -1; | 146 | int grp = -1; |
| 151 | 147 | ||
| 152 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 148 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 153 | if (pmc) { | 149 | if (pmc) { |
| 154 | if (pmc > 6) | 150 | if (pmc > 6) |
| 155 | return -1; | 151 | return -1; |
| 156 | sh = (pmc - 1) * 2; | 152 | sh = (pmc - 1) * 2; |
| 157 | mask |= 2 << sh; | 153 | mask |= 2 << sh; |
| 158 | value |= 1 << sh; | 154 | value |= 1 << sh; |
| 159 | if (pmc <= 4) | 155 | if (pmc <= 4) |
| 160 | grp = (pmc - 1) >> 1; | 156 | grp = (pmc - 1) >> 1; |
| 161 | else if (event != 0x500009 && event != 0x600005) | 157 | else if (event != 0x500009 && event != 0x600005) |
| 162 | return -1; | 158 | return -1; |
| 163 | } | 159 | } |
| 164 | if (event & PM_BUSEVENT_MSK) { | 160 | if (event & PM_BUSEVENT_MSK) { |
| 165 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | 161 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 166 | if (unit > PM_LASTUNIT) | 162 | if (unit > PM_LASTUNIT) |
| 167 | return -1; | 163 | return -1; |
| 168 | if (unit == PM_ISU0_ALT) | 164 | if (unit == PM_ISU0_ALT) |
| 169 | unit = PM_ISU0; | 165 | unit = PM_ISU0; |
| 170 | mask |= unit_cons[unit][0]; | 166 | mask |= unit_cons[unit][0]; |
| 171 | value |= unit_cons[unit][1]; | 167 | value |= unit_cons[unit][1]; |
| 172 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | 168 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 173 | if (byte >= 4) { | 169 | if (byte >= 4) { |
| 174 | if (unit != PM_LSU1) | 170 | if (unit != PM_LSU1) |
| 175 | return -1; | 171 | return -1; |
| 176 | /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ | 172 | /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ |
| 177 | ++unit; | 173 | ++unit; |
| 178 | byte &= 3; | 174 | byte &= 3; |
| 179 | } | 175 | } |
| 180 | if (unit == PM_GRS) { | 176 | if (unit == PM_GRS) { |
| 181 | bit = event & 7; | 177 | bit = event & 7; |
| 182 | fmask = (bit == 6) ? 7 : 3; | 178 | fmask = (bit == 6) ? 7 : 3; |
| 183 | sh = grsel_shift[bit]; | 179 | sh = grsel_shift[bit]; |
| 184 | mask |= (unsigned long)fmask << sh; | 180 | mask |= (unsigned long)fmask << sh; |
| 185 | value |= (unsigned long)((event >> PM_GRS_SH) & fmask) | 181 | value |= (unsigned long)((event >> PM_GRS_SH) & fmask) |
| 186 | << sh; | 182 | << sh; |
| 187 | } | 183 | } |
| 188 | /* | 184 | /* |
| 189 | * Bus events on bytes 0 and 2 can be counted | 185 | * Bus events on bytes 0 and 2 can be counted |
| 190 | * on PMC1/2; bytes 1 and 3 on PMC3/4. | 186 | * on PMC1/2; bytes 1 and 3 on PMC3/4. |
| 191 | */ | 187 | */ |
| 192 | if (!pmc) | 188 | if (!pmc) |
| 193 | grp = byte & 1; | 189 | grp = byte & 1; |
| 194 | /* Set byte lane select field */ | 190 | /* Set byte lane select field */ |
| 195 | mask |= 0xfUL << (24 - 4 * byte); | 191 | mask |= 0xfUL << (24 - 4 * byte); |
| 196 | value |= (unsigned long)unit << (24 - 4 * byte); | 192 | value |= (unsigned long)unit << (24 - 4 * byte); |
| 197 | } | 193 | } |
| 198 | if (grp == 0) { | 194 | if (grp == 0) { |
| 199 | /* increment PMC1/2 field */ | 195 | /* increment PMC1/2 field */ |
| 200 | mask |= 0x200000000ul; | 196 | mask |= 0x200000000ul; |
| 201 | value |= 0x080000000ul; | 197 | value |= 0x080000000ul; |
| 202 | } else if (grp == 1) { | 198 | } else if (grp == 1) { |
| 203 | /* increment PMC3/4 field */ | 199 | /* increment PMC3/4 field */ |
| 204 | mask |= 0x40000000ul; | 200 | mask |= 0x40000000ul; |
| 205 | value |= 0x10000000ul; | 201 | value |= 0x10000000ul; |
| 206 | } | 202 | } |
| 207 | if (pmc < 5) { | 203 | if (pmc < 5) { |
| 208 | /* need a counter from PMC1-4 set */ | 204 | /* need a counter from PMC1-4 set */ |
| 209 | mask |= 0x8000000000000ul; | 205 | mask |= 0x8000000000000ul; |
| 210 | value |= 0x1000000000000ul; | 206 | value |= 0x1000000000000ul; |
| 211 | } | 207 | } |
| 212 | *maskp = mask; | 208 | *maskp = mask; |
| 213 | *valp = value; | 209 | *valp = value; |
| 214 | return 0; | 210 | return 0; |
| 215 | } | 211 | } |
| 216 | 212 | ||
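Besides add fields, power5_get_constraint() emits "select" fields: the byte-lane nibbles at bits 24-27 down through 12-15 record which unit drives each event-bus byte, and two events can coexist only if those fields agree wherever both constrain them (compare the busbyte[] check in power5_compute_mmcr() below). A standalone sketch of that agreement test, using hypothetical unit ids and a simplified compatibility check rather than the core's exact algorithm:

/*
 * Select-field sketch (not kernel code).  Byte b's event source
 * lives in constraint bits (24 - 4*b)..(27 - 4*b); two events that
 * both use a byte must name the same unit there.
 */
#include <stdio.h>

struct cons { unsigned long mask, value; };

/* mirrors: mask  |= 0xfUL << (24 - 4 * byte);
 *          value |= (unsigned long)unit << (24 - 4 * byte); */
static struct cons byte_source(int byte, unsigned long unit)
{
        struct cons c;

        c.mask  = 0xfUL << (24 - 4 * byte);
        c.value = unit << (24 - 4 * byte);
        return c;
}

static int compatible(struct cons a, struct cons b)
{
        unsigned long both = a.mask & b.mask;

        return (a.value & both) == (b.value & both);
}

int main(void)
{
        struct cons x = byte_source(0, 0x8);    /* hypothetical unit ids */
        struct cons y = byte_source(0, 0xa);
        struct cons z = byte_source(1, 0xa);

        printf("same byte, different units: %s\n",
               compatible(x, y) ? "ok" : "conflict");
        printf("different bytes:            %s\n",
               compatible(x, z) ? "ok" : "conflict");
        return 0;
}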
| 217 | #define MAX_ALT 3 /* at most 3 alternatives for any event */ | 213 | #define MAX_ALT 3 /* at most 3 alternatives for any event */ |
| 218 | 214 | ||
| 219 | static const unsigned int event_alternatives[][MAX_ALT] = { | 215 | static const unsigned int event_alternatives[][MAX_ALT] = { |
| 220 | { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ | 216 | { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ |
| 221 | { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ | 217 | { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ |
| 222 | { 0x100005, 0x600005 }, /* PM_RUN_CYC */ | 218 | { 0x100005, 0x600005 }, /* PM_RUN_CYC */ |
| 223 | { 0x100009, 0x200009, 0x500009 }, /* PM_INST_CMPL */ | 219 | { 0x100009, 0x200009, 0x500009 }, /* PM_INST_CMPL */ |
| 224 | { 0x300009, 0x400009 }, /* PM_INST_DISP */ | 220 | { 0x300009, 0x400009 }, /* PM_INST_DISP */ |
| 225 | }; | 221 | }; |
| 226 | 222 | ||
| 227 | /* | 223 | /* |
| 228 | * Scan the alternatives table for a match and return the | 224 | * Scan the alternatives table for a match and return the |
| 229 | * index into the alternatives table if found, else -1. | 225 | * index into the alternatives table if found, else -1. |
| 230 | */ | 226 | */ |
| 231 | static int find_alternative(u64 event) | 227 | static int find_alternative(u64 event) |
| 232 | { | 228 | { |
| 233 | int i, j; | 229 | int i, j; |
| 234 | 230 | ||
| 235 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { | 231 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { |
| 236 | if (event < event_alternatives[i][0]) | 232 | if (event < event_alternatives[i][0]) |
| 237 | break; | 233 | break; |
| 238 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) | 234 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) |
| 239 | if (event == event_alternatives[i][j]) | 235 | if (event == event_alternatives[i][j]) |
| 240 | return i; | 236 | return i; |
| 241 | } | 237 | } |
| 242 | return -1; | 238 | return -1; |
| 243 | } | 239 | } |
| 244 | 240 | ||
| 245 | static const unsigned char bytedecode_alternatives[4][4] = { | 241 | static const unsigned char bytedecode_alternatives[4][4] = { |
| 246 | /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, | 242 | /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, |
| 247 | /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, | 243 | /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, |
| 248 | /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, | 244 | /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, |
| 249 | /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } | 245 | /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } |
| 250 | }; | 246 | }; |
| 251 | 247 | ||
| 252 | /* | 248 | /* |
| 253 | * Some direct events for decodes of event bus byte 3 have alternative | 249 | * Some direct events for decodes of event bus byte 3 have alternative |
| 254 | * PMCSEL values on other counters. This returns the alternative | 250 | * PMCSEL values on other counters. This returns the alternative |
| 255 | * event code for those that do, or -1 otherwise. | 251 | * event code for those that do, or -1 otherwise. |
| 256 | */ | 252 | */ |
| 257 | static s64 find_alternative_bdecode(u64 event) | 253 | static s64 find_alternative_bdecode(u64 event) |
| 258 | { | 254 | { |
| 259 | int pmc, altpmc, pp, j; | 255 | int pmc, altpmc, pp, j; |
| 260 | 256 | ||
| 261 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 257 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 262 | if (pmc == 0 || pmc > 4) | 258 | if (pmc == 0 || pmc > 4) |
| 263 | return -1; | 259 | return -1; |
| 264 | altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ | 260 | altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ |
| 265 | pp = event & PM_PMCSEL_MSK; | 261 | pp = event & PM_PMCSEL_MSK; |
| 266 | for (j = 0; j < 4; ++j) { | 262 | for (j = 0; j < 4; ++j) { |
| 267 | if (bytedecode_alternatives[pmc - 1][j] == pp) { | 263 | if (bytedecode_alternatives[pmc - 1][j] == pp) { |
| 268 | return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | | 264 | return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | |
| 269 | (altpmc << PM_PMC_SH) | | 265 | (altpmc << PM_PMC_SH) | |
| 270 | bytedecode_alternatives[altpmc - 1][j]; | 266 | bytedecode_alternatives[altpmc - 1][j]; |
| 271 | } | 267 | } |
| 272 | } | 268 | } |
| 273 | return -1; | 269 | return -1; |
| 274 | } | 270 | } |
| 275 | 271 | ||
| 276 | static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[]) | 272 | static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[]) |
| 277 | { | 273 | { |
| 278 | int i, j, nalt = 1; | 274 | int i, j, nalt = 1; |
| 279 | s64 ae; | 275 | s64 ae; |
| 280 | 276 | ||
| 281 | alt[0] = event; | 277 | alt[0] = event; |
| 282 | nalt = 1; | 278 | nalt = 1; |
| 283 | i = find_alternative(event); | 279 | i = find_alternative(event); |
| 284 | if (i >= 0) { | 280 | if (i >= 0) { |
| 285 | for (j = 0; j < MAX_ALT; ++j) { | 281 | for (j = 0; j < MAX_ALT; ++j) { |
| 286 | ae = event_alternatives[i][j]; | 282 | ae = event_alternatives[i][j]; |
| 287 | if (ae && ae != event) | 283 | if (ae && ae != event) |
| 288 | alt[nalt++] = ae; | 284 | alt[nalt++] = ae; |
| 289 | } | 285 | } |
| 290 | } else { | 286 | } else { |
| 291 | ae = find_alternative_bdecode(event); | 287 | ae = find_alternative_bdecode(event); |
| 292 | if (ae > 0) | 288 | if (ae > 0) |
| 293 | alt[nalt++] = ae; | 289 | alt[nalt++] = ae; |
| 294 | } | 290 | } |
| 295 | return nalt; | 291 | return nalt; |
| 296 | } | 292 | } |
| 297 | 293 | ||
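As a worked example of the two paths: PM_INST_CMPL (0x100009) is in event_alternatives above, so power5_get_alternatives() returns all three encodings {0x100009, 0x200009, 0x500009}; an event such as 0x100021 (PMC1, PMCSEL 0x21) misses the table but hits the byte-decode path, which swaps PMC1 for PMC4 and PMCSEL 0x21 for 0x07, giving 0x400007. A minimal sketch of just that swap, assuming the field definitions from the top of this file (PM_PMC_SH = 20, PM_PMC_MSK = 7, PM_PMCSEL_MSK = 0x7f), which lie outside this hunk:

/*
 * Byte-decode PMC swap sketch: altpmc = 5 - pmc pairs PMC1 with PMC4
 * and PMC2 with PMC3.  Only the first column of
 * bytedecode_alternatives is reproduced here.
 */
#include <stdio.h>

#define PM_PMC_SH       20
#define PM_PMC_MSK      7
#define PM_PMC_MSKS     (PM_PMC_MSK << PM_PMC_SH)
#define PM_PMCSEL_MSK   0x7f

static const unsigned char bdec[4] = { 0x21, 0x07, 0x20, 0x07 };

int main(void)
{
        unsigned long event = 0x100021;         /* PMC1, PMCSEL 0x21 */
        int pmc    = (event >> PM_PMC_SH) & PM_PMC_MSK;
        int altpmc = 5 - pmc;                   /* 1 <-> 4, 2 <-> 3 */
        unsigned long alt;

        alt = (event & ~(unsigned long)(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
              ((unsigned long)altpmc << PM_PMC_SH) | bdec[altpmc - 1];
        printf("0x%lx has alternative 0x%lx\n", event, alt);
        return 0;
}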
| 298 | /* | 294 | /* |
| 299 | * Map of which direct events on which PMCs are marked instruction events. | 295 | * Map of which direct events on which PMCs are marked instruction events. |
| 300 | * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event. | 296 | * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event. |
| 301 | * Bit 0 is set if it is marked for all PMCs. | 297 | * Bit 0 is set if it is marked for all PMCs. |
| 302 | * The 0x80 bit indicates a byte decode PMCSEL value. | 298 | * The 0x80 bit indicates a byte decode PMCSEL value. |
| 303 | */ | 299 | */ |
| 304 | static unsigned char direct_event_is_marked[0x28] = { | 300 | static unsigned char direct_event_is_marked[0x28] = { |
| 305 | 0, /* 00 */ | 301 | 0, /* 00 */ |
| 306 | 0x1f, /* 01 PM_IOPS_CMPL */ | 302 | 0x1f, /* 01 PM_IOPS_CMPL */ |
| 307 | 0x2, /* 02 PM_MRK_GRP_DISP */ | 303 | 0x2, /* 02 PM_MRK_GRP_DISP */ |
| 308 | 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ | 304 | 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ |
| 309 | 0, /* 04 */ | 305 | 0, /* 04 */ |
| 310 | 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */ | 306 | 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */ |
| 311 | 0x80, /* 06 */ | 307 | 0x80, /* 06 */ |
| 312 | 0x80, /* 07 */ | 308 | 0x80, /* 07 */ |
| 313 | 0, 0, 0, /* 08 - 0a */ | 309 | 0, 0, 0, /* 08 - 0a */ |
| 314 | 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */ | 310 | 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */ |
| 315 | 0, /* 0c */ | 311 | 0, /* 0c */ |
| 316 | 0x80, /* 0d */ | 312 | 0x80, /* 0d */ |
| 317 | 0x80, /* 0e */ | 313 | 0x80, /* 0e */ |
| 318 | 0, /* 0f */ | 314 | 0, /* 0f */ |
| 319 | 0, /* 10 */ | 315 | 0, /* 10 */ |
| 320 | 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */ | 316 | 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */ |
| 321 | 0, /* 12 */ | 317 | 0, /* 12 */ |
| 322 | 0x10, /* 13 PM_MRK_GRP_CMPL */ | 318 | 0x10, /* 13 PM_MRK_GRP_CMPL */ |
| 323 | 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */ | 319 | 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */ |
| 324 | 0x2, /* 15 PM_MRK_GRP_ISSUED */ | 320 | 0x2, /* 15 PM_MRK_GRP_ISSUED */ |
| 325 | 0x80, /* 16 */ | 321 | 0x80, /* 16 */ |
| 326 | 0x80, /* 17 */ | 322 | 0x80, /* 17 */ |
| 327 | 0, 0, 0, 0, 0, /* 18 - 1c */ | 323 | 0, 0, 0, 0, 0, /* 18 - 1c */ |
| 328 | 0x80, /* 1d */ | 324 | 0x80, /* 1d */ |
| 329 | 0x80, /* 1e */ | 325 | 0x80, /* 1e */ |
| 330 | 0, /* 1f */ | 326 | 0, /* 1f */ |
| 331 | 0x80, /* 20 */ | 327 | 0x80, /* 20 */ |
| 332 | 0x80, /* 21 */ | 328 | 0x80, /* 21 */ |
| 333 | 0x80, /* 22 */ | 329 | 0x80, /* 22 */ |
| 334 | 0x80, /* 23 */ | 330 | 0x80, /* 23 */ |
| 335 | 0x80, /* 24 */ | 331 | 0x80, /* 24 */ |
| 336 | 0x80, /* 25 */ | 332 | 0x80, /* 25 */ |
| 337 | 0x80, /* 26 */ | 333 | 0x80, /* 26 */ |
| 338 | 0x80, /* 27 */ | 334 | 0x80, /* 27 */ |
| 339 | }; | 335 | }; |
| 340 | 336 | ||
| 341 | /* | 337 | /* |
| 342 | * Returns 1 if event counts things relating to marked instructions | 338 | * Returns 1 if event counts things relating to marked instructions |
| 343 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. | 339 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. |
| 344 | */ | 340 | */ |
| 345 | static int power5_marked_instr_event(u64 event) | 341 | static int power5_marked_instr_event(u64 event) |
| 346 | { | 342 | { |
| 347 | int pmc, psel; | 343 | int pmc, psel; |
| 348 | int bit, byte, unit; | 344 | int bit, byte, unit; |
| 349 | u32 mask; | 345 | u32 mask; |
| 350 | 346 | ||
| 351 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 347 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 352 | psel = event & PM_PMCSEL_MSK; | 348 | psel = event & PM_PMCSEL_MSK; |
| 353 | if (pmc >= 5) | 349 | if (pmc >= 5) |
| 354 | return 0; | 350 | return 0; |
| 355 | 351 | ||
| 356 | bit = -1; | 352 | bit = -1; |
| 357 | if (psel < sizeof(direct_event_is_marked)) { | 353 | if (psel < sizeof(direct_event_is_marked)) { |
| 358 | if (direct_event_is_marked[psel] & (1 << pmc)) | 354 | if (direct_event_is_marked[psel] & (1 << pmc)) |
| 359 | return 1; | 355 | return 1; |
| 360 | if (direct_event_is_marked[psel] & 0x80) | 356 | if (direct_event_is_marked[psel] & 0x80) |
| 361 | bit = 4; | 357 | bit = 4; |
| 362 | else if (psel == 0x08) | 358 | else if (psel == 0x08) |
| 363 | bit = pmc - 1; | 359 | bit = pmc - 1; |
| 364 | else if (psel == 0x10) | 360 | else if (psel == 0x10) |
| 365 | bit = 4 - pmc; | 361 | bit = 4 - pmc; |
| 366 | else if (psel == 0x1b && (pmc == 1 || pmc == 3)) | 362 | else if (psel == 0x1b && (pmc == 1 || pmc == 3)) |
| 367 | bit = 4; | 363 | bit = 4; |
| 368 | } else if ((psel & 0x58) == 0x40) | 364 | } else if ((psel & 0x58) == 0x40) |
| 369 | bit = psel & 7; | 365 | bit = psel & 7; |
| 370 | 366 | ||
| 371 | if (!(event & PM_BUSEVENT_MSK)) | 367 | if (!(event & PM_BUSEVENT_MSK)) |
| 372 | return 0; | 368 | return 0; |
| 373 | 369 | ||
| 374 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | 370 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 375 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | 371 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 376 | if (unit == PM_LSU0) { | 372 | if (unit == PM_LSU0) { |
| 377 | /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */ | 373 | /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */ |
| 378 | mask = 0x5dff00; | 374 | mask = 0x5dff00; |
| 379 | } else if (unit == PM_LSU1 && byte >= 4) { | 375 | } else if (unit == PM_LSU1 && byte >= 4) { |
| 380 | byte -= 4; | 376 | byte -= 4; |
| 381 | /* byte 4 bits 1,3,5,7, byte 5 bits 6-7, byte 7 bits 0-4,6 */ | 377 | /* byte 4 bits 1,3,5,7, byte 5 bits 6-7, byte 7 bits 0-4,6 */ |
| 382 | mask = 0x5f00c0aa; | 378 | mask = 0x5f00c0aa; |
| 383 | } else | 379 | } else |
| 384 | return 0; | 380 | return 0; |
| 385 | 381 | ||
| 386 | return (mask >> (byte * 8 + bit)) & 1; | 382 | return (mask >> (byte * 8 + bit)) & 1; |
| 387 | } | 383 | } |
| 388 | 384 | ||
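The final test compresses the per-unit maps of marked bits into one shift: bit (byte * 8 + bit) of the mask, in LE order. A standalone decode of the LSU0 mask 0x5dff00 used above, which prints exactly the bits the comment lists (byte 1 bits 0-7, byte 2 bits 0, 2-4, 6):

/* Standalone decode (not kernel code) of the LSU0 marked-event mask
 * used above.  Bit (byte * 8 + bit) of the mask, LE order, says
 * whether that event-bus bit counts marked instructions. */
#include <stdio.h>

int main(void)
{
        unsigned int mask = 0x5dff00;   /* PM_LSU0 marked bits */
        int byte, bit;

        for (byte = 0; byte < 4; ++byte)
                for (bit = 0; bit < 8; ++bit)
                        if ((mask >> (byte * 8 + bit)) & 1)
                                printf("byte %d bit %d is marked\n",
                                       byte, bit);
        return 0;
}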
| 389 | static int power5_compute_mmcr(u64 event[], int n_ev, | 385 | static int power5_compute_mmcr(u64 event[], int n_ev, |
| 390 | unsigned int hwc[], unsigned long mmcr[]) | 386 | unsigned int hwc[], unsigned long mmcr[]) |
| 391 | { | 387 | { |
| 392 | unsigned long mmcr1 = 0; | 388 | unsigned long mmcr1 = 0; |
| 393 | unsigned long mmcra = 0; | 389 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
| 394 | unsigned int pmc, unit, byte, psel; | 390 | unsigned int pmc, unit, byte, psel; |
| 395 | unsigned int ttm, grp; | 391 | unsigned int ttm, grp; |
| 396 | int i, isbus, bit, grsel; | 392 | int i, isbus, bit, grsel; |
| 397 | unsigned int pmc_inuse = 0; | 393 | unsigned int pmc_inuse = 0; |
| 398 | unsigned int pmc_grp_use[2]; | 394 | unsigned int pmc_grp_use[2]; |
| 399 | unsigned char busbyte[4]; | 395 | unsigned char busbyte[4]; |
| 400 | unsigned char unituse[16]; | 396 | unsigned char unituse[16]; |
| 401 | int ttmuse; | 397 | int ttmuse; |
| 402 | 398 | ||
| 403 | if (n_ev > 6) | 399 | if (n_ev > 6) |
| 404 | return -1; | 400 | return -1; |
| 405 | 401 | ||
| 406 | /* First pass to count resource use */ | 402 | /* First pass to count resource use */ |
| 407 | pmc_grp_use[0] = pmc_grp_use[1] = 0; | 403 | pmc_grp_use[0] = pmc_grp_use[1] = 0; |
| 408 | memset(busbyte, 0, sizeof(busbyte)); | 404 | memset(busbyte, 0, sizeof(busbyte)); |
| 409 | memset(unituse, 0, sizeof(unituse)); | 405 | memset(unituse, 0, sizeof(unituse)); |
| 410 | for (i = 0; i < n_ev; ++i) { | 406 | for (i = 0; i < n_ev; ++i) { |
| 411 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 407 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 412 | if (pmc) { | 408 | if (pmc) { |
| 413 | if (pmc > 6) | 409 | if (pmc > 6) |
| 414 | return -1; | 410 | return -1; |
| 415 | if (pmc_inuse & (1 << (pmc - 1))) | 411 | if (pmc_inuse & (1 << (pmc - 1))) |
| 416 | return -1; | 412 | return -1; |
| 417 | pmc_inuse |= 1 << (pmc - 1); | 413 | pmc_inuse |= 1 << (pmc - 1); |
| 418 | /* count 1/2 vs 3/4 use */ | 414 | /* count 1/2 vs 3/4 use */ |
| 419 | if (pmc <= 4) | 415 | if (pmc <= 4) |
| 420 | ++pmc_grp_use[(pmc - 1) >> 1]; | 416 | ++pmc_grp_use[(pmc - 1) >> 1]; |
| 421 | } | 417 | } |
| 422 | if (event[i] & PM_BUSEVENT_MSK) { | 418 | if (event[i] & PM_BUSEVENT_MSK) { |
| 423 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | 419 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 424 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | 420 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 425 | if (unit > PM_LASTUNIT) | 421 | if (unit > PM_LASTUNIT) |
| 426 | return -1; | 422 | return -1; |
| 427 | if (unit == PM_ISU0_ALT) | 423 | if (unit == PM_ISU0_ALT) |
| 428 | unit = PM_ISU0; | 424 | unit = PM_ISU0; |
| 429 | if (byte >= 4) { | 425 | if (byte >= 4) { |
| 430 | if (unit != PM_LSU1) | 426 | if (unit != PM_LSU1) |
| 431 | return -1; | 427 | return -1; |
| 432 | ++unit; | 428 | ++unit; |
| 433 | byte &= 3; | 429 | byte &= 3; |
| 434 | } | 430 | } |
| 435 | if (!pmc) | 431 | if (!pmc) |
| 436 | ++pmc_grp_use[byte & 1]; | 432 | ++pmc_grp_use[byte & 1]; |
| 437 | if (busbyte[byte] && busbyte[byte] != unit) | 433 | if (busbyte[byte] && busbyte[byte] != unit) |
| 438 | return -1; | 434 | return -1; |
| 439 | busbyte[byte] = unit; | 435 | busbyte[byte] = unit; |
| 440 | unituse[unit] = 1; | 436 | unituse[unit] = 1; |
| 441 | } | 437 | } |
| 442 | } | 438 | } |
| 443 | if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2) | 439 | if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2) |
| 444 | return -1; | 440 | return -1; |
| 445 | 441 | ||
| 446 | /* | 442 | /* |
| 447 | * Assign resources and set multiplexer selects. | 443 | * Assign resources and set multiplexer selects. |
| 448 | * | 444 | * |
| 449 | * PM_ISU0 can go either on TTM0 or TTM1, but that's the only | 445 | * PM_ISU0 can go either on TTM0 or TTM1, but that's the only |
| 450 | * choice we have to deal with. | 446 | * choice we have to deal with. |
| 451 | */ | 447 | */ |
| 452 | if (unituse[PM_ISU0] & | 448 | if (unituse[PM_ISU0] & |
| 453 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { | 449 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { |
| 454 | unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ | 450 | unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ |
| 455 | unituse[PM_ISU0] = 0; | 451 | unituse[PM_ISU0] = 0; |
| 456 | } | 452 | } |
| 457 | /* Set TTM[01]SEL fields. */ | 453 | /* Set TTM[01]SEL fields. */ |
| 458 | ttmuse = 0; | 454 | ttmuse = 0; |
| 459 | for (i = PM_FPU; i <= PM_ISU1; ++i) { | 455 | for (i = PM_FPU; i <= PM_ISU1; ++i) { |
| 460 | if (!unituse[i]) | 456 | if (!unituse[i]) |
| 461 | continue; | 457 | continue; |
| 462 | if (ttmuse++) | 458 | if (ttmuse++) |
| 463 | return -1; | 459 | return -1; |
| 464 | mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH; | 460 | mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH; |
| 465 | } | 461 | } |
| 466 | ttmuse = 0; | 462 | ttmuse = 0; |
| 467 | for (; i <= PM_GRS; ++i) { | 463 | for (; i <= PM_GRS; ++i) { |
| 468 | if (!unituse[i]) | 464 | if (!unituse[i]) |
| 469 | continue; | 465 | continue; |
| 470 | if (ttmuse++) | 466 | if (ttmuse++) |
| 471 | return -1; | 467 | return -1; |
| 472 | mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH; | 468 | mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH; |
| 473 | } | 469 | } |
| 474 | if (ttmuse > 1) | 470 | if (ttmuse > 1) |
| 475 | return -1; | 471 | return -1; |
| 476 | 472 | ||
| 477 | /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */ | 473 | /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */ |
| 478 | for (byte = 0; byte < 4; ++byte) { | 474 | for (byte = 0; byte < 4; ++byte) { |
| 479 | unit = busbyte[byte]; | 475 | unit = busbyte[byte]; |
| 480 | if (!unit) | 476 | if (!unit) |
| 481 | continue; | 477 | continue; |
| 482 | if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { | 478 | if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { |
| 483 | /* get ISU0 through TTM1 rather than TTM0 */ | 479 | /* get ISU0 through TTM1 rather than TTM0 */ |
| 484 | unit = PM_ISU0_ALT; | 480 | unit = PM_ISU0_ALT; |
| 485 | } else if (unit == PM_LSU1 + 1) { | 481 | } else if (unit == PM_LSU1 + 1) { |
| 486 | /* select lower word of LSU1 for this byte */ | 482 | /* select lower word of LSU1 for this byte */ |
| 487 | mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte); | 483 | mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte); |
| 488 | } | 484 | } |
| 489 | ttm = unit >> 2; | 485 | ttm = unit >> 2; |
| 490 | mmcr1 |= (unsigned long)ttm | 486 | mmcr1 |= (unsigned long)ttm |
| 491 | << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); | 487 | << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); |
| 492 | } | 488 | } |
| 493 | 489 | ||
| 494 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ | 490 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ |
| 495 | for (i = 0; i < n_ev; ++i) { | 491 | for (i = 0; i < n_ev; ++i) { |
| 496 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 492 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 497 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | 493 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 498 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | 494 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 499 | psel = event[i] & PM_PMCSEL_MSK; | 495 | psel = event[i] & PM_PMCSEL_MSK; |
| 500 | isbus = event[i] & PM_BUSEVENT_MSK; | 496 | isbus = event[i] & PM_BUSEVENT_MSK; |
| 501 | if (!pmc) { | 497 | if (!pmc) { |
| 502 | /* Bus event or any-PMC direct event */ | 498 | /* Bus event or any-PMC direct event */ |
| 503 | for (pmc = 0; pmc < 4; ++pmc) { | 499 | for (pmc = 0; pmc < 4; ++pmc) { |
| 504 | if (pmc_inuse & (1 << pmc)) | 500 | if (pmc_inuse & (1 << pmc)) |
| 505 | continue; | 501 | continue; |
| 506 | grp = (pmc >> 1) & 1; | 502 | grp = (pmc >> 1) & 1; |
| 507 | if (isbus) { | 503 | if (isbus) { |
| 508 | if (grp == (byte & 1)) | 504 | if (grp == (byte & 1)) |
| 509 | break; | 505 | break; |
| 510 | } else if (pmc_grp_use[grp] < 2) { | 506 | } else if (pmc_grp_use[grp] < 2) { |
| 511 | ++pmc_grp_use[grp]; | 507 | ++pmc_grp_use[grp]; |
| 512 | break; | 508 | break; |
| 513 | } | 509 | } |
| 514 | } | 510 | } |
| 515 | pmc_inuse |= 1 << pmc; | 511 | pmc_inuse |= 1 << pmc; |
| 516 | } else if (pmc <= 4) { | 512 | } else if (pmc <= 4) { |
| 517 | /* Direct event */ | 513 | /* Direct event */ |
| 518 | --pmc; | 514 | --pmc; |
| 519 | if ((psel == 8 || psel == 0x10) && isbus && (byte & 2)) | 515 | if ((psel == 8 || psel == 0x10) && isbus && (byte & 2)) |
| 520 | /* add events on higher-numbered bus */ | 516 | /* add events on higher-numbered bus */ |
| 521 | mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc); | 517 | mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc); |
| 522 | } else { | 518 | } else { |
| 523 | /* Instructions or run cycles on PMC5/6 */ | 519 | /* Instructions or run cycles on PMC5/6 */ |
| 524 | --pmc; | 520 | --pmc; |
| 525 | } | 521 | } |
| 526 | if (isbus && unit == PM_GRS) { | 522 | if (isbus && unit == PM_GRS) { |
| 527 | bit = psel & 7; | 523 | bit = psel & 7; |
| 528 | grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; | 524 | grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; |
| 529 | mmcr1 |= (unsigned long)grsel << grsel_shift[bit]; | 525 | mmcr1 |= (unsigned long)grsel << grsel_shift[bit]; |
| 530 | } | 526 | } |
| 531 | if (power5_marked_instr_event(event[i])) | 527 | if (power5_marked_instr_event(event[i])) |
| 532 | mmcra |= MMCRA_SAMPLE_ENABLE; | 528 | mmcra |= MMCRA_SAMPLE_ENABLE; |
| 533 | if (pmc <= 3) | 529 | if (pmc <= 3) |
| 534 | mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); | 530 | mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); |
| 535 | hwc[i] = pmc; | 531 | hwc[i] = pmc; |
| 536 | } | 532 | } |
| 537 | 533 | ||
| 538 | /* Return MMCRx values */ | 534 | /* Return MMCRx values */ |
| 539 | mmcr[0] = 0; | 535 | mmcr[0] = 0; |
| 540 | if (pmc_inuse & 1) | 536 | if (pmc_inuse & 1) |
| 541 | mmcr[0] = MMCR0_PMC1CE; | 537 | mmcr[0] = MMCR0_PMC1CE; |
| 542 | if (pmc_inuse & 0x3e) | 538 | if (pmc_inuse & 0x3e) |
| 543 | mmcr[0] |= MMCR0_PMCjCE; | 539 | mmcr[0] |= MMCR0_PMCjCE; |
| 544 | mmcr[1] = mmcr1; | 540 | mmcr[1] = mmcr1; |
| 545 | mmcr[2] = mmcra; | 541 | mmcr[2] = mmcra; |
| 546 | return 0; | 542 | return 0; |
| 547 | } | 543 | } |
| 548 | 544 | ||
| 549 | static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | 545 | static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[]) |
| 550 | { | 546 | { |
| 551 | if (pmc <= 3) | 547 | if (pmc <= 3) |
| 552 | mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); | 548 | mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); |
| 553 | } | 549 | } |
| 554 | 550 | ||
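power5_disable_pmc() is the inverse of the PMCSEL packing done in power5_compute_mmcr(): the same MMCR1_PMCSEL_SH(n) shift both places and clears the 7-bit selector. A round-trip sketch using the MMCR1 constants defined earlier in this file (PMC1SEL at bit 25, each selector 8 bits below the previous, 0x7f wide):

/* PMCSEL pack/clear round trip, values hypothetical. */
#include <stdio.h>

#define MMCR1_PMC1SEL_SH        25
#define MMCR1_PMCSEL_SH(n)      (MMCR1_PMC1SEL_SH - (n) * 8)

int main(void)
{
        unsigned long mmcr1 = 0;
        unsigned long psel = 0x0f;      /* hypothetical PMCSEL value */
        int pmc = 2;                    /* 0-based: PMC3 */

        mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);          /* compute_mmcr */
        printf("packed:  mmcr1 = %#lx\n", mmcr1);

        mmcr1 &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));     /* disable_pmc */
        printf("cleared: mmcr1 = %#lx\n", mmcr1);
        return 0;
}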
| 555 | static int power5_generic_events[] = { | 551 | static int power5_generic_events[] = { |
| 556 | [PERF_COUNT_HW_CPU_CYCLES] = 0xf, | 552 | [PERF_COUNT_HW_CPU_CYCLES] = 0xf, |
| 557 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009, | 553 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009, |
| 558 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */ | 554 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */ |
| 559 | [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ | 555 | [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ |
| 560 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ | 556 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ |
| 561 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ | 557 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ |
| 562 | }; | 558 | }; |
| 563 | 559 | ||
| 564 | #define C(x) PERF_COUNT_HW_CACHE_##x | 560 | #define C(x) PERF_COUNT_HW_CACHE_##x |
| 565 | 561 | ||
| 566 | /* | 562 | /* |
| 567 | * Table of generalized cache-related events. | 563 | * Table of generalized cache-related events. |
| 568 | * 0 means not supported, -1 means nonsensical, other values | 564 | * 0 means not supported, -1 means nonsensical, other values |
| 569 | * are event codes. | 565 | * are event codes. |
| 570 | */ | 566 | */ |
| 571 | static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | 567 | static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
| 572 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | 568 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 573 | [C(OP_READ)] = { 0x4c1090, 0x3c1088 }, | 569 | [C(OP_READ)] = { 0x4c1090, 0x3c1088 }, |
| 574 | [C(OP_WRITE)] = { 0x3c1090, 0xc10c3 }, | 570 | [C(OP_WRITE)] = { 0x3c1090, 0xc10c3 }, |
| 575 | [C(OP_PREFETCH)] = { 0xc70e7, 0 }, | 571 | [C(OP_PREFETCH)] = { 0xc70e7, 0 }, |
| 576 | }, | 572 | }, |
| 577 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ | 573 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 578 | [C(OP_READ)] = { 0, 0 }, | 574 | [C(OP_READ)] = { 0, 0 }, |
| 579 | [C(OP_WRITE)] = { -1, -1 }, | 575 | [C(OP_WRITE)] = { -1, -1 }, |
| 580 | [C(OP_PREFETCH)] = { 0, 0 }, | 576 | [C(OP_PREFETCH)] = { 0, 0 }, |
| 581 | }, | 577 | }, |
| 582 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | 578 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 583 | [C(OP_READ)] = { 0, 0x3c309b }, | 579 | [C(OP_READ)] = { 0, 0x3c309b }, |
| 584 | [C(OP_WRITE)] = { 0, 0 }, | 580 | [C(OP_WRITE)] = { 0, 0 }, |
| 585 | [C(OP_PREFETCH)] = { 0xc50c3, 0 }, | 581 | [C(OP_PREFETCH)] = { 0xc50c3, 0 }, |
| 586 | }, | 582 | }, |
| 587 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 583 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 588 | [C(OP_READ)] = { 0x2c4090, 0x800c4 }, | 584 | [C(OP_READ)] = { 0x2c4090, 0x800c4 }, |
| 589 | [C(OP_WRITE)] = { -1, -1 }, | 585 | [C(OP_WRITE)] = { -1, -1 }, |
| 590 | [C(OP_PREFETCH)] = { -1, -1 }, | 586 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 591 | }, | 587 | }, |
| 592 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 588 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 593 | [C(OP_READ)] = { 0, 0x800c0 }, | 589 | [C(OP_READ)] = { 0, 0x800c0 }, |
| 594 | [C(OP_WRITE)] = { -1, -1 }, | 590 | [C(OP_WRITE)] = { -1, -1 }, |
| 595 | [C(OP_PREFETCH)] = { -1, -1 }, | 591 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 596 | }, | 592 | }, |
| 597 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ | 593 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 598 | [C(OP_READ)] = { 0x230e4, 0x230e5 }, | 594 | [C(OP_READ)] = { 0x230e4, 0x230e5 }, |
| 599 | [C(OP_WRITE)] = { -1, -1 }, | 595 | [C(OP_WRITE)] = { -1, -1 }, |
| 600 | [C(OP_PREFETCH)] = { -1, -1 }, | 596 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 601 | }, | 597 | }, |
| 602 | }; | 598 | }; |
| 603 | 599 | ||
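The perf core indexes this table with a (cache, op, result) triple decoded from the user's event config; positive entries are raw event codes, 0 means unsupported, and -1 nonsensical. A lookup sketch over a reduced copy of the L1D row above:

/* Lookup sketch (not kernel code): row = op (read/write/prefetch),
 * column = result (access/miss), taken from the L1D row above. */
#include <stdio.h>

static const int l1d[3][2] = {
        { 0x4c1090, 0x3c1088 },         /* OP_READ     */
        { 0x3c1090, 0xc10c3 },          /* OP_WRITE    */
        { 0xc70e7,  0 },                /* OP_PREFETCH */
};

int main(void)
{
        int op = 0, result = 1;         /* L1D read miss */
        int code = l1d[op][result];

        if (code > 0)
                printf("raw event code: %#x (LD_MISS_L1)\n", code);
        else
                printf("combination unsupported or nonsensical\n");
        return 0;
}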
| 604 | static struct power_pmu power5_pmu = { | 600 | static struct power_pmu power5_pmu = { |
| 605 | .name = "POWER5", | 601 | .name = "POWER5", |
| 606 | .n_counter = 6, | 602 | .n_counter = 6, |
| 607 | .max_alternatives = MAX_ALT, | 603 | .max_alternatives = MAX_ALT, |
| 608 | .add_fields = 0x7000090000555ul, | 604 | .add_fields = 0x7000090000555ul, |
| 609 | .test_adder = 0x3000490000000ul, | 605 | .test_adder = 0x3000490000000ul, |
| 610 | .compute_mmcr = power5_compute_mmcr, | 606 | .compute_mmcr = power5_compute_mmcr, |
| 611 | .get_constraint = power5_get_constraint, | 607 | .get_constraint = power5_get_constraint, |
| 612 | .get_alternatives = power5_get_alternatives, | 608 | .get_alternatives = power5_get_alternatives, |
| 613 | .disable_pmc = power5_disable_pmc, | 609 | .disable_pmc = power5_disable_pmc, |
| 614 | .n_generic = ARRAY_SIZE(power5_generic_events), | 610 | .n_generic = ARRAY_SIZE(power5_generic_events), |
| 615 | .generic_events = power5_generic_events, | 611 | .generic_events = power5_generic_events, |
| 616 | .cache_events = &power5_cache_events, | 612 | .cache_events = &power5_cache_events, |
| 617 | }; | 613 | }; |
| 618 | 614 | ||
| 619 | static int init_power5_pmu(void) | 615 | static int init_power5_pmu(void) |
| 620 | { | 616 | { |
| 621 | if (!cur_cpu_spec->oprofile_cpu_type || | 617 | if (!cur_cpu_spec->oprofile_cpu_type || |
| 622 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5")) | 618 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5")) |
| 623 | return -ENODEV; | 619 | return -ENODEV; |
| 624 | 620 | ||
| 625 | return register_power_pmu(&power5_pmu); | 621 | return register_power_pmu(&power5_pmu); |
| 626 | } | 622 | } |
| 627 | 623 | ||
| 628 | arch_initcall(init_power5_pmu); | 624 | arch_initcall(init_power5_pmu); |
| 629 | 625 |
arch/powerpc/kernel/power6-pmu.c
| 1 | /* | 1 | /* |
| 2 | * Performance counter support for POWER6 processors. | 2 | * Performance counter support for POWER6 processors. |
| 3 | * | 3 | * |
| 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
| 10 | */ | 10 | */ |
| 11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
| 12 | #include <linux/perf_event.h> | 12 | #include <linux/perf_event.h> |
| 13 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| 14 | #include <asm/reg.h> | 14 | #include <asm/reg.h> |
| 15 | #include <asm/cputable.h> | 15 | #include <asm/cputable.h> |
| 16 | 16 | ||
| 17 | /* | 17 | /* |
| 18 | * Bits in event code for POWER6 | 18 | * Bits in event code for POWER6 |
| 19 | */ | 19 | */ |
| 20 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ | 20 | #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ |
| 21 | #define PM_PMC_MSK 0x7 | 21 | #define PM_PMC_MSK 0x7 |
| 22 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) | 22 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) |
| 23 | #define PM_UNIT_SH 16 /* Unit the event comes from (TTMxSEL encoding) */ | 23 | #define PM_UNIT_SH 16 /* Unit the event comes from (TTMxSEL encoding) */ |
| 24 | #define PM_UNIT_MSK 0xf | 24 | #define PM_UNIT_MSK 0xf |
| 25 | #define PM_UNIT_MSKS (PM_UNIT_MSK << PM_UNIT_SH) | 25 | #define PM_UNIT_MSKS (PM_UNIT_MSK << PM_UNIT_SH) |
| 26 | #define PM_LLAV 0x8000 /* Load lookahead match value */ | 26 | #define PM_LLAV 0x8000 /* Load lookahead match value */ |
| 27 | #define PM_LLA 0x4000 /* Load lookahead match enable */ | 27 | #define PM_LLA 0x4000 /* Load lookahead match enable */ |
| 28 | #define PM_BYTE_SH 12 /* Byte of event bus to use */ | 28 | #define PM_BYTE_SH 12 /* Byte of event bus to use */ |
| 29 | #define PM_BYTE_MSK 3 | 29 | #define PM_BYTE_MSK 3 |
| 30 | #define PM_SUBUNIT_SH 8 /* Subunit the event comes from (NEST_SEL enc.) */ | 30 | #define PM_SUBUNIT_SH 8 /* Subunit the event comes from (NEST_SEL enc.) */ |
| 31 | #define PM_SUBUNIT_MSK 7 | 31 | #define PM_SUBUNIT_MSK 7 |
| 32 | #define PM_SUBUNIT_MSKS (PM_SUBUNIT_MSK << PM_SUBUNIT_SH) | 32 | #define PM_SUBUNIT_MSKS (PM_SUBUNIT_MSK << PM_SUBUNIT_SH) |
| 33 | #define PM_PMCSEL_MSK 0xff /* PMCxSEL value */ | 33 | #define PM_PMCSEL_MSK 0xff /* PMCxSEL value */ |
| 34 | #define PM_BUSEVENT_MSK 0xf3700 | 34 | #define PM_BUSEVENT_MSK 0xf3700 |
| 35 | 35 | ||
| 36 | /* | 36 | /* |
| 37 | * Bits in MMCR1 for POWER6 | 37 | * Bits in MMCR1 for POWER6 |
| 38 | */ | 38 | */ |
| 39 | #define MMCR1_TTM0SEL_SH 60 | 39 | #define MMCR1_TTM0SEL_SH 60 |
| 40 | #define MMCR1_TTMSEL_SH(n) (MMCR1_TTM0SEL_SH - (n) * 4) | 40 | #define MMCR1_TTMSEL_SH(n) (MMCR1_TTM0SEL_SH - (n) * 4) |
| 41 | #define MMCR1_TTMSEL_MSK 0xf | 41 | #define MMCR1_TTMSEL_MSK 0xf |
| 42 | #define MMCR1_TTMSEL(m, n) (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK) | 42 | #define MMCR1_TTMSEL(m, n) (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK) |
| 43 | #define MMCR1_NESTSEL_SH 45 | 43 | #define MMCR1_NESTSEL_SH 45 |
| 44 | #define MMCR1_NESTSEL_MSK 0x7 | 44 | #define MMCR1_NESTSEL_MSK 0x7 |
| 45 | #define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK) | 45 | #define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK) |
| 46 | #define MMCR1_PMC1_LLA (1ul << 44) | 46 | #define MMCR1_PMC1_LLA (1ul << 44) |
| 47 | #define MMCR1_PMC1_LLA_VALUE (1ul << 39) | 47 | #define MMCR1_PMC1_LLA_VALUE (1ul << 39) |
| 48 | #define MMCR1_PMC1_ADDR_SEL (1ul << 35) | 48 | #define MMCR1_PMC1_ADDR_SEL (1ul << 35) |
| 49 | #define MMCR1_PMC1SEL_SH 24 | 49 | #define MMCR1_PMC1SEL_SH 24 |
| 50 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) | 50 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) |
| 51 | #define MMCR1_PMCSEL_MSK 0xff | 51 | #define MMCR1_PMCSEL_MSK 0xff |
| 52 | 52 | ||
| 53 | /* | 53 | /* |
| 54 | * Map of which direct events on which PMCs are marked instruction events. | 54 | * Map of which direct events on which PMCs are marked instruction events. |
| 55 | * Indexed by PMCSEL value >> 1. | 55 | * Indexed by PMCSEL value >> 1. |
| 56 | * Bottom 4 bits are a map of which PMCs are interesting, | 56 | * Bottom 4 bits are a map of which PMCs are interesting, |
| 57 | * top 4 bits say what sort of event: | 57 | * top 4 bits say what sort of event: |
| 58 | * 0 = direct marked event, | 58 | * 0 = direct marked event, |
| 59 | * 1 = byte decode event, | 59 | * 1 = byte decode event, |
| 60 | * 4 = add/and event (PMC1 -> bits 0 & 4), | 60 | * 4 = add/and event (PMC1 -> bits 0 & 4), |
| 61 | * 5 = add/and event (PMC1 -> bits 1 & 5), | 61 | * 5 = add/and event (PMC1 -> bits 1 & 5), |
| 62 | * 6 = add/and event (PMC1 -> bits 2 & 6), | 62 | * 6 = add/and event (PMC1 -> bits 2 & 6), |
| 63 | * 7 = add/and event (PMC1 -> bits 3 & 7). | 63 | * 7 = add/and event (PMC1 -> bits 3 & 7). |
| 64 | */ | 64 | */ |
| 65 | static unsigned char direct_event_is_marked[0x60 >> 1] = { | 65 | static unsigned char direct_event_is_marked[0x60 >> 1] = { |
| 66 | 0, /* 00 */ | 66 | 0, /* 00 */ |
| 67 | 0, /* 02 */ | 67 | 0, /* 02 */ |
| 68 | 0, /* 04 */ | 68 | 0, /* 04 */ |
| 69 | 0x07, /* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ | 69 | 0x07, /* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ |
| 70 | 0x04, /* 08 PM_MRK_DFU_FIN */ | 70 | 0x04, /* 08 PM_MRK_DFU_FIN */ |
| 71 | 0x06, /* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */ | 71 | 0x06, /* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */ |
| 72 | 0, /* 0c */ | 72 | 0, /* 0c */ |
| 73 | 0, /* 0e */ | 73 | 0, /* 0e */ |
| 74 | 0x02, /* 10 PM_MRK_INST_DISP */ | 74 | 0x02, /* 10 PM_MRK_INST_DISP */ |
| 75 | 0x08, /* 12 PM_MRK_LSU_DERAT_MISS */ | 75 | 0x08, /* 12 PM_MRK_LSU_DERAT_MISS */ |
| 76 | 0, /* 14 */ | 76 | 0, /* 14 */ |
| 77 | 0, /* 16 */ | 77 | 0, /* 16 */ |
| 78 | 0x0c, /* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */ | 78 | 0x0c, /* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */ |
| 79 | 0x0f, /* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */ | 79 | 0x0f, /* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */ |
| 80 | 0x01, /* 1c PM_MRK_INST_ISSUED */ | 80 | 0x01, /* 1c PM_MRK_INST_ISSUED */ |
| 81 | 0, /* 1e */ | 81 | 0, /* 1e */ |
| 82 | 0, /* 20 */ | 82 | 0, /* 20 */ |
| 83 | 0, /* 22 */ | 83 | 0, /* 22 */ |
| 84 | 0, /* 24 */ | 84 | 0, /* 24 */ |
| 85 | 0, /* 26 */ | 85 | 0, /* 26 */ |
| 86 | 0x15, /* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */ | 86 | 0x15, /* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */ |
| 87 | 0, /* 2a */ | 87 | 0, /* 2a */ |
| 88 | 0, /* 2c */ | 88 | 0, /* 2c */ |
| 89 | 0, /* 2e */ | 89 | 0, /* 2e */ |
| 90 | 0x4f, /* 30 */ | 90 | 0x4f, /* 30 */ |
| 91 | 0x7f, /* 32 */ | 91 | 0x7f, /* 32 */ |
| 92 | 0x4f, /* 34 */ | 92 | 0x4f, /* 34 */ |
| 93 | 0x5f, /* 36 */ | 93 | 0x5f, /* 36 */ |
| 94 | 0x6f, /* 38 */ | 94 | 0x6f, /* 38 */ |
| 95 | 0x4f, /* 3a */ | 95 | 0x4f, /* 3a */ |
| 96 | 0, /* 3c */ | 96 | 0, /* 3c */ |
| 97 | 0x08, /* 3e PM_MRK_INST_TIMEO */ | 97 | 0x08, /* 3e PM_MRK_INST_TIMEO */ |
| 98 | 0x1f, /* 40 */ | 98 | 0x1f, /* 40 */ |
| 99 | 0x1f, /* 42 */ | 99 | 0x1f, /* 42 */ |
| 100 | 0x1f, /* 44 */ | 100 | 0x1f, /* 44 */ |
| 101 | 0x1f, /* 46 */ | 101 | 0x1f, /* 46 */ |
| 102 | 0x1f, /* 48 */ | 102 | 0x1f, /* 48 */ |
| 103 | 0x1f, /* 4a */ | 103 | 0x1f, /* 4a */ |
| 104 | 0x1f, /* 4c */ | 104 | 0x1f, /* 4c */ |
| 105 | 0x1f, /* 4e */ | 105 | 0x1f, /* 4e */ |
| 106 | 0, /* 50 */ | 106 | 0, /* 50 */ |
| 107 | 0x05, /* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */ | 107 | 0x05, /* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */ |
| 108 | 0x1c, /* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */ | 108 | 0x1c, /* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */ |
| 109 | 0x02, /* 56 PM_MRK_LD_MISS_L1 */ | 109 | 0x02, /* 56 PM_MRK_LD_MISS_L1 */ |
| 110 | 0, /* 58 */ | 110 | 0, /* 58 */ |
| 111 | 0, /* 5a */ | 111 | 0, /* 5a */ |
| 112 | 0, /* 5c */ | 112 | 0, /* 5c */ |
| 113 | 0, /* 5e */ | 113 | 0, /* 5e */ |
| 114 | }; | 114 | }; |
| 115 | 115 | ||
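Each entry packs a PMC map in its low nibble and an event type in its high nibble, exactly as the block comment above describes. Decoding the 0x5f entry at table slot /* 36 */ (PMCSEL 0x36, array index 0x36 >> 1): type 5 is an add/and event valid on PMC1-4, and the bus bit the marked test uses is ptype ^ (pmc - 1), the same rule applied by power6_marked_instr_event() below. A decode sketch:

/* Decode sketch for one direct_event_is_marked entry, following the
 * encoding in the comment above: low nibble = which PMCs the entry
 * applies to, high nibble = event type; for add/and types (4-7) the
 * bus bit is ptype ^ (pmc - 1). */
#include <stdio.h>

int main(void)
{
        unsigned char entry = 0x5f;     /* table slot 0x36 >> 1 above */
        int ptype = entry >> 4;
        int pmc;

        for (pmc = 1; pmc <= 4; ++pmc) {
                if (!(entry & (1 << (pmc - 1))))
                        continue;       /* entry not valid on this PMC */
                printf("PMC%d: bus bit %d\n", pmc, ptype ^ (pmc - 1));
        }
        return 0;
}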
| 116 | /* | 116 | /* |
| 117 | * Masks showing for each unit which bits are marked events. | 117 | * Masks showing for each unit which bits are marked events. |
| 118 | * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0. | 118 | * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0. |
| 119 | */ | 119 | */ |
| 120 | static u32 marked_bus_events[16] = { | 120 | static u32 marked_bus_events[16] = { |
| 121 | 0x01000000, /* direct events set 1: byte 3 bit 0 */ | 121 | 0x01000000, /* direct events set 1: byte 3 bit 0 */ |
| 122 | 0x00010000, /* direct events set 2: byte 2 bit 0 */ | 122 | 0x00010000, /* direct events set 2: byte 2 bit 0 */ |
| 123 | 0, 0, 0, 0, /* IDU, IFU, nest: nothing */ | 123 | 0, 0, 0, 0, /* IDU, IFU, nest: nothing */ |
| 124 | 0x00000088, /* VMX set 1: byte 0 bits 3, 7 */ | 124 | 0x00000088, /* VMX set 1: byte 0 bits 3, 7 */ |
| 125 | 0x000000c0, /* VMX set 2: byte 0 bits 4-7 */ | 125 | 0x000000c0, /* VMX set 2: byte 0 bits 4-7 */ |
| 126 | 0x04010000, /* LSU set 1: byte 2 bit 0, byte 3 bit 2 */ | 126 | 0x04010000, /* LSU set 1: byte 2 bit 0, byte 3 bit 2 */ |
| 127 | 0xff010000u, /* LSU set 2: byte 2 bit 0, all of byte 3 */ | 127 | 0xff010000u, /* LSU set 2: byte 2 bit 0, all of byte 3 */ |
| 128 | 0, /* LSU set 3 */ | 128 | 0, /* LSU set 3 */ |
| 129 | 0x00000010, /* VMX set 3: byte 0 bit 4 */ | 129 | 0x00000010, /* VMX set 3: byte 0 bit 4 */ |
| 130 | 0, /* BFP set 1 */ | 130 | 0, /* BFP set 1 */ |
| 131 | 0x00000022, /* BFP set 2: byte 0 bits 1, 5 */ | 131 | 0x00000022, /* BFP set 2: byte 0 bits 1, 5 */ |
| 132 | 0, 0 | 132 | 0, 0 |
| 133 | }; | 133 | }; |
| 134 | 134 | ||
| 135 | /* | 135 | /* |
| 136 | * Returns 1 if event counts things relating to marked instructions | 136 | * Returns 1 if event counts things relating to marked instructions |
| 137 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. | 137 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. |
| 138 | */ | 138 | */ |
| 139 | static int power6_marked_instr_event(u64 event) | 139 | static int power6_marked_instr_event(u64 event) |
| 140 | { | 140 | { |
| 141 | int pmc, psel, ptype; | 141 | int pmc, psel, ptype; |
| 142 | int bit, byte, unit; | 142 | int bit, byte, unit; |
| 143 | u32 mask; | 143 | u32 mask; |
| 144 | 144 | ||
| 145 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 145 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 146 | psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */ | 146 | psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */ |
| 147 | if (pmc >= 5) | 147 | if (pmc >= 5) |
| 148 | return 0; | 148 | return 0; |
| 149 | 149 | ||
| 150 | bit = -1; | 150 | bit = -1; |
| 151 | if (psel < sizeof(direct_event_is_marked)) { | 151 | if (psel < sizeof(direct_event_is_marked)) { |
| 152 | ptype = direct_event_is_marked[psel]; | 152 | ptype = direct_event_is_marked[psel]; |
| 153 | if (pmc == 0 || !(ptype & (1 << (pmc - 1)))) | 153 | if (pmc == 0 || !(ptype & (1 << (pmc - 1)))) |
| 154 | return 0; | 154 | return 0; |
| 155 | ptype >>= 4; | 155 | ptype >>= 4; |
| 156 | if (ptype == 0) | 156 | if (ptype == 0) |
| 157 | return 1; | 157 | return 1; |
| 158 | if (ptype == 1) | 158 | if (ptype == 1) |
| 159 | bit = 0; | 159 | bit = 0; |
| 160 | else | 160 | else |
| 161 | bit = ptype ^ (pmc - 1); | 161 | bit = ptype ^ (pmc - 1); |
| 162 | } else if ((psel & 0x48) == 0x40) | 162 | } else if ((psel & 0x48) == 0x40) |
| 163 | bit = psel & 7; | 163 | bit = psel & 7; |
| 164 | 164 | ||
| 165 | if (!(event & PM_BUSEVENT_MSK) || bit == -1) | 165 | if (!(event & PM_BUSEVENT_MSK) || bit == -1) |
| 166 | return 0; | 166 | return 0; |
| 167 | 167 | ||
| 168 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | 168 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 169 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | 169 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 170 | mask = marked_bus_events[unit]; | 170 | mask = marked_bus_events[unit]; |
| 171 | return (mask >> (byte * 8 + bit)) & 1; | 171 | return (mask >> (byte * 8 + bit)) & 1; |
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | /* | 174 | /* |
| 175 | * Assign PMC numbers and compute MMCR1 value for a set of events | 175 | * Assign PMC numbers and compute MMCR1 value for a set of events |
| 176 | */ | 176 | */ |
| 177 | static int p6_compute_mmcr(u64 event[], int n_ev, | 177 | static int p6_compute_mmcr(u64 event[], int n_ev, |
| 178 | unsigned int hwc[], unsigned long mmcr[]) | 178 | unsigned int hwc[], unsigned long mmcr[]) |
| 179 | { | 179 | { |
| 180 | unsigned long mmcr1 = 0; | 180 | unsigned long mmcr1 = 0; |
| 181 | unsigned long mmcra = 0; | 181 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
| 182 | int i; | 182 | int i; |
| 183 | unsigned int pmc, ev, b, u, s, psel; | 183 | unsigned int pmc, ev, b, u, s, psel; |
| 184 | unsigned int ttmset = 0; | 184 | unsigned int ttmset = 0; |
| 185 | unsigned int pmc_inuse = 0; | 185 | unsigned int pmc_inuse = 0; |
| 186 | 186 | ||
| 187 | if (n_ev > 6) | 187 | if (n_ev > 6) |
| 188 | return -1; | 188 | return -1; |
| 189 | for (i = 0; i < n_ev; ++i) { | 189 | for (i = 0; i < n_ev; ++i) { |
| 190 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 190 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 191 | if (pmc) { | 191 | if (pmc) { |
| 192 | if (pmc_inuse & (1 << (pmc - 1))) | 192 | if (pmc_inuse & (1 << (pmc - 1))) |
| 193 | return -1; /* collision! */ | 193 | return -1; /* collision! */ |
| 194 | pmc_inuse |= 1 << (pmc - 1); | 194 | pmc_inuse |= 1 << (pmc - 1); |
| 195 | } | 195 | } |
| 196 | } | 196 | } |
| 197 | for (i = 0; i < n_ev; ++i) { | 197 | for (i = 0; i < n_ev; ++i) { |
| 198 | ev = event[i]; | 198 | ev = event[i]; |
| 199 | pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK; | 199 | pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK; |
| 200 | if (pmc) { | 200 | if (pmc) { |
| 201 | --pmc; | 201 | --pmc; |
| 202 | } else { | 202 | } else { |
| 203 | /* can go on any PMC; find a free one */ | 203 | /* can go on any PMC; find a free one */ |
| 204 | for (pmc = 0; pmc < 4; ++pmc) | 204 | for (pmc = 0; pmc < 4; ++pmc) |
| 205 | if (!(pmc_inuse & (1 << pmc))) | 205 | if (!(pmc_inuse & (1 << pmc))) |
| 206 | break; | 206 | break; |
| 207 | if (pmc >= 4) | 207 | if (pmc >= 4) |
| 208 | return -1; | 208 | return -1; |
| 209 | pmc_inuse |= 1 << pmc; | 209 | pmc_inuse |= 1 << pmc; |
| 210 | } | 210 | } |
| 211 | hwc[i] = pmc; | 211 | hwc[i] = pmc; |
| 212 | psel = ev & PM_PMCSEL_MSK; | 212 | psel = ev & PM_PMCSEL_MSK; |
| 213 | if (ev & PM_BUSEVENT_MSK) { | 213 | if (ev & PM_BUSEVENT_MSK) { |
| 214 | /* this event uses the event bus */ | 214 | /* this event uses the event bus */ |
| 215 | b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK; | 215 | b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 216 | u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK; | 216 | u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 217 | /* check for conflict on this byte of event bus */ | 217 | /* check for conflict on this byte of event bus */ |
| 218 | if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u) | 218 | if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u) |
| 219 | return -1; | 219 | return -1; |
| 220 | mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b); | 220 | mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b); |
| 221 | ttmset |= 1 << b; | 221 | ttmset |= 1 << b; |
| 222 | if (u == 5) { | 222 | if (u == 5) { |
| 223 | /* Nest events have a further mux */ | 223 | /* Nest events have a further mux */ |
| 224 | s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; | 224 | s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; |
| 225 | if ((ttmset & 0x10) && | 225 | if ((ttmset & 0x10) && |
| 226 | MMCR1_NESTSEL(mmcr1) != s) | 226 | MMCR1_NESTSEL(mmcr1) != s) |
| 227 | return -1; | 227 | return -1; |
| 228 | ttmset |= 0x10; | 228 | ttmset |= 0x10; |
| 229 | mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH; | 229 | mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH; |
| 230 | } | 230 | } |
| 231 | if (0x30 <= psel && psel <= 0x3d) { | 231 | if (0x30 <= psel && psel <= 0x3d) { |
| 232 | /* these need the PMCx_ADDR_SEL bits */ | 232 | /* these need the PMCx_ADDR_SEL bits */ |
| 233 | if (b >= 2) | 233 | if (b >= 2) |
| 234 | mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc; | 234 | mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc; |
| 235 | } | 235 | } |
| 236 | /* bus select values are different for PMC3/4 */ | 236 | /* bus select values are different for PMC3/4 */ |
| 237 | if (pmc >= 2 && (psel & 0x90) == 0x80) | 237 | if (pmc >= 2 && (psel & 0x90) == 0x80) |
| 238 | psel ^= 0x20; | 238 | psel ^= 0x20; |
| 239 | } | 239 | } |
| 240 | if (ev & PM_LLA) { | 240 | if (ev & PM_LLA) { |
| 241 | mmcr1 |= MMCR1_PMC1_LLA >> pmc; | 241 | mmcr1 |= MMCR1_PMC1_LLA >> pmc; |
| 242 | if (ev & PM_LLAV) | 242 | if (ev & PM_LLAV) |
| 243 | mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc; | 243 | mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc; |
| 244 | } | 244 | } |
| 245 | if (power6_marked_instr_event(event[i])) | 245 | if (power6_marked_instr_event(event[i])) |
| 246 | mmcra |= MMCRA_SAMPLE_ENABLE; | 246 | mmcra |= MMCRA_SAMPLE_ENABLE; |
| 247 | if (pmc < 4) | 247 | if (pmc < 4) |
| 248 | mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc); | 248 | mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc); |
| 249 | } | 249 | } |
| 250 | mmcr[0] = 0; | 250 | mmcr[0] = 0; |
| 251 | if (pmc_inuse & 1) | 251 | if (pmc_inuse & 1) |
| 252 | mmcr[0] = MMCR0_PMC1CE; | 252 | mmcr[0] = MMCR0_PMC1CE; |
| 253 | if (pmc_inuse & 0xe) | 253 | if (pmc_inuse & 0xe) |
| 254 | mmcr[0] |= MMCR0_PMCjCE; | 254 | mmcr[0] |= MMCR0_PMCjCE; |
| 255 | mmcr[1] = mmcr1; | 255 | mmcr[1] = mmcr1; |
| 256 | mmcr[2] = mmcra; | 256 | mmcr[2] = mmcra; |
| 257 | return 0; | 257 | return 0; |
| 258 | } | 258 | } |
| 259 | 259 | ||
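One subtlety in p6_compute_mmcr() above: bus-event PMCSEL encodings differ between PMC1/2 and PMC3/4, so psel ^= 0x20 flips bit 5 whenever a selector with (psel & 0x90) == 0x80 lands on PMC3 or PMC4 (pmc >= 2 after the 0-based conversion). A short demonstration of the flip, with a hypothetical selector value:

/* PMC3/4 bus-select adjustment: selectors matching
 * (psel & 0x90) == 0x80 flip bit 5 on PMC3/PMC4. */
#include <stdio.h>

int main(void)
{
        unsigned int psel = 0x80;       /* hypothetical bus selector */
        int pmc = 2;                    /* 0-based: PMC3 */

        if (pmc >= 2 && (psel & 0x90) == 0x80)
                psel ^= 0x20;           /* 0x80 -> 0xa0 */
        printf("PMCSEL on PMC%d: %#x\n", pmc + 1, psel);
        return 0;
}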
| 260 | /* | 260 | /* |
| 261 | * Layout of constraint bits: | 261 | * Layout of constraint bits: |
| 262 | * | 262 | * |
| 263 | * 0-1 add field: number of uses of PMC1 (max 1) | 263 | * 0-1 add field: number of uses of PMC1 (max 1) |
| 264 | * 2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6 | 264 | * 2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6 |
| 265 | * 12-15 add field: number of uses of PMC1-4 (max 4) | 265 | * 12-15 add field: number of uses of PMC1-4 (max 4) |
| 266 | * 16-19 select field: unit on byte 0 of event bus | 266 | * 16-19 select field: unit on byte 0 of event bus |
| 267 | * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3 | 267 | * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3 |
| 268 | * 32-34 select field: nest (subunit) event selector | 268 | * 32-34 select field: nest (subunit) event selector |
| 269 | */ | 269 | */ |
| 270 | static int p6_get_constraint(u64 event, unsigned long *maskp, | 270 | static int p6_get_constraint(u64 event, unsigned long *maskp, |
| 271 | unsigned long *valp) | 271 | unsigned long *valp) |
| 272 | { | 272 | { |
| 273 | int pmc, byte, sh, subunit; | 273 | int pmc, byte, sh, subunit; |
| 274 | unsigned long mask = 0, value = 0; | 274 | unsigned long mask = 0, value = 0; |
| 275 | 275 | ||
| 276 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 276 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 277 | if (pmc) { | 277 | if (pmc) { |
| 278 | if (pmc > 4 && !(event == 0x500009 || event == 0x600005)) | 278 | if (pmc > 4 && !(event == 0x500009 || event == 0x600005)) |
| 279 | return -1; | 279 | return -1; |
| 280 | sh = (pmc - 1) * 2; | 280 | sh = (pmc - 1) * 2; |
| 281 | mask |= 2 << sh; | 281 | mask |= 2 << sh; |
| 282 | value |= 1 << sh; | 282 | value |= 1 << sh; |
| 283 | } | 283 | } |
| 284 | if (event & PM_BUSEVENT_MSK) { | 284 | if (event & PM_BUSEVENT_MSK) { |
| 285 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | 285 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 286 | sh = byte * 4 + (16 - PM_UNIT_SH); | 286 | sh = byte * 4 + (16 - PM_UNIT_SH); |
| 287 | mask |= PM_UNIT_MSKS << sh; | 287 | mask |= PM_UNIT_MSKS << sh; |
| 288 | value |= (unsigned long)(event & PM_UNIT_MSKS) << sh; | 288 | value |= (unsigned long)(event & PM_UNIT_MSKS) << sh; |
| 289 | if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { | 289 | if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) { |
| 290 | subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; | 290 | subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK; |
| 291 | mask |= (unsigned long)PM_SUBUNIT_MSK << 32; | 291 | mask |= (unsigned long)PM_SUBUNIT_MSK << 32; |
| 292 | value |= (unsigned long)subunit << 32; | 292 | value |= (unsigned long)subunit << 32; |
| 293 | } | 293 | } |
| 294 | } | 294 | } |
| 295 | if (pmc <= 4) { | 295 | if (pmc <= 4) { |
| 296 | mask |= 0x8000; /* add field for count of PMC1-4 uses */ | 296 | mask |= 0x8000; /* add field for count of PMC1-4 uses */ |
| 297 | value |= 0x1000; | 297 | value |= 0x1000; |
| 298 | } | 298 | } |
| 299 | *maskp = mask; | 299 | *maskp = mask; |
| 300 | *valp = value; | 300 | *valp = value; |
| 301 | return 0; | 301 | return 0; |
| 302 | } | 302 | } |
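The (mask, value) pair built here is consumed by the generic powerpc constraint checker (power_check_constraints() in the core perf code), which sums per-event values and lets an over-committed field carry into a checked bit. Below is a minimal standalone sketch of that mechanism, not kernel code: the helper name and the two example events are made up, using POWER6's add_fields (0x1555) and test_adder (0x3000) from later in this file.

	#include <stdio.h>

	/*
	 * Sketch only.  Each event adds 1 into its 2-bit PMC field and
	 * 0x1000 into the 12-15 "uses of PMC1-4" field; a second event
	 * on the same PMC carries into a masked bit and is rejected.
	 */
	static int constraints_ok(unsigned long *masks, unsigned long *values,
				  int n, unsigned long add_fields,
				  unsigned long test_adder)
	{
		unsigned long value = 0, mask = 0, nv;
		int i;

		for (i = 0; i < n; ++i) {
			/* add the "add fields" with carry between events */
			nv = (value | values[i]) +
				(value & values[i] & add_fields);
			/* a carry into any checked bit means a conflict */
			if ((((nv + test_adder) ^ value) & mask) ||
			    (((nv + test_adder) ^ values[i]) & masks[i]))
				return 0;
			value = nv;
			mask |= masks[i];
		}
		return 1;
	}

	int main(void)
	{
		/* two hypothetical events both constrained to PMC1 */
		unsigned long m[2] = { 0x8002, 0x8002 };
		unsigned long v[2] = { 0x1001, 0x1001 };

		/* prints 0: the PMC1 use count overflows its 2-bit field */
		printf("%d\n", constraints_ok(m, v, 2, 0x1555, 0x3000));
		return 0;
	}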
| 303 | 303 | ||
| 304 | static int p6_limited_pmc_event(u64 event) | 304 | static int p6_limited_pmc_event(u64 event) |
| 305 | { | 305 | { |
| 306 | int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 306 | int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 307 | 307 | ||
| 308 | return pmc == 5 || pmc == 6; | 308 | return pmc == 5 || pmc == 6; |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | #define MAX_ALT 4 /* at most 4 alternatives for any event */ | 311 | #define MAX_ALT 4 /* at most 4 alternatives for any event */ |
| 312 | 312 | ||
| 313 | static const unsigned int event_alternatives[][MAX_ALT] = { | 313 | static const unsigned int event_alternatives[][MAX_ALT] = { |
| 314 | { 0x0130e8, 0x2000f6, 0x3000fc }, /* PM_PTEG_RELOAD_VALID */ | 314 | { 0x0130e8, 0x2000f6, 0x3000fc }, /* PM_PTEG_RELOAD_VALID */ |
| 315 | { 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */ | 315 | { 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */ |
| 316 | { 0x080088, 0x200054, 0x3000f0 }, /* PM_ST_MISS_L1 */ | 316 | { 0x080088, 0x200054, 0x3000f0 }, /* PM_ST_MISS_L1 */ |
| 317 | { 0x10000a, 0x2000f4, 0x600005 }, /* PM_RUN_CYC */ | 317 | { 0x10000a, 0x2000f4, 0x600005 }, /* PM_RUN_CYC */ |
| 318 | { 0x10000b, 0x2000f5 }, /* PM_RUN_COUNT */ | 318 | { 0x10000b, 0x2000f5 }, /* PM_RUN_COUNT */ |
| 319 | { 0x10000e, 0x400010 }, /* PM_PURR */ | 319 | { 0x10000e, 0x400010 }, /* PM_PURR */ |
| 320 | { 0x100010, 0x4000f8 }, /* PM_FLUSH */ | 320 | { 0x100010, 0x4000f8 }, /* PM_FLUSH */ |
| 321 | { 0x10001a, 0x200010 }, /* PM_MRK_INST_DISP */ | 321 | { 0x10001a, 0x200010 }, /* PM_MRK_INST_DISP */ |
| 322 | { 0x100026, 0x3000f8 }, /* PM_TB_BIT_TRANS */ | 322 | { 0x100026, 0x3000f8 }, /* PM_TB_BIT_TRANS */ |
| 323 | { 0x100054, 0x2000f0 }, /* PM_ST_FIN */ | 323 | { 0x100054, 0x2000f0 }, /* PM_ST_FIN */ |
| 324 | { 0x100056, 0x2000fc }, /* PM_L1_ICACHE_MISS */ | 324 | { 0x100056, 0x2000fc }, /* PM_L1_ICACHE_MISS */ |
| 325 | { 0x1000f0, 0x40000a }, /* PM_INST_IMC_MATCH_CMPL */ | 325 | { 0x1000f0, 0x40000a }, /* PM_INST_IMC_MATCH_CMPL */ |
| 326 | { 0x1000f8, 0x200008 }, /* PM_GCT_EMPTY_CYC */ | 326 | { 0x1000f8, 0x200008 }, /* PM_GCT_EMPTY_CYC */ |
| 327 | { 0x1000fc, 0x400006 }, /* PM_LSU_DERAT_MISS_CYC */ | 327 | { 0x1000fc, 0x400006 }, /* PM_LSU_DERAT_MISS_CYC */ |
| 328 | { 0x20000e, 0x400007 }, /* PM_LSU_DERAT_MISS */ | 328 | { 0x20000e, 0x400007 }, /* PM_LSU_DERAT_MISS */ |
| 329 | { 0x200012, 0x300012 }, /* PM_INST_DISP */ | 329 | { 0x200012, 0x300012 }, /* PM_INST_DISP */ |
| 330 | { 0x2000f2, 0x3000f2 }, /* PM_INST_DISP */ | 330 | { 0x2000f2, 0x3000f2 }, /* PM_INST_DISP */ |
| 331 | { 0x2000f8, 0x300010 }, /* PM_EXT_INT */ | 331 | { 0x2000f8, 0x300010 }, /* PM_EXT_INT */ |
| 332 | { 0x2000fe, 0x300056 }, /* PM_DATA_FROM_L2MISS */ | 332 | { 0x2000fe, 0x300056 }, /* PM_DATA_FROM_L2MISS */ |
| 333 | { 0x2d0030, 0x30001a }, /* PM_MRK_FPU_FIN */ | 333 | { 0x2d0030, 0x30001a }, /* PM_MRK_FPU_FIN */ |
| 334 | { 0x30000a, 0x400018 }, /* PM_MRK_INST_FIN */ | 334 | { 0x30000a, 0x400018 }, /* PM_MRK_INST_FIN */ |
| 335 | { 0x3000f6, 0x40000e }, /* PM_L1_DCACHE_RELOAD_VALID */ | 335 | { 0x3000f6, 0x40000e }, /* PM_L1_DCACHE_RELOAD_VALID */ |
| 336 | { 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */ | 336 | { 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */ |
| 337 | }; | 337 | }; |
| 338 | 338 | ||
| 339 | /* | 339 | /* |
| 340 | * This could be made more efficient with a binary search on | 340 | * This could be made more efficient with a binary search on |
| 341 | * a presorted list, if necessary | 341 | * a presorted list, if necessary |
| 342 | */ | 342 | */ |
| 343 | static int find_alternatives_list(u64 event) | 343 | static int find_alternatives_list(u64 event) |
| 344 | { | 344 | { |
| 345 | int i, j; | 345 | int i, j; |
| 346 | unsigned int alt; | 346 | unsigned int alt; |
| 347 | 347 | ||
| 348 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { | 348 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { |
| 349 | if (event < event_alternatives[i][0]) | 349 | if (event < event_alternatives[i][0]) |
| 350 | return -1; | 350 | return -1; |
| 351 | for (j = 0; j < MAX_ALT; ++j) { | 351 | for (j = 0; j < MAX_ALT; ++j) { |
| 352 | alt = event_alternatives[i][j]; | 352 | alt = event_alternatives[i][j]; |
| 353 | if (!alt || event < alt) | 353 | if (!alt || event < alt) |
| 354 | break; | 354 | break; |
| 355 | if (event == alt) | 355 | if (event == alt) |
| 356 | return i; | 356 | return i; |
| 357 | } | 357 | } |
| 358 | } | 358 | } |
| 359 | return -1; | 359 | return -1; |
| 360 | } | 360 | } |
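One shape the binary search suggested above could take (illustrative only, not in the kernel): because an event may appear as a non-first alternative of an earlier row, the table would first be flattened into (event, row) pairs presorted by event code. The struct, array, and function names below are hypothetical.

	struct alt_ent {
		unsigned int event;
		int row;		/* index into event_alternatives[] */
	};

	/* flattened, presorted copy of event_alternatives[]; built once (not shown) */
	static struct alt_ent alt_index[ARRAY_SIZE(event_alternatives) * MAX_ALT];
	static int alt_index_len;

	static int find_alternatives_list_bsearch(u64 event)
	{
		int lo = 0, hi = alt_index_len - 1;

		while (lo <= hi) {
			int mid = lo + (hi - lo) / 2;

			if (event < alt_index[mid].event)
				hi = mid - 1;
			else if (event > alt_index[mid].event)
				lo = mid + 1;
			else
				return alt_index[mid].row;
		}
		return -1;
	}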
| 361 | 361 | ||
| 362 | static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) | 362 | static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) |
| 363 | { | 363 | { |
| 364 | int i, j, nlim; | 364 | int i, j, nlim; |
| 365 | unsigned int psel, pmc; | 365 | unsigned int psel, pmc; |
| 366 | unsigned int nalt = 1; | 366 | unsigned int nalt = 1; |
| 367 | u64 aevent; | 367 | u64 aevent; |
| 368 | 368 | ||
| 369 | alt[0] = event; | 369 | alt[0] = event; |
| 370 | nlim = p6_limited_pmc_event(event); | 370 | nlim = p6_limited_pmc_event(event); |
| 371 | 371 | ||
| 372 | /* check the alternatives table */ | 372 | /* check the alternatives table */ |
| 373 | i = find_alternatives_list(event); | 373 | i = find_alternatives_list(event); |
| 374 | if (i >= 0) { | 374 | if (i >= 0) { |
| 375 | /* copy out alternatives from list */ | 375 | /* copy out alternatives from list */ |
| 376 | for (j = 0; j < MAX_ALT; ++j) { | 376 | for (j = 0; j < MAX_ALT; ++j) { |
| 377 | aevent = event_alternatives[i][j]; | 377 | aevent = event_alternatives[i][j]; |
| 378 | if (!aevent) | 378 | if (!aevent) |
| 379 | break; | 379 | break; |
| 380 | if (aevent != event) | 380 | if (aevent != event) |
| 381 | alt[nalt++] = aevent; | 381 | alt[nalt++] = aevent; |
| 382 | nlim += p6_limited_pmc_event(aevent); | 382 | nlim += p6_limited_pmc_event(aevent); |
| 383 | } | 383 | } |
| 384 | 384 | ||
| 385 | } else { | 385 | } else { |
| 386 | /* Check for alternative ways of computing sum events */ | 386 | /* Check for alternative ways of computing sum events */ |
| 387 | /* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */ | 387 | /* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */ |
| 388 | psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */ | 388 | psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */ |
| 389 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 389 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 390 | if (pmc && (psel == 0x32 || psel == 0x34)) | 390 | if (pmc && (psel == 0x32 || psel == 0x34)) |
| 391 | alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) | | 391 | alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) | |
| 392 | ((5 - pmc) << PM_PMC_SH); | 392 | ((5 - pmc) << PM_PMC_SH); |
| 393 | 393 | ||
| 394 | /* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */ | 394 | /* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */ |
| 395 | if (pmc && (psel == 0x38 || psel == 0x3a)) | 395 | if (pmc && (psel == 0x38 || psel == 0x3a)) |
| 396 | alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) | | 396 | alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) | |
| 397 | ((pmc > 2 ? pmc - 2 : pmc + 2) << PM_PMC_SH); | 397 | ((pmc > 2 ? pmc - 2 : pmc + 2) << PM_PMC_SH); |
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | if (flags & PPMU_ONLY_COUNT_RUN) { | 400 | if (flags & PPMU_ONLY_COUNT_RUN) { |
| 401 | /* | 401 | /* |
| 402 | * We're only counting in RUN state, | 402 | * We're only counting in RUN state, |
| 403 | * so PM_CYC is equivalent to PM_RUN_CYC, | 403 | * so PM_CYC is equivalent to PM_RUN_CYC, |
| 404 | * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR. | 404 | * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR. |
| 405 | * This doesn't include alternatives that don't provide | 405 | * This doesn't include alternatives that don't provide |
| 406 | * any extra flexibility in assigning PMCs (e.g. | 406 | * any extra flexibility in assigning PMCs (e.g. |
| 407 | * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC). | 407 | * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC). |
| 408 | * Note that even with these additional alternatives | 408 | * Note that even with these additional alternatives |
| 409 | * we never end up with more than 4 alternatives for any event. | 409 | * we never end up with more than 4 alternatives for any event. |
| 410 | */ | 410 | */ |
| 411 | j = nalt; | 411 | j = nalt; |
| 412 | for (i = 0; i < nalt; ++i) { | 412 | for (i = 0; i < nalt; ++i) { |
| 413 | switch (alt[i]) { | 413 | switch (alt[i]) { |
| 414 | case 0x1e: /* PM_CYC */ | 414 | case 0x1e: /* PM_CYC */ |
| 415 | alt[j++] = 0x600005; /* PM_RUN_CYC */ | 415 | alt[j++] = 0x600005; /* PM_RUN_CYC */ |
| 416 | ++nlim; | 416 | ++nlim; |
| 417 | break; | 417 | break; |
| 418 | case 0x10000a: /* PM_RUN_CYC */ | 418 | case 0x10000a: /* PM_RUN_CYC */ |
| 419 | alt[j++] = 0x1e; /* PM_CYC */ | 419 | alt[j++] = 0x1e; /* PM_CYC */ |
| 420 | break; | 420 | break; |
| 421 | case 2: /* PM_INST_CMPL */ | 421 | case 2: /* PM_INST_CMPL */ |
| 422 | alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */ | 422 | alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */ |
| 423 | ++nlim; | 423 | ++nlim; |
| 424 | break; | 424 | break; |
| 425 | case 0x500009: /* PM_RUN_INST_CMPL */ | 425 | case 0x500009: /* PM_RUN_INST_CMPL */ |
| 426 | alt[j++] = 2; /* PM_INST_CMPL */ | 426 | alt[j++] = 2; /* PM_INST_CMPL */ |
| 427 | break; | 427 | break; |
| 428 | case 0x10000e: /* PM_PURR */ | 428 | case 0x10000e: /* PM_PURR */ |
| 429 | alt[j++] = 0x4000f4; /* PM_RUN_PURR */ | 429 | alt[j++] = 0x4000f4; /* PM_RUN_PURR */ |
| 430 | break; | 430 | break; |
| 431 | case 0x4000f4: /* PM_RUN_PURR */ | 431 | case 0x4000f4: /* PM_RUN_PURR */ |
| 432 | alt[j++] = 0x10000e; /* PM_PURR */ | 432 | alt[j++] = 0x10000e; /* PM_PURR */ |
| 433 | break; | 433 | break; |
| 434 | } | 434 | } |
| 435 | } | 435 | } |
| 436 | nalt = j; | 436 | nalt = j; |
| 437 | } | 437 | } |
| 438 | 438 | ||
| 439 | if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) { | 439 | if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) { |
| 440 | /* remove the limited PMC events */ | 440 | /* remove the limited PMC events */ |
| 441 | j = 0; | 441 | j = 0; |
| 442 | for (i = 0; i < nalt; ++i) { | 442 | for (i = 0; i < nalt; ++i) { |
| 443 | if (!p6_limited_pmc_event(alt[i])) { | 443 | if (!p6_limited_pmc_event(alt[i])) { |
| 444 | alt[j] = alt[i]; | 444 | alt[j] = alt[i]; |
| 445 | ++j; | 445 | ++j; |
| 446 | } | 446 | } |
| 447 | } | 447 | } |
| 448 | nalt = j; | 448 | nalt = j; |
| 449 | } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) { | 449 | } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) { |
| 450 | /* remove all but the limited PMC events */ | 450 | /* remove all but the limited PMC events */ |
| 451 | j = 0; | 451 | j = 0; |
| 452 | for (i = 0; i < nalt; ++i) { | 452 | for (i = 0; i < nalt; ++i) { |
| 453 | if (p6_limited_pmc_event(alt[i])) { | 453 | if (p6_limited_pmc_event(alt[i])) { |
| 454 | alt[j] = alt[i]; | 454 | alt[j] = alt[i]; |
| 455 | ++j; | 455 | ++j; |
| 456 | } | 456 | } |
| 457 | } | 457 | } |
| 458 | nalt = j; | 458 | nalt = j; |
| 459 | } | 459 | } |
| 460 | 460 | ||
| 461 | return nalt; | 461 | return nalt; |
| 462 | } | 462 | } |
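The two sum-event swaps in the else-branch above are pure bit arithmetic on the event code. Worked values (pmc and psel shown symbolically, since the POWER6 field shifts are defined earlier in this file):

	/*   pmc = 1, psel = 0x32  <->  pmc = 4, psel = 0x34
	 *     since (0x32 ^ 0x6) == 0x34 and (5 - 1) == 4
	 *   pmc = 1, psel = 0x38  <->  pmc = 3, psel = 0x3a
	 *     since (0x38 ^ 0x2) == 0x3a and (1 + 2) == 3
	 */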
| 463 | 463 | ||
| 464 | static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | 464 | static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[]) |
| 465 | { | 465 | { |
| 466 | /* Set PMCxSEL to 0 to disable PMCx */ | 466 | /* Set PMCxSEL to 0 to disable PMCx */ |
| 467 | if (pmc <= 3) | 467 | if (pmc <= 3) |
| 468 | mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); | 468 | mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); |
| 469 | } | 469 | } |
| 470 | 470 | ||
| 471 | static int power6_generic_events[] = { | 471 | static int power6_generic_events[] = { |
| 472 | [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, | 472 | [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, |
| 473 | [PERF_COUNT_HW_INSTRUCTIONS] = 2, | 473 | [PERF_COUNT_HW_INSTRUCTIONS] = 2, |
| 474 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */ | 474 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */ |
| 475 | [PERF_COUNT_HW_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */ | 475 | [PERF_COUNT_HW_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */ |
| 476 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */ | 476 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */ |
| 477 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x400052, /* BR_MPRED */ | 477 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x400052, /* BR_MPRED */ |
| 478 | }; | 478 | }; |
| 479 | 479 | ||
| 480 | #define C(x) PERF_COUNT_HW_CACHE_##x | 480 | #define C(x) PERF_COUNT_HW_CACHE_##x |
| 481 | 481 | ||
| 482 | /* | 482 | /* |
| 483 | * Table of generalized cache-related events. | 483 | * Table of generalized cache-related events. |
| 484 | * 0 means not supported, -1 means nonsensical, other values | 484 | * 0 means not supported, -1 means nonsensical, other values |
| 485 | * are event codes. | 485 | * are event codes. |
| 486 | * The "DTLB" and "ITLB" events relate to the DERAT and IERAT. | 486 | * The "DTLB" and "ITLB" events relate to the DERAT and IERAT. |
| 487 | */ | 487 | */ |
| 488 | static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | 488 | static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
| 489 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | 489 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 490 | [C(OP_READ)] = { 0x80082, 0x80080 }, | 490 | [C(OP_READ)] = { 0x80082, 0x80080 }, |
| 491 | [C(OP_WRITE)] = { 0x80086, 0x80088 }, | 491 | [C(OP_WRITE)] = { 0x80086, 0x80088 }, |
| 492 | [C(OP_PREFETCH)] = { 0x810a4, 0 }, | 492 | [C(OP_PREFETCH)] = { 0x810a4, 0 }, |
| 493 | }, | 493 | }, |
| 494 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ | 494 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 495 | [C(OP_READ)] = { 0, 0x100056 }, | 495 | [C(OP_READ)] = { 0, 0x100056 }, |
| 496 | [C(OP_WRITE)] = { -1, -1 }, | 496 | [C(OP_WRITE)] = { -1, -1 }, |
| 497 | [C(OP_PREFETCH)] = { 0x4008c, 0 }, | 497 | [C(OP_PREFETCH)] = { 0x4008c, 0 }, |
| 498 | }, | 498 | }, |
| 499 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | 499 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 500 | [C(OP_READ)] = { 0x150730, 0x250532 }, | 500 | [C(OP_READ)] = { 0x150730, 0x250532 }, |
| 501 | [C(OP_WRITE)] = { 0x250432, 0x150432 }, | 501 | [C(OP_WRITE)] = { 0x250432, 0x150432 }, |
| 502 | [C(OP_PREFETCH)] = { 0x810a6, 0 }, | 502 | [C(OP_PREFETCH)] = { 0x810a6, 0 }, |
| 503 | }, | 503 | }, |
| 504 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 504 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 505 | [C(OP_READ)] = { 0, 0x20000e }, | 505 | [C(OP_READ)] = { 0, 0x20000e }, |
| 506 | [C(OP_WRITE)] = { -1, -1 }, | 506 | [C(OP_WRITE)] = { -1, -1 }, |
| 507 | [C(OP_PREFETCH)] = { -1, -1 }, | 507 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 508 | }, | 508 | }, |
| 509 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 509 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 510 | [C(OP_READ)] = { 0, 0x420ce }, | 510 | [C(OP_READ)] = { 0, 0x420ce }, |
| 511 | [C(OP_WRITE)] = { -1, -1 }, | 511 | [C(OP_WRITE)] = { -1, -1 }, |
| 512 | [C(OP_PREFETCH)] = { -1, -1 }, | 512 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 513 | }, | 513 | }, |
| 514 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ | 514 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 515 | [C(OP_READ)] = { 0x430e6, 0x400052 }, | 515 | [C(OP_READ)] = { 0x430e6, 0x400052 }, |
| 516 | [C(OP_WRITE)] = { -1, -1 }, | 516 | [C(OP_WRITE)] = { -1, -1 }, |
| 517 | [C(OP_PREFETCH)] = { -1, -1 }, | 517 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 518 | }, | 518 | }, |
| 519 | }; | 519 | }; |
| 520 | 520 | ||
| 521 | static struct power_pmu power6_pmu = { | 521 | static struct power_pmu power6_pmu = { |
| 522 | .name = "POWER6", | 522 | .name = "POWER6", |
| 523 | .n_counter = 6, | 523 | .n_counter = 6, |
| 524 | .max_alternatives = MAX_ALT, | 524 | .max_alternatives = MAX_ALT, |
| 525 | .add_fields = 0x1555, | 525 | .add_fields = 0x1555, |
| 526 | .test_adder = 0x3000, | 526 | .test_adder = 0x3000, |
| 527 | .compute_mmcr = p6_compute_mmcr, | 527 | .compute_mmcr = p6_compute_mmcr, |
| 528 | .get_constraint = p6_get_constraint, | 528 | .get_constraint = p6_get_constraint, |
| 529 | .get_alternatives = p6_get_alternatives, | 529 | .get_alternatives = p6_get_alternatives, |
| 530 | .disable_pmc = p6_disable_pmc, | 530 | .disable_pmc = p6_disable_pmc, |
| 531 | .limited_pmc_event = p6_limited_pmc_event, | 531 | .limited_pmc_event = p6_limited_pmc_event, |
| 532 | .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR, | 532 | .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR, |
| 533 | .n_generic = ARRAY_SIZE(power6_generic_events), | 533 | .n_generic = ARRAY_SIZE(power6_generic_events), |
| 534 | .generic_events = power6_generic_events, | 534 | .generic_events = power6_generic_events, |
| 535 | .cache_events = &power6_cache_events, | 535 | .cache_events = &power6_cache_events, |
| 536 | }; | 536 | }; |
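The add_fields/test_adder pair in this struct drives the carry trick sketched after p6_get_constraint() above: each event needing PMC1-4 adds 0x1000 into the 12-15 field, and test_adder biases the sum so that a fifth such event carries into the 0x8000 error bit. Illustrative arithmetic only:

	/*   4 events: 4 * 0x1000 + 0x3000 = 0x7000 -> bit 15 clear, accepted
	 *   5 events: 5 * 0x1000 + 0x3000 = 0x8000 -> bit 15 (error) set, rejected
	 */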
| 537 | 537 | ||
| 538 | static int init_power6_pmu(void) | 538 | static int init_power6_pmu(void) |
| 539 | { | 539 | { |
| 540 | if (!cur_cpu_spec->oprofile_cpu_type || | 540 | if (!cur_cpu_spec->oprofile_cpu_type || |
| 541 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6")) | 541 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6")) |
| 542 | return -ENODEV; | 542 | return -ENODEV; |
| 543 | 543 | ||
| 544 | return register_power_pmu(&power6_pmu); | 544 | return register_power_pmu(&power6_pmu); |
| 545 | } | 545 | } |
| 546 | 546 | ||
| 547 | arch_initcall(init_power6_pmu); | 547 | arch_initcall(init_power6_pmu); |
| 548 | 548 |
arch/powerpc/kernel/power7-pmu.c
| 1 | /* | 1 | /* |
| 2 | * Performance counter support for POWER7 processors. | 2 | * Performance counter support for POWER7 processors. |
| 3 | * | 3 | * |
| 4 | * Copyright 2009 Paul Mackerras, IBM Corporation. | 4 | * Copyright 2009 Paul Mackerras, IBM Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
| 10 | */ | 10 | */ |
| 11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
| 12 | #include <linux/perf_event.h> | 12 | #include <linux/perf_event.h> |
| 13 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| 14 | #include <asm/reg.h> | 14 | #include <asm/reg.h> |
| 15 | #include <asm/cputable.h> | 15 | #include <asm/cputable.h> |
| 16 | 16 | ||
| 17 | /* | 17 | /* |
| 18 | * Bits in event code for POWER7 | 18 | * Bits in event code for POWER7 |
| 19 | */ | 19 | */ |
| 20 | #define PM_PMC_SH 16 /* PMC number (1-based) for direct events */ | 20 | #define PM_PMC_SH 16 /* PMC number (1-based) for direct events */ |
| 21 | #define PM_PMC_MSK 0xf | 21 | #define PM_PMC_MSK 0xf |
| 22 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) | 22 | #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) |
| 23 | #define PM_UNIT_SH 12 /* TTMMUX number and setting - unit select */ | 23 | #define PM_UNIT_SH 12 /* TTMMUX number and setting - unit select */ |
| 24 | #define PM_UNIT_MSK 0xf | 24 | #define PM_UNIT_MSK 0xf |
| 25 | #define PM_COMBINE_SH 11 /* Combined event bit */ | 25 | #define PM_COMBINE_SH 11 /* Combined event bit */ |
| 26 | #define PM_COMBINE_MSK 1 | 26 | #define PM_COMBINE_MSK 1 |
| 27 | #define PM_COMBINE_MSKS 0x800 | 27 | #define PM_COMBINE_MSKS 0x800 |
| 28 | #define PM_L2SEL_SH 8 /* L2 event select */ | 28 | #define PM_L2SEL_SH 8 /* L2 event select */ |
| 29 | #define PM_L2SEL_MSK 7 | 29 | #define PM_L2SEL_MSK 7 |
| 30 | #define PM_PMCSEL_MSK 0xff | 30 | #define PM_PMCSEL_MSK 0xff |
| 31 | 31 | ||
| 32 | /* | 32 | /* |
| 33 | * Bits in MMCR1 for POWER7 | 33 | * Bits in MMCR1 for POWER7 |
| 34 | */ | 34 | */ |
| 35 | #define MMCR1_TTM0SEL_SH 60 | 35 | #define MMCR1_TTM0SEL_SH 60 |
| 36 | #define MMCR1_TTM1SEL_SH 56 | 36 | #define MMCR1_TTM1SEL_SH 56 |
| 37 | #define MMCR1_TTM2SEL_SH 52 | 37 | #define MMCR1_TTM2SEL_SH 52 |
| 38 | #define MMCR1_TTM3SEL_SH 48 | 38 | #define MMCR1_TTM3SEL_SH 48 |
| 39 | #define MMCR1_TTMSEL_MSK 0xf | 39 | #define MMCR1_TTMSEL_MSK 0xf |
| 40 | #define MMCR1_L2SEL_SH 45 | 40 | #define MMCR1_L2SEL_SH 45 |
| 41 | #define MMCR1_L2SEL_MSK 7 | 41 | #define MMCR1_L2SEL_MSK 7 |
| 42 | #define MMCR1_PMC1_COMBINE_SH 35 | 42 | #define MMCR1_PMC1_COMBINE_SH 35 |
| 43 | #define MMCR1_PMC2_COMBINE_SH 34 | 43 | #define MMCR1_PMC2_COMBINE_SH 34 |
| 44 | #define MMCR1_PMC3_COMBINE_SH 33 | 44 | #define MMCR1_PMC3_COMBINE_SH 33 |
| 45 | #define MMCR1_PMC4_COMBINE_SH 32 | 45 | #define MMCR1_PMC4_COMBINE_SH 32 |
| 46 | #define MMCR1_PMC1SEL_SH 24 | 46 | #define MMCR1_PMC1SEL_SH 24 |
| 47 | #define MMCR1_PMC2SEL_SH 16 | 47 | #define MMCR1_PMC2SEL_SH 16 |
| 48 | #define MMCR1_PMC3SEL_SH 8 | 48 | #define MMCR1_PMC3SEL_SH 8 |
| 49 | #define MMCR1_PMC4SEL_SH 0 | 49 | #define MMCR1_PMC4SEL_SH 0 |
| 50 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) | 50 | #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) |
| 51 | #define MMCR1_PMCSEL_MSK 0xff | 51 | #define MMCR1_PMCSEL_MSK 0xff |
| 52 | 52 | ||
| 53 | /* | 53 | /* |
| 54 | * Bits in MMCRA | ||
| 55 | */ | ||
| 56 | |||
| 57 | /* | ||
| 58 | * Layout of constraint bits: | 54 | * Layout of constraint bits: |
| 59 | * 6666555555555544444444443333333333222222222211111111110000000000 | 55 | * 6666555555555544444444443333333333222222222211111111110000000000 |
| 60 | * 3210987654321098765432109876543210987654321098765432109876543210 | 56 | * 3210987654321098765432109876543210987654321098765432109876543210 |
| 61 | * [ ><><><><><><> | 57 | * [ ><><><><><><> |
| 62 | * NC P6P5P4P3P2P1 | 58 | * NC P6P5P4P3P2P1 |
| 63 | * | 59 | * |
| 64 | * NC - number of counters | 60 | * NC - number of counters |
| 65 | * 15: NC error 0x8000 | 61 | * 15: NC error 0x8000 |
| 66 | * 12-14: number of events needing PMC1-4 0x7000 | 62 | * 12-14: number of events needing PMC1-4 0x7000 |
| 67 | * | 63 | * |
| 68 | * P6 | 64 | * P6 |
| 69 | * 11: P6 error 0x800 | 65 | * 11: P6 error 0x800 |
| 70 | * 10-11: Count of events needing PMC6 | 66 | * 10-11: Count of events needing PMC6 |
| 71 | * | 67 | * |
| 72 | * P1..P5 | 68 | * P1..P5 |
| 73 | * 0-9: Count of events needing PMC1..PMC5 | 69 | * 0-9: Count of events needing PMC1..PMC5 |
| 74 | */ | 70 | */ |
| 75 | 71 | ||
| 76 | static int power7_get_constraint(u64 event, unsigned long *maskp, | 72 | static int power7_get_constraint(u64 event, unsigned long *maskp, |
| 77 | unsigned long *valp) | 73 | unsigned long *valp) |
| 78 | { | 74 | { |
| 79 | int pmc, sh; | 75 | int pmc, sh; |
| 80 | unsigned long mask = 0, value = 0; | 76 | unsigned long mask = 0, value = 0; |
| 81 | 77 | ||
| 82 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 78 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 83 | if (pmc) { | 79 | if (pmc) { |
| 84 | if (pmc > 6) | 80 | if (pmc > 6) |
| 85 | return -1; | 81 | return -1; |
| 86 | sh = (pmc - 1) * 2; | 82 | sh = (pmc - 1) * 2; |
| 87 | mask |= 2 << sh; | 83 | mask |= 2 << sh; |
| 88 | value |= 1 << sh; | 84 | value |= 1 << sh; |
| 89 | if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4)) | 85 | if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4)) |
| 90 | return -1; | 86 | return -1; |
| 91 | } | 87 | } |
| 92 | if (pmc < 5) { | 88 | if (pmc < 5) { |
| 93 | /* need a counter from PMC1-4 set */ | 89 | /* need a counter from PMC1-4 set */ |
| 94 | mask |= 0x8000; | 90 | mask |= 0x8000; |
| 95 | value |= 0x1000; | 91 | value |= 0x1000; |
| 96 | } | 92 | } |
| 97 | *maskp = mask; | 93 | *maskp = mask; |
| 98 | *valp = value; | 94 | *valp = value; |
| 99 | return 0; | 95 | return 0; |
| 100 | } | 96 | } |
| 101 | 97 | ||
| 102 | #define MAX_ALT 2 /* at most 2 alternatives for any event */ | 98 | #define MAX_ALT 2 /* at most 2 alternatives for any event */ |
| 103 | 99 | ||
| 104 | static const unsigned int event_alternatives[][MAX_ALT] = { | 100 | static const unsigned int event_alternatives[][MAX_ALT] = { |
| 105 | { 0x200f2, 0x300f2 }, /* PM_INST_DISP */ | 101 | { 0x200f2, 0x300f2 }, /* PM_INST_DISP */ |
| 106 | { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */ | 102 | { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */ |
| 107 | { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */ | 103 | { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */ |
| 108 | }; | 104 | }; |
| 109 | 105 | ||
| 110 | /* | 106 | /* |
| 111 | * Scan the alternatives table for a match and return the | 107 | * Scan the alternatives table for a match and return the |
| 112 | * index into the alternatives table if found, else -1. | 108 | * index into the alternatives table if found, else -1. |
| 113 | */ | 109 | */ |
| 114 | static int find_alternative(u64 event) | 110 | static int find_alternative(u64 event) |
| 115 | { | 111 | { |
| 116 | int i, j; | 112 | int i, j; |
| 117 | 113 | ||
| 118 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { | 114 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { |
| 119 | if (event < event_alternatives[i][0]) | 115 | if (event < event_alternatives[i][0]) |
| 120 | break; | 116 | break; |
| 121 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) | 117 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) |
| 122 | if (event == event_alternatives[i][j]) | 118 | if (event == event_alternatives[i][j]) |
| 123 | return i; | 119 | return i; |
| 124 | } | 120 | } |
| 125 | return -1; | 121 | return -1; |
| 126 | } | 122 | } |
| 127 | 123 | ||
| 128 | static s64 find_alternative_decode(u64 event) | 124 | static s64 find_alternative_decode(u64 event) |
| 129 | { | 125 | { |
| 130 | int pmc, psel; | 126 | int pmc, psel; |
| 131 | 127 | ||
| 132 | /* this only handles the 4x decode events */ | 128 | /* this only handles the 4x decode events */ |
| 133 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 129 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 134 | psel = event & PM_PMCSEL_MSK; | 130 | psel = event & PM_PMCSEL_MSK; |
| 135 | if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40) | 131 | if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40) |
| 136 | return event - (1 << PM_PMC_SH) + 8; | 132 | return event - (1 << PM_PMC_SH) + 8; |
| 137 | if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48) | 133 | if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48) |
| 138 | return event + (1 << PM_PMC_SH) - 8; | 134 | return event + (1 << PM_PMC_SH) - 8; |
| 139 | return -1; | 135 | return -1; |
| 140 | } | 136 | } |
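A worked instance of the two mappings above (the event codes are hypothetical but have the required shape):

	/*   event = 0x20040:  pmc = 2, psel = 0x40 (the 0x40-0x47 decode group)
	 *   0x20040 - (1 << PM_PMC_SH) + 8 = 0x10048:  pmc = 1, psel = 0x48
	 * i.e. the same decode event expressed on the neighbouring PMC;
	 * the second branch maps a 0x48-group event back the other way.
	 */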
| 141 | 137 | ||
| 142 | static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[]) | 138 | static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[]) |
| 143 | { | 139 | { |
| 144 | int i, j, nalt = 1; | 140 | int i, j, nalt = 1; |
| 145 | s64 ae; | 141 | s64 ae; |
| 146 | 142 | ||
| 147 | alt[0] = event; | 143 | alt[0] = event; |
| 148 | nalt = 1; | 144 | nalt = 1; |
| 149 | i = find_alternative(event); | 145 | i = find_alternative(event); |
| 150 | if (i >= 0) { | 146 | if (i >= 0) { |
| 151 | for (j = 0; j < MAX_ALT; ++j) { | 147 | for (j = 0; j < MAX_ALT; ++j) { |
| 152 | ae = event_alternatives[i][j]; | 148 | ae = event_alternatives[i][j]; |
| 153 | if (ae && ae != event) | 149 | if (ae && ae != event) |
| 154 | alt[nalt++] = ae; | 150 | alt[nalt++] = ae; |
| 155 | } | 151 | } |
| 156 | } else { | 152 | } else { |
| 157 | ae = find_alternative_decode(event); | 153 | ae = find_alternative_decode(event); |
| 158 | if (ae > 0) | 154 | if (ae > 0) |
| 159 | alt[nalt++] = ae; | 155 | alt[nalt++] = ae; |
| 160 | } | 156 | } |
| 161 | 157 | ||
| 162 | if (flags & PPMU_ONLY_COUNT_RUN) { | 158 | if (flags & PPMU_ONLY_COUNT_RUN) { |
| 163 | /* | 159 | /* |
| 164 | * We're only counting in RUN state, | 160 | * We're only counting in RUN state, |
| 165 | * so PM_CYC is equivalent to PM_RUN_CYC | 161 | * so PM_CYC is equivalent to PM_RUN_CYC |
| 166 | * and PM_INST_CMPL === PM_RUN_INST_CMPL. | 162 | * and PM_INST_CMPL === PM_RUN_INST_CMPL. |
| 167 | * This doesn't include alternatives that don't provide | 163 | * This doesn't include alternatives that don't provide |
| 168 | * any extra flexibility in assigning PMCs. | 164 | * any extra flexibility in assigning PMCs. |
| 169 | */ | 165 | */ |
| 170 | j = nalt; | 166 | j = nalt; |
| 171 | for (i = 0; i < nalt; ++i) { | 167 | for (i = 0; i < nalt; ++i) { |
| 172 | switch (alt[i]) { | 168 | switch (alt[i]) { |
| 173 | case 0x1e: /* PM_CYC */ | 169 | case 0x1e: /* PM_CYC */ |
| 174 | alt[j++] = 0x600f4; /* PM_RUN_CYC */ | 170 | alt[j++] = 0x600f4; /* PM_RUN_CYC */ |
| 175 | break; | 171 | break; |
| 176 | case 0x600f4: /* PM_RUN_CYC */ | 172 | case 0x600f4: /* PM_RUN_CYC */ |
| 177 | alt[j++] = 0x1e; | 173 | alt[j++] = 0x1e; |
| 178 | break; | 174 | break; |
| 179 | case 0x2: /* PM_PPC_CMPL */ | 175 | case 0x2: /* PM_PPC_CMPL */ |
| 180 | alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */ | 176 | alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */ |
| 181 | break; | 177 | break; |
| 182 | case 0x500fa: /* PM_RUN_INST_CMPL */ | 178 | case 0x500fa: /* PM_RUN_INST_CMPL */ |
| 183 | alt[j++] = 0x2; /* PM_PPC_CMPL */ | 179 | alt[j++] = 0x2; /* PM_PPC_CMPL */ |
| 184 | break; | 180 | break; |
| 185 | } | 181 | } |
| 186 | } | 182 | } |
| 187 | nalt = j; | 183 | nalt = j; |
| 188 | } | 184 | } |
| 189 | 185 | ||
| 190 | return nalt; | 186 | return nalt; |
| 191 | } | 187 | } |
| 192 | 188 | ||
| 193 | /* | 189 | /* |
| 194 | * Returns 1 if event counts things relating to marked instructions | 190 | * Returns 1 if event counts things relating to marked instructions |
| 195 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. | 191 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. |
| 196 | */ | 192 | */ |
| 197 | static int power7_marked_instr_event(u64 event) | 193 | static int power7_marked_instr_event(u64 event) |
| 198 | { | 194 | { |
| 199 | int pmc, psel; | 195 | int pmc, psel; |
| 200 | int unit; | 196 | int unit; |
| 201 | 197 | ||
| 202 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 198 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 203 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | 199 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 204 | psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */ | 200 | psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */ |
| 205 | if (pmc >= 5) | 201 | if (pmc >= 5) |
| 206 | return 0; | 202 | return 0; |
| 207 | 203 | ||
| 208 | switch (psel >> 4) { | 204 | switch (psel >> 4) { |
| 209 | case 2: | 205 | case 2: |
| 210 | return pmc == 2 || pmc == 4; | 206 | return pmc == 2 || pmc == 4; |
| 211 | case 3: | 207 | case 3: |
| 212 | if (psel == 0x3c) | 208 | if (psel == 0x3c) |
| 213 | return pmc == 1; | 209 | return pmc == 1; |
| 214 | if (psel == 0x3e) | 210 | if (psel == 0x3e) |
| 215 | return pmc != 2; | 211 | return pmc != 2; |
| 216 | return 1; | 212 | return 1; |
| 217 | case 4: | 213 | case 4: |
| 218 | case 5: | 214 | case 5: |
| 219 | return unit == 0xd; | 215 | return unit == 0xd; |
| 220 | case 6: | 216 | case 6: |
| 221 | if (psel == 0x64) | 217 | if (psel == 0x64) |
| 222 | return pmc >= 3; | 218 | return pmc >= 3; |
| 223 | case 8: | 219 | case 8: |
| 224 | return unit == 0xd; | 220 | return unit == 0xd; |
| 225 | } | 221 | } |
| 226 | return 0; | 222 | return 0; |
| 227 | } | 223 | } |
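Note that case 6 in the switch above has no break: a psel in the 0x60-0x6f range other than 0x64 falls through into case 8 and is treated as marked whenever unit == 0xd. If that fallthrough is unintended (the comment-free case 8 reads as standing on its own), the conventional fix would be:

	case 6:
		if (psel == 0x64)
			return pmc >= 3;
		break;		/* hypothetical fix: do not fall into case 8 */
	case 8:
		return unit == 0xd;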
| 228 | 224 | ||
| 229 | static int power7_compute_mmcr(u64 event[], int n_ev, | 225 | static int power7_compute_mmcr(u64 event[], int n_ev, |
| 230 | unsigned int hwc[], unsigned long mmcr[]) | 226 | unsigned int hwc[], unsigned long mmcr[]) |
| 231 | { | 227 | { |
| 232 | unsigned long mmcr1 = 0; | 228 | unsigned long mmcr1 = 0; |
| 233 | unsigned long mmcra = 0; | 229 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
| 234 | unsigned int pmc, unit, combine, l2sel, psel; | 230 | unsigned int pmc, unit, combine, l2sel, psel; |
| 235 | unsigned int pmc_inuse = 0; | 231 | unsigned int pmc_inuse = 0; |
| 236 | int i; | 232 | int i; |
| 237 | 233 | ||
| 238 | /* First pass to count resource use */ | 234 | /* First pass to count resource use */ |
| 239 | for (i = 0; i < n_ev; ++i) { | 235 | for (i = 0; i < n_ev; ++i) { |
| 240 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 236 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 241 | if (pmc) { | 237 | if (pmc) { |
| 242 | if (pmc > 6) | 238 | if (pmc > 6) |
| 243 | return -1; | 239 | return -1; |
| 244 | if (pmc_inuse & (1 << (pmc - 1))) | 240 | if (pmc_inuse & (1 << (pmc - 1))) |
| 245 | return -1; | 241 | return -1; |
| 246 | pmc_inuse |= 1 << (pmc - 1); | 242 | pmc_inuse |= 1 << (pmc - 1); |
| 247 | } | 243 | } |
| 248 | } | 244 | } |
| 249 | 245 | ||
| 250 | /* Second pass: assign PMCs, set all MMCR1 fields */ | 246 | /* Second pass: assign PMCs, set all MMCR1 fields */ |
| 251 | for (i = 0; i < n_ev; ++i) { | 247 | for (i = 0; i < n_ev; ++i) { |
| 252 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 248 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 253 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | 249 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 254 | combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK; | 250 | combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK; |
| 255 | l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK; | 251 | l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK; |
| 256 | psel = event[i] & PM_PMCSEL_MSK; | 252 | psel = event[i] & PM_PMCSEL_MSK; |
| 257 | if (!pmc) { | 253 | if (!pmc) { |
| 258 | /* Bus event or any-PMC direct event */ | 254 | /* Bus event or any-PMC direct event */ |
| 259 | for (pmc = 0; pmc < 4; ++pmc) { | 255 | for (pmc = 0; pmc < 4; ++pmc) { |
| 260 | if (!(pmc_inuse & (1 << pmc))) | 256 | if (!(pmc_inuse & (1 << pmc))) |
| 261 | break; | 257 | break; |
| 262 | } | 258 | } |
| 263 | if (pmc >= 4) | 259 | if (pmc >= 4) |
| 264 | return -1; | 260 | return -1; |
| 265 | pmc_inuse |= 1 << pmc; | 261 | pmc_inuse |= 1 << pmc; |
| 266 | } else { | 262 | } else { |
| 267 | /* Direct or decoded event */ | 263 | /* Direct or decoded event */ |
| 268 | --pmc; | 264 | --pmc; |
| 269 | } | 265 | } |
| 270 | if (pmc <= 3) { | 266 | if (pmc <= 3) { |
| 271 | mmcr1 |= (unsigned long) unit | 267 | mmcr1 |= (unsigned long) unit |
| 272 | << (MMCR1_TTM0SEL_SH - 4 * pmc); | 268 | << (MMCR1_TTM0SEL_SH - 4 * pmc); |
| 273 | mmcr1 |= (unsigned long) combine | 269 | mmcr1 |= (unsigned long) combine |
| 274 | << (MMCR1_PMC1_COMBINE_SH - pmc); | 270 | << (MMCR1_PMC1_COMBINE_SH - pmc); |
| 275 | mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); | 271 | mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); |
| 276 | if (unit == 6) /* L2 events */ | 272 | if (unit == 6) /* L2 events */ |
| 277 | mmcr1 |= (unsigned long) l2sel | 273 | mmcr1 |= (unsigned long) l2sel |
| 278 | << MMCR1_L2SEL_SH; | 274 | << MMCR1_L2SEL_SH; |
| 279 | } | 275 | } |
| 280 | if (power7_marked_instr_event(event[i])) | 276 | if (power7_marked_instr_event(event[i])) |
| 281 | mmcra |= MMCRA_SAMPLE_ENABLE; | 277 | mmcra |= MMCRA_SAMPLE_ENABLE; |
| 282 | hwc[i] = pmc; | 278 | hwc[i] = pmc; |
| 283 | } | 279 | } |
| 284 | 280 | ||
| 285 | /* Return MMCRx values */ | 281 | /* Return MMCRx values */ |
| 286 | mmcr[0] = 0; | 282 | mmcr[0] = 0; |
| 287 | if (pmc_inuse & 1) | 283 | if (pmc_inuse & 1) |
| 288 | mmcr[0] = MMCR0_PMC1CE; | 284 | mmcr[0] = MMCR0_PMC1CE; |
| 289 | if (pmc_inuse & 0x3e) | 285 | if (pmc_inuse & 0x3e) |
| 290 | mmcr[0] |= MMCR0_PMCjCE; | 286 | mmcr[0] |= MMCR0_PMCjCE; |
| 291 | mmcr[1] = mmcr1; | 287 | mmcr[1] = mmcr1; |
| 292 | mmcr[2] = mmcra; | 288 | mmcr[2] = mmcra; |
| 293 | return 0; | 289 | return 0; |
| 294 | } | 290 | } |
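As a concrete instance of the second-pass packing, here is how one hypothetical event would land in MMCR1, using the shift macros defined at the top of this file (the event's field values are made up):

	/* pmc = 3 in the event code, i.e. 0-based pmc = 2,
	 * with unit = 6 (L2), l2sel = 1, psel = 0x7:
	 */
	mmcr1 |= 6UL << (MMCR1_TTM0SEL_SH - 4 * 2);	/* TTM2SEL, bits 52-55 */
	mmcr1 |= 0x7UL << MMCR1_PMCSEL_SH(2);		/* PMC3SEL, bits 8-15 */
	mmcr1 |= 1UL << MMCR1_L2SEL_SH;			/* L2SEL, bits 45-47 */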
| 295 | 291 | ||
| 296 | static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | 292 | static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) |
| 297 | { | 293 | { |
| 298 | if (pmc <= 3) | 294 | if (pmc <= 3) |
| 299 | mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); | 295 | mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); |
| 300 | } | 296 | } |
| 301 | 297 | ||
| 302 | static int power7_generic_events[] = { | 298 | static int power7_generic_events[] = { |
| 303 | [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, | 299 | [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, |
| 304 | [PERF_COUNT_HW_INSTRUCTIONS] = 2, | 300 | [PERF_COUNT_HW_INSTRUCTIONS] = 2, |
| 305 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU */ | 301 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU */ |
| 306 | [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ | 302 | [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ |
| 307 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ | 303 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ |
| 308 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ | 304 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ |
| 309 | }; | 305 | }; |
| 310 | 306 | ||
| 311 | #define C(x) PERF_COUNT_HW_CACHE_##x | 307 | #define C(x) PERF_COUNT_HW_CACHE_##x |
| 312 | 308 | ||
| 313 | /* | 309 | /* |
| 314 | * Table of generalized cache-related events. | 310 | * Table of generalized cache-related events. |
| 315 | * 0 means not supported, -1 means nonsensical, other values | 311 | * 0 means not supported, -1 means nonsensical, other values |
| 316 | * are event codes. | 312 | * are event codes. |
| 317 | */ | 313 | */ |
| 318 | static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | 314 | static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
| 319 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | 315 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 320 | [C(OP_READ)] = { 0xc880, 0x400f0 }, | 316 | [C(OP_READ)] = { 0xc880, 0x400f0 }, |
| 321 | [C(OP_WRITE)] = { 0, 0x300f0 }, | 317 | [C(OP_WRITE)] = { 0, 0x300f0 }, |
| 322 | [C(OP_PREFETCH)] = { 0xd8b8, 0 }, | 318 | [C(OP_PREFETCH)] = { 0xd8b8, 0 }, |
| 323 | }, | 319 | }, |
| 324 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ | 320 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 325 | [C(OP_READ)] = { 0, 0x200fc }, | 321 | [C(OP_READ)] = { 0, 0x200fc }, |
| 326 | [C(OP_WRITE)] = { -1, -1 }, | 322 | [C(OP_WRITE)] = { -1, -1 }, |
| 327 | [C(OP_PREFETCH)] = { 0x408a, 0 }, | 323 | [C(OP_PREFETCH)] = { 0x408a, 0 }, |
| 328 | }, | 324 | }, |
| 329 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | 325 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 330 | [C(OP_READ)] = { 0x16080, 0x26080 }, | 326 | [C(OP_READ)] = { 0x16080, 0x26080 }, |
| 331 | [C(OP_WRITE)] = { 0x16082, 0x26082 }, | 327 | [C(OP_WRITE)] = { 0x16082, 0x26082 }, |
| 332 | [C(OP_PREFETCH)] = { 0, 0 }, | 328 | [C(OP_PREFETCH)] = { 0, 0 }, |
| 333 | }, | 329 | }, |
| 334 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 330 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 335 | [C(OP_READ)] = { 0, 0x300fc }, | 331 | [C(OP_READ)] = { 0, 0x300fc }, |
| 336 | [C(OP_WRITE)] = { -1, -1 }, | 332 | [C(OP_WRITE)] = { -1, -1 }, |
| 337 | [C(OP_PREFETCH)] = { -1, -1 }, | 333 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 338 | }, | 334 | }, |
| 339 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 335 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 340 | [C(OP_READ)] = { 0, 0x400fc }, | 336 | [C(OP_READ)] = { 0, 0x400fc }, |
| 341 | [C(OP_WRITE)] = { -1, -1 }, | 337 | [C(OP_WRITE)] = { -1, -1 }, |
| 342 | [C(OP_PREFETCH)] = { -1, -1 }, | 338 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 343 | }, | 339 | }, |
| 344 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ | 340 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 345 | [C(OP_READ)] = { 0x10068, 0x400f6 }, | 341 | [C(OP_READ)] = { 0x10068, 0x400f6 }, |
| 346 | [C(OP_WRITE)] = { -1, -1 }, | 342 | [C(OP_WRITE)] = { -1, -1 }, |
| 347 | [C(OP_PREFETCH)] = { -1, -1 }, | 343 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 348 | }, | 344 | }, |
| 349 | }; | 345 | }; |
| 350 | 346 | ||
| 351 | static struct power_pmu power7_pmu = { | 347 | static struct power_pmu power7_pmu = { |
| 352 | .name = "POWER7", | 348 | .name = "POWER7", |
| 353 | .n_counter = 6, | 349 | .n_counter = 6, |
| 354 | .max_alternatives = MAX_ALT + 1, | 350 | .max_alternatives = MAX_ALT + 1, |
| 355 | .add_fields = 0x1555ul, | 351 | .add_fields = 0x1555ul, |
| 356 | .test_adder = 0x3000ul, | 352 | .test_adder = 0x3000ul, |
| 357 | .compute_mmcr = power7_compute_mmcr, | 353 | .compute_mmcr = power7_compute_mmcr, |
| 358 | .get_constraint = power7_get_constraint, | 354 | .get_constraint = power7_get_constraint, |
| 359 | .get_alternatives = power7_get_alternatives, | 355 | .get_alternatives = power7_get_alternatives, |
| 360 | .disable_pmc = power7_disable_pmc, | 356 | .disable_pmc = power7_disable_pmc, |
| 361 | .flags = PPMU_ALT_SIPR, | 357 | .flags = PPMU_ALT_SIPR, |
| 362 | .n_generic = ARRAY_SIZE(power7_generic_events), | 358 | .n_generic = ARRAY_SIZE(power7_generic_events), |
| 363 | .generic_events = power7_generic_events, | 359 | .generic_events = power7_generic_events, |
| 364 | .cache_events = &power7_cache_events, | 360 | .cache_events = &power7_cache_events, |
| 365 | }; | 361 | }; |
| 366 | 362 | ||
| 367 | static int init_power7_pmu(void) | 363 | static int init_power7_pmu(void) |
| 368 | { | 364 | { |
| 369 | if (!cur_cpu_spec->oprofile_cpu_type || | 365 | if (!cur_cpu_spec->oprofile_cpu_type || |
| 370 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7")) | 366 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7")) |
| 371 | return -ENODEV; | 367 | return -ENODEV; |
| 372 | 368 | ||
| 373 | return register_power_pmu(&power7_pmu); | 369 | return register_power_pmu(&power7_pmu); |
| 374 | } | 370 | } |
| 375 | 371 | ||
| 376 | arch_initcall(init_power7_pmu); | 372 | arch_initcall(init_power7_pmu); |
| 377 | 373 |
arch/powerpc/kernel/ppc970-pmu.c
| 1 | /* | 1 | /* |
| 2 | * Performance counter support for PPC970-family processors. | 2 | * Performance counter support for PPC970-family processors. |
| 3 | * | 3 | * |
| 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
| 10 | */ | 10 | */ |
| 11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
| 12 | #include <linux/perf_event.h> | 12 | #include <linux/perf_event.h> |
| 13 | #include <asm/reg.h> | 13 | #include <asm/reg.h> |
| 14 | #include <asm/cputable.h> | 14 | #include <asm/cputable.h> |
| 15 | 15 | ||
| 16 | /* | 16 | /* |
| 17 | * Bits in event code for PPC970 | 17 | * Bits in event code for PPC970 |
| 18 | */ | 18 | */ |
| 19 | #define PM_PMC_SH 12 /* PMC number (1-based) for direct events */ | 19 | #define PM_PMC_SH 12 /* PMC number (1-based) for direct events */ |
| 20 | #define PM_PMC_MSK 0xf | 20 | #define PM_PMC_MSK 0xf |
| 21 | #define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ | 21 | #define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */ |
| 22 | #define PM_UNIT_MSK 0xf | 22 | #define PM_UNIT_MSK 0xf |
| 23 | #define PM_SPCSEL_SH 6 | 23 | #define PM_SPCSEL_SH 6 |
| 24 | #define PM_SPCSEL_MSK 3 | 24 | #define PM_SPCSEL_MSK 3 |
| 25 | #define PM_BYTE_SH 4 /* Byte number of event bus to use */ | 25 | #define PM_BYTE_SH 4 /* Byte number of event bus to use */ |
| 26 | #define PM_BYTE_MSK 3 | 26 | #define PM_BYTE_MSK 3 |
| 27 | #define PM_PMCSEL_MSK 0xf | 27 | #define PM_PMCSEL_MSK 0xf |
| 28 | 28 | ||
| 29 | /* Values in PM_UNIT field */ | 29 | /* Values in PM_UNIT field */ |
| 30 | #define PM_NONE 0 | 30 | #define PM_NONE 0 |
| 31 | #define PM_FPU 1 | 31 | #define PM_FPU 1 |
| 32 | #define PM_VPU 2 | 32 | #define PM_VPU 2 |
| 33 | #define PM_ISU 3 | 33 | #define PM_ISU 3 |
| 34 | #define PM_IFU 4 | 34 | #define PM_IFU 4 |
| 35 | #define PM_IDU 5 | 35 | #define PM_IDU 5 |
| 36 | #define PM_STS 6 | 36 | #define PM_STS 6 |
| 37 | #define PM_LSU0 7 | 37 | #define PM_LSU0 7 |
| 38 | #define PM_LSU1U 8 | 38 | #define PM_LSU1U 8 |
| 39 | #define PM_LSU1L 9 | 39 | #define PM_LSU1L 9 |
| 40 | #define PM_LASTUNIT 9 | 40 | #define PM_LASTUNIT 9 |
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * Bits in MMCR0 for PPC970 | 43 | * Bits in MMCR0 for PPC970 |
| 44 | */ | 44 | */ |
| 45 | #define MMCR0_PMC1SEL_SH 8 | 45 | #define MMCR0_PMC1SEL_SH 8 |
| 46 | #define MMCR0_PMC2SEL_SH 1 | 46 | #define MMCR0_PMC2SEL_SH 1 |
| 47 | #define MMCR_PMCSEL_MSK 0x1f | 47 | #define MMCR_PMCSEL_MSK 0x1f |
| 48 | 48 | ||
| 49 | /* | 49 | /* |
| 50 | * Bits in MMCR1 for PPC970 | 50 | * Bits in MMCR1 for PPC970 |
| 51 | */ | 51 | */ |
| 52 | #define MMCR1_TTM0SEL_SH 62 | 52 | #define MMCR1_TTM0SEL_SH 62 |
| 53 | #define MMCR1_TTM1SEL_SH 59 | 53 | #define MMCR1_TTM1SEL_SH 59 |
| 54 | #define MMCR1_TTM3SEL_SH 53 | 54 | #define MMCR1_TTM3SEL_SH 53 |
| 55 | #define MMCR1_TTMSEL_MSK 3 | 55 | #define MMCR1_TTMSEL_MSK 3 |
| 56 | #define MMCR1_TD_CP_DBG0SEL_SH 50 | 56 | #define MMCR1_TD_CP_DBG0SEL_SH 50 |
| 57 | #define MMCR1_TD_CP_DBG1SEL_SH 48 | 57 | #define MMCR1_TD_CP_DBG1SEL_SH 48 |
| 58 | #define MMCR1_TD_CP_DBG2SEL_SH 46 | 58 | #define MMCR1_TD_CP_DBG2SEL_SH 46 |
| 59 | #define MMCR1_TD_CP_DBG3SEL_SH 44 | 59 | #define MMCR1_TD_CP_DBG3SEL_SH 44 |
| 60 | #define MMCR1_PMC1_ADDER_SEL_SH 39 | 60 | #define MMCR1_PMC1_ADDER_SEL_SH 39 |
| 61 | #define MMCR1_PMC2_ADDER_SEL_SH 38 | 61 | #define MMCR1_PMC2_ADDER_SEL_SH 38 |
| 62 | #define MMCR1_PMC6_ADDER_SEL_SH 37 | 62 | #define MMCR1_PMC6_ADDER_SEL_SH 37 |
| 63 | #define MMCR1_PMC5_ADDER_SEL_SH 36 | 63 | #define MMCR1_PMC5_ADDER_SEL_SH 36 |
| 64 | #define MMCR1_PMC8_ADDER_SEL_SH 35 | 64 | #define MMCR1_PMC8_ADDER_SEL_SH 35 |
| 65 | #define MMCR1_PMC7_ADDER_SEL_SH 34 | 65 | #define MMCR1_PMC7_ADDER_SEL_SH 34 |
| 66 | #define MMCR1_PMC3_ADDER_SEL_SH 33 | 66 | #define MMCR1_PMC3_ADDER_SEL_SH 33 |
| 67 | #define MMCR1_PMC4_ADDER_SEL_SH 32 | 67 | #define MMCR1_PMC4_ADDER_SEL_SH 32 |
| 68 | #define MMCR1_PMC3SEL_SH 27 | 68 | #define MMCR1_PMC3SEL_SH 27 |
| 69 | #define MMCR1_PMC4SEL_SH 22 | 69 | #define MMCR1_PMC4SEL_SH 22 |
| 70 | #define MMCR1_PMC5SEL_SH 17 | 70 | #define MMCR1_PMC5SEL_SH 17 |
| 71 | #define MMCR1_PMC6SEL_SH 12 | 71 | #define MMCR1_PMC6SEL_SH 12 |
| 72 | #define MMCR1_PMC7SEL_SH 7 | 72 | #define MMCR1_PMC7SEL_SH 7 |
| 73 | #define MMCR1_PMC8SEL_SH 2 | 73 | #define MMCR1_PMC8SEL_SH 2 |
| 74 | 74 | ||
| 75 | static short mmcr1_adder_bits[8] = { | 75 | static short mmcr1_adder_bits[8] = { |
| 76 | MMCR1_PMC1_ADDER_SEL_SH, | 76 | MMCR1_PMC1_ADDER_SEL_SH, |
| 77 | MMCR1_PMC2_ADDER_SEL_SH, | 77 | MMCR1_PMC2_ADDER_SEL_SH, |
| 78 | MMCR1_PMC3_ADDER_SEL_SH, | 78 | MMCR1_PMC3_ADDER_SEL_SH, |
| 79 | MMCR1_PMC4_ADDER_SEL_SH, | 79 | MMCR1_PMC4_ADDER_SEL_SH, |
| 80 | MMCR1_PMC5_ADDER_SEL_SH, | 80 | MMCR1_PMC5_ADDER_SEL_SH, |
| 81 | MMCR1_PMC6_ADDER_SEL_SH, | 81 | MMCR1_PMC6_ADDER_SEL_SH, |
| 82 | MMCR1_PMC7_ADDER_SEL_SH, | 82 | MMCR1_PMC7_ADDER_SEL_SH, |
| 83 | MMCR1_PMC8_ADDER_SEL_SH | 83 | MMCR1_PMC8_ADDER_SEL_SH |
| 84 | }; | 84 | }; |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * Bits in MMCRA | ||
| 88 | */ | ||
| 89 | |||
| 90 | /* | ||
| 91 | * Layout of constraint bits: | 87 | * Layout of constraint bits: |
| 92 | * 6666555555555544444444443333333333222222222211111111110000000000 | 88 | * 6666555555555544444444443333333333222222222211111111110000000000 |
| 93 | * 3210987654321098765432109876543210987654321098765432109876543210 | 89 | * 3210987654321098765432109876543210987654321098765432109876543210 |
| 94 | * <><><>[ >[ >[ >< >< >< >< ><><><><><><><><> | 90 | * <><><>[ >[ >[ >< >< >< >< ><><><><><><><><> |
| 95 | * SPT0T1 UC PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 | 91 | * SPT0T1 UC PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8 |
| 96 | * | 92 | * |
| 97 | * SP - SPCSEL constraint | 93 | * SP - SPCSEL constraint |
| 98 | * 48-49: SPCSEL value 0x3_0000_0000_0000 | 94 | * 48-49: SPCSEL value 0x3_0000_0000_0000 |
| 99 | * | 95 | * |
| 100 | * T0 - TTM0 constraint | 96 | * T0 - TTM0 constraint |
| 101 | * 46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000 | 97 | * 46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000 |
| 102 | * | 98 | * |
| 103 | * T1 - TTM1 constraint | 99 | * T1 - TTM1 constraint |
| 104 | * 44-45: TTM1SEL value (0=IDU, 3=STS) 0x3000_0000_0000 | 100 | * 44-45: TTM1SEL value (0=IDU, 3=STS) 0x3000_0000_0000 |
| 105 | * | 101 | * |
| 106 | * UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS | 102 | * UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS |
| 107 | * 43: UC3 error 0x0800_0000_0000 | 103 | * 43: UC3 error 0x0800_0000_0000 |
| 108 | * 42: FPU|IFU|VPU events needed 0x0400_0000_0000 | 104 | * 42: FPU|IFU|VPU events needed 0x0400_0000_0000 |
| 109 | * 41: ISU events needed 0x0200_0000_0000 | 105 | * 41: ISU events needed 0x0200_0000_0000 |
| 110 | * 40: IDU|STS events needed 0x0100_0000_0000 | 106 | * 40: IDU|STS events needed 0x0100_0000_0000 |
| 111 | * | 107 | * |
| 112 | * PS1 | 108 | * PS1 |
| 113 | * 39: PS1 error 0x0080_0000_0000 | 109 | * 39: PS1 error 0x0080_0000_0000 |
| 114 | * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000 | 110 | * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000 |
| 115 | * | 111 | * |
| 116 | * PS2 | 112 | * PS2 |
| 117 | * 35: PS2 error 0x0008_0000_0000 | 113 | * 35: PS2 error 0x0008_0000_0000 |
| 118 | * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000 | 114 | * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000 |
| 119 | * | 115 | * |
| 120 | * B0 | 116 | * B0 |
| 121 | * 28-31: Byte 0 event source 0xf000_0000 | 117 | * 28-31: Byte 0 event source 0xf000_0000 |
| 122 | * Encoding as for the event code | 118 | * Encoding as for the event code |
| 123 | * | 119 | * |
| 124 | * B1, B2, B3 | 120 | * B1, B2, B3 |
| 125 | * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources | 121 | * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources |
| 126 | * | 122 | * |
| 127 | * P1 | 123 | * P1 |
| 128 | * 15: P1 error 0x8000 | 124 | * 15: P1 error 0x8000 |
| 129 | * 14-15: Count of events needing PMC1 | 125 | * 14-15: Count of events needing PMC1 |
| 130 | * | 126 | * |
| 131 | * P2..P8 | 127 | * P2..P8 |
| 132 | * 0-13: Count of events needing PMC2..PMC8 | 128 | * 0-13: Count of events needing PMC2..PMC8 |
| 133 | */ | 129 | */ |
| 134 | 130 | ||
| 135 | static unsigned char direct_marked_event[8] = { | 131 | static unsigned char direct_marked_event[8] = { |
| 136 | (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */ | 132 | (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */ |
| 137 | (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */ | 133 | (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */ |
| 138 | (1<<3) | (1<<5), /* PMC3: PM_MRK_ST_CMPL_INT, PM_MRK_VMX_FIN */ | 134 | (1<<3) | (1<<5), /* PMC3: PM_MRK_ST_CMPL_INT, PM_MRK_VMX_FIN */ |
| 139 | (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */ | 135 | (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */ |
| 140 | (1<<4) | (1<<5), /* PMC5: PM_GRP_MRK, PM_MRK_GRP_TIMEO */ | 136 | (1<<4) | (1<<5), /* PMC5: PM_GRP_MRK, PM_MRK_GRP_TIMEO */ |
| 141 | (1<<3) | (1<<4) | (1<<5), | 137 | (1<<3) | (1<<4) | (1<<5), |
| 142 | /* PMC6: PM_MRK_ST_STS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */ | 138 | /* PMC6: PM_MRK_ST_STS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */ |
| 143 | (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */ | 139 | (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */ |
| 144 | (1<<4) /* PMC8: PM_MRK_LSU_FIN */ | 140 | (1<<4) /* PMC8: PM_MRK_LSU_FIN */ |
| 145 | }; | 141 | }; |
| 146 | 142 | ||
| 147 | /* | 143 | /* |
| 148 | * Returns 1 if event counts things relating to marked instructions | 144 | * Returns 1 if event counts things relating to marked instructions |
| 149 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. | 145 | * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. |
| 150 | */ | 146 | */ |
| 151 | static int p970_marked_instr_event(u64 event) | 147 | static int p970_marked_instr_event(u64 event) |
| 152 | { | 148 | { |
| 153 | int pmc, psel, unit, byte, bit; | 149 | int pmc, psel, unit, byte, bit; |
| 154 | unsigned int mask; | 150 | unsigned int mask; |
| 155 | 151 | ||
| 156 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 152 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 157 | psel = event & PM_PMCSEL_MSK; | 153 | psel = event & PM_PMCSEL_MSK; |
| 158 | if (pmc) { | 154 | if (pmc) { |
| 159 | if (direct_marked_event[pmc - 1] & (1 << psel)) | 155 | if (direct_marked_event[pmc - 1] & (1 << psel)) |
| 160 | return 1; | 156 | return 1; |
| 161 | if (psel == 0) /* add events */ | 157 | if (psel == 0) /* add events */ |
| 162 | bit = (pmc <= 4) ? pmc - 1 : 8 - pmc; | 158 | bit = (pmc <= 4) ? pmc - 1 : 8 - pmc; |
| 163 | else if (psel == 7 || psel == 13) /* decode events */ | 159 | else if (psel == 7 || psel == 13) /* decode events */ |
| 164 | bit = 4; | 160 | bit = 4; |
| 165 | else | 161 | else |
| 166 | return 0; | 162 | return 0; |
| 167 | } else | 163 | } else |
| 168 | bit = psel; | 164 | bit = psel; |
| 169 | 165 | ||
| 170 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | 166 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 171 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | 167 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 172 | mask = 0; | 168 | mask = 0; |
| 173 | switch (unit) { | 169 | switch (unit) { |
| 174 | case PM_VPU: | 170 | case PM_VPU: |
| 175 | mask = 0x4c; /* byte 0 bits 2,3,6 */ | 171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ |
| 176 | case PM_LSU0: | 172 | case PM_LSU0: |
| 177 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ | 173 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ |
| 178 | mask = 0x085dff00; | 174 | mask = 0x085dff00; |
| 179 | case PM_LSU1L: | 175 | case PM_LSU1L: |
| 180 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ | 176 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ |
| 181 | break; | 177 | break; |
| 182 | } | 178 | } |
| 183 | return (mask >> (byte * 8 + bit)) & 1; | 179 | return (mask >> (byte * 8 + bit)) & 1; |
| 184 | } | 180 | } |
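The switch above has no break after the PM_VPU and PM_LSU0 cases, so control falls through and mask always ends up as the PM_LSU1L value (0x50 << 24), contradicting the per-unit comments; VPU and LSU0 marked events are then tested against the wrong byte lanes. Assuming the fallthrough is unintended, the fixed switch would read:

	switch (unit) {
	case PM_VPU:
		mask = 0x4c;		/* byte 0 bits 2,3,6 */
		break;
	case PM_LSU0:
		/* byte 2 bits 0,2,3,4,6; all of byte 1 */
		mask = 0x085dff00;
		break;
	case PM_LSU1L:
		mask = 0x50 << 24;	/* byte 3 bits 4,6 */
		break;
	}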
| 185 | 181 | ||
| 186 | /* Masks and values for using events from the various units */ | 182 | /* Masks and values for using events from the various units */ |
| 187 | static unsigned long unit_cons[PM_LASTUNIT+1][2] = { | 183 | static unsigned long unit_cons[PM_LASTUNIT+1][2] = { |
| 188 | [PM_FPU] = { 0xc80000000000ull, 0x040000000000ull }, | 184 | [PM_FPU] = { 0xc80000000000ull, 0x040000000000ull }, |
| 189 | [PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull }, | 185 | [PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull }, |
| 190 | [PM_ISU] = { 0x080000000000ull, 0x020000000000ull }, | 186 | [PM_ISU] = { 0x080000000000ull, 0x020000000000ull }, |
| 191 | [PM_IFU] = { 0xc80000000000ull, 0x840000000000ull }, | 187 | [PM_IFU] = { 0xc80000000000ull, 0x840000000000ull }, |
| 192 | [PM_IDU] = { 0x380000000000ull, 0x010000000000ull }, | 188 | [PM_IDU] = { 0x380000000000ull, 0x010000000000ull }, |
| 193 | [PM_STS] = { 0x380000000000ull, 0x310000000000ull }, | 189 | [PM_STS] = { 0x380000000000ull, 0x310000000000ull }, |
| 194 | }; | 190 | }; |
| 195 | 191 | ||
| 196 | static int p970_get_constraint(u64 event, unsigned long *maskp, | 192 | static int p970_get_constraint(u64 event, unsigned long *maskp, |
| 197 | unsigned long *valp) | 193 | unsigned long *valp) |
| 198 | { | 194 | { |
| 199 | int pmc, byte, unit, sh, spcsel; | 195 | int pmc, byte, unit, sh, spcsel; |
| 200 | unsigned long mask = 0, value = 0; | 196 | unsigned long mask = 0, value = 0; |
| 201 | int grp = -1; | 197 | int grp = -1; |
| 202 | 198 | ||
| 203 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; | 199 | pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; |
| 204 | if (pmc) { | 200 | if (pmc) { |
| 205 | if (pmc > 8) | 201 | if (pmc > 8) |
| 206 | return -1; | 202 | return -1; |
| 207 | sh = (pmc - 1) * 2; | 203 | sh = (pmc - 1) * 2; |
| 208 | mask |= 2 << sh; | 204 | mask |= 2 << sh; |
| 209 | value |= 1 << sh; | 205 | value |= 1 << sh; |
| 210 | grp = ((pmc - 1) >> 1) & 1; | 206 | grp = ((pmc - 1) >> 1) & 1; |
| 211 | } | 207 | } |
| 212 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; | 208 | unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 213 | if (unit) { | 209 | if (unit) { |
| 214 | if (unit > PM_LASTUNIT) | 210 | if (unit > PM_LASTUNIT) |
| 215 | return -1; | 211 | return -1; |
| 216 | mask |= unit_cons[unit][0]; | 212 | mask |= unit_cons[unit][0]; |
| 217 | value |= unit_cons[unit][1]; | 213 | value |= unit_cons[unit][1]; |
| 218 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; | 214 | byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 219 | /* | 215 | /* |
| 220 | * Bus events on bytes 0 and 2 can be counted | 216 | * Bus events on bytes 0 and 2 can be counted |
| 221 | * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8. | 217 | * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8. |
| 222 | */ | 218 | */ |
| 223 | if (!pmc) | 219 | if (!pmc) |
| 224 | grp = byte & 1; | 220 | grp = byte & 1; |
| 225 | /* Set byte lane select field */ | 221 | /* Set byte lane select field */ |
| 226 | mask |= 0xfULL << (28 - 4 * byte); | 222 | mask |= 0xfULL << (28 - 4 * byte); |
| 227 | value |= (unsigned long)unit << (28 - 4 * byte); | 223 | value |= (unsigned long)unit << (28 - 4 * byte); |
| 228 | } | 224 | } |
| 229 | if (grp == 0) { | 225 | if (grp == 0) { |
| 230 | /* increment PMC1/2/5/6 field */ | 226 | /* increment PMC1/2/5/6 field */ |
| 231 | mask |= 0x8000000000ull; | 227 | mask |= 0x8000000000ull; |
| 232 | value |= 0x1000000000ull; | 228 | value |= 0x1000000000ull; |
| 233 | } else if (grp == 1) { | 229 | } else if (grp == 1) { |
| 234 | /* increment PMC3/4/7/8 field */ | 230 | /* increment PMC3/4/7/8 field */ |
| 235 | mask |= 0x800000000ull; | 231 | mask |= 0x800000000ull; |
| 236 | value |= 0x100000000ull; | 232 | value |= 0x100000000ull; |
| 237 | } | 233 | } |
| 238 | spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; | 234 | spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; |
| 239 | if (spcsel) { | 235 | if (spcsel) { |
| 240 | mask |= 3ull << 48; | 236 | mask |= 3ull << 48; |
| 241 | value |= (unsigned long)spcsel << 48; | 237 | value |= (unsigned long)spcsel << 48; |
| 242 | } | 238 | } |
| 243 | *maskp = mask; | 239 | *maskp = mask; |
| 244 | *valp = value; | 240 | *valp = value; |
| 245 | return 0; | 241 | return 0; |
| 246 | } | 242 | } |
| 247 | 243 | ||
| 248 | static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) | 244 | static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) |
| 249 | { | 245 | { |
| 250 | alt[0] = event; | 246 | alt[0] = event; |
| 251 | 247 | ||
| 252 | /* 2 alternatives for LSU empty */ | 248 | /* 2 alternatives for LSU empty */ |
| 253 | if (event == 0x2002 || event == 0x3002) { | 249 | if (event == 0x2002 || event == 0x3002) { |
| 254 | alt[1] = event ^ 0x1000; | 250 | alt[1] = event ^ 0x1000; |
| 255 | return 2; | 251 | return 2; |
| 256 | } | 252 | } |
| 257 | 253 | ||
| 258 | return 1; | 254 | return 1; |
| 259 | } | 255 | } |
| 260 | 256 | ||
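The single alternative above comes from flipping one bit of the PMC-select field, turning the PMC2 encoding of the LSU-empty event into the PMC3 one. A tiny sketch, assuming the PMC field sits at bit 12 (which the 0x2002/0x3002 pair implies; the actual PM_PMC_SH definition is earlier in this file, outside this excerpt):

    #include <stdio.h>

    #define PM_PMC_SH 12 /* assumed from the 0x2002 <-> 0x3002 pair */

    int main(void)
    {
            unsigned long long ev = 0x2002; /* LSU empty counted on PMC2 */
            unsigned long long alt = ev ^ (1ull << PM_PMC_SH);

            printf("0x%llx -> 0x%llx\n", ev, alt); /* 0x2002 -> 0x3002 */
            return 0;
    }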
| 261 | static int p970_compute_mmcr(u64 event[], int n_ev, | 257 | static int p970_compute_mmcr(u64 event[], int n_ev, |
| 262 | unsigned int hwc[], unsigned long mmcr[]) | 258 | unsigned int hwc[], unsigned long mmcr[]) |
| 263 | { | 259 | { |
| 264 | unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0; | 260 | unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0; |
| 265 | unsigned int pmc, unit, byte, psel; | 261 | unsigned int pmc, unit, byte, psel; |
| 266 | unsigned int ttm, grp; | 262 | unsigned int ttm, grp; |
| 267 | unsigned int pmc_inuse = 0; | 263 | unsigned int pmc_inuse = 0; |
| 268 | unsigned int pmc_grp_use[2]; | 264 | unsigned int pmc_grp_use[2]; |
| 269 | unsigned char busbyte[4]; | 265 | unsigned char busbyte[4]; |
| 270 | unsigned char unituse[16]; | 266 | unsigned char unituse[16]; |
| 271 | unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 }; | 267 | unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 }; |
| 272 | unsigned char ttmuse[2]; | 268 | unsigned char ttmuse[2]; |
| 273 | unsigned char pmcsel[8]; | 269 | unsigned char pmcsel[8]; |
| 274 | int i; | 270 | int i; |
| 275 | int spcsel; | 271 | int spcsel; |
| 276 | 272 | ||
| 277 | if (n_ev > 8) | 273 | if (n_ev > 8) |
| 278 | return -1; | 274 | return -1; |
| 279 | 275 | ||
| 280 | /* First pass to count resource use */ | 276 | /* First pass to count resource use */ |
| 281 | pmc_grp_use[0] = pmc_grp_use[1] = 0; | 277 | pmc_grp_use[0] = pmc_grp_use[1] = 0; |
| 282 | memset(busbyte, 0, sizeof(busbyte)); | 278 | memset(busbyte, 0, sizeof(busbyte)); |
| 283 | memset(unituse, 0, sizeof(unituse)); | 279 | memset(unituse, 0, sizeof(unituse)); |
| 284 | for (i = 0; i < n_ev; ++i) { | 280 | for (i = 0; i < n_ev; ++i) { |
| 285 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 281 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 286 | if (pmc) { | 282 | if (pmc) { |
| 287 | if (pmc_inuse & (1 << (pmc - 1))) | 283 | if (pmc_inuse & (1 << (pmc - 1))) |
| 288 | return -1; | 284 | return -1; |
| 289 | pmc_inuse |= 1 << (pmc - 1); | 285 | pmc_inuse |= 1 << (pmc - 1); |
| 290 | /* count 1/2/5/6 vs 3/4/7/8 use */ | 286 | /* count 1/2/5/6 vs 3/4/7/8 use */ |
| 291 | ++pmc_grp_use[((pmc - 1) >> 1) & 1]; | 287 | ++pmc_grp_use[((pmc - 1) >> 1) & 1]; |
| 292 | } | 288 | } |
| 293 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | 289 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 294 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | 290 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 295 | if (unit) { | 291 | if (unit) { |
| 296 | if (unit > PM_LASTUNIT) | 292 | if (unit > PM_LASTUNIT) |
| 297 | return -1; | 293 | return -1; |
| 298 | if (!pmc) | 294 | if (!pmc) |
| 299 | ++pmc_grp_use[byte & 1]; | 295 | ++pmc_grp_use[byte & 1]; |
| 300 | if (busbyte[byte] && busbyte[byte] != unit) | 296 | if (busbyte[byte] && busbyte[byte] != unit) |
| 301 | return -1; | 297 | return -1; |
| 302 | busbyte[byte] = unit; | 298 | busbyte[byte] = unit; |
| 303 | unituse[unit] = 1; | 299 | unituse[unit] = 1; |
| 304 | } | 300 | } |
| 305 | } | 301 | } |
| 306 | if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4) | 302 | if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4) |
| 307 | return -1; | 303 | return -1; |
| 308 | 304 | ||
| 309 | /* | 305 | /* |
| 310 | * Assign resources and set multiplexer selects. | 306 | * Assign resources and set multiplexer selects. |
| 311 | * | 307 | * |
| 312 | * PM_ISU can go either on TTM0 or TTM1, but that's the only | 308 | * PM_ISU can go either on TTM0 or TTM1, but that's the only |
| 313 | * choice we have to deal with. | 309 | * choice we have to deal with. |
| 314 | */ | 310 | */ |
| 315 | if (unituse[PM_ISU] & | 311 | if (unituse[PM_ISU] & |
| 316 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU])) | 312 | (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU])) |
| 317 | unitmap[PM_ISU] = 2 | 4; /* move ISU to TTM1 */ | 313 | unitmap[PM_ISU] = 2 | 4; /* move ISU to TTM1 */ |
| 318 | /* Set TTM[01]SEL fields. */ | 314 | /* Set TTM[01]SEL fields. */ |
| 319 | ttmuse[0] = ttmuse[1] = 0; | 315 | ttmuse[0] = ttmuse[1] = 0; |
| 320 | for (i = PM_FPU; i <= PM_STS; ++i) { | 316 | for (i = PM_FPU; i <= PM_STS; ++i) { |
| 321 | if (!unituse[i]) | 317 | if (!unituse[i]) |
| 322 | continue; | 318 | continue; |
| 323 | ttm = unitmap[i]; | 319 | ttm = unitmap[i]; |
| 324 | ++ttmuse[(ttm >> 2) & 1]; | 320 | ++ttmuse[(ttm >> 2) & 1]; |
| 325 | mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH; | 321 | mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH; |
| 326 | } | 322 | } |
| 327 | /* Check only one unit per TTMx */ | 323 | /* Check only one unit per TTMx */ |
| 328 | if (ttmuse[0] > 1 || ttmuse[1] > 1) | 324 | if (ttmuse[0] > 1 || ttmuse[1] > 1) |
| 329 | return -1; | 325 | return -1; |
| 330 | 326 | ||
| 331 | /* Set byte lane select fields and TTM3SEL. */ | 327 | /* Set byte lane select fields and TTM3SEL. */ |
| 332 | for (byte = 0; byte < 4; ++byte) { | 328 | for (byte = 0; byte < 4; ++byte) { |
| 333 | unit = busbyte[byte]; | 329 | unit = busbyte[byte]; |
| 334 | if (!unit) | 330 | if (!unit) |
| 335 | continue; | 331 | continue; |
| 336 | if (unit <= PM_STS) | 332 | if (unit <= PM_STS) |
| 337 | ttm = (unitmap[unit] >> 2) & 1; | 333 | ttm = (unitmap[unit] >> 2) & 1; |
| 338 | else if (unit == PM_LSU0) | 334 | else if (unit == PM_LSU0) |
| 339 | ttm = 2; | 335 | ttm = 2; |
| 340 | else { | 336 | else { |
| 341 | ttm = 3; | 337 | ttm = 3; |
| 342 | if (unit == PM_LSU1L && byte >= 2) | 338 | if (unit == PM_LSU1L && byte >= 2) |
| 343 | mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); | 339 | mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte); |
| 344 | } | 340 | } |
| 345 | mmcr1 |= (unsigned long)ttm | 341 | mmcr1 |= (unsigned long)ttm |
| 346 | << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); | 342 | << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); |
| 347 | } | 343 | } |
| 348 | 344 | ||
| 349 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ | 345 | /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ |
| 350 | memset(pmcsel, 0x8, sizeof(pmcsel)); /* 8 means don't count */ | 346 | memset(pmcsel, 0x8, sizeof(pmcsel)); /* 8 means don't count */ |
| 351 | for (i = 0; i < n_ev; ++i) { | 347 | for (i = 0; i < n_ev; ++i) { |
| 352 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; | 348 | pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; |
| 353 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; | 349 | unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; |
| 354 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; | 350 | byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; |
| 355 | psel = event[i] & PM_PMCSEL_MSK; | 351 | psel = event[i] & PM_PMCSEL_MSK; |
| 356 | if (!pmc) { | 352 | if (!pmc) { |
| 357 | /* Bus event or any-PMC direct event */ | 353 | /* Bus event or any-PMC direct event */ |
| 358 | if (unit) | 354 | if (unit) |
| 359 | psel |= 0x10 | ((byte & 2) << 2); | 355 | psel |= 0x10 | ((byte & 2) << 2); |
| 360 | else | 356 | else |
| 361 | psel |= 8; | 357 | psel |= 8; |
| 362 | for (pmc = 0; pmc < 8; ++pmc) { | 358 | for (pmc = 0; pmc < 8; ++pmc) { |
| 363 | if (pmc_inuse & (1 << pmc)) | 359 | if (pmc_inuse & (1 << pmc)) |
| 364 | continue; | 360 | continue; |
| 365 | grp = (pmc >> 1) & 1; | 361 | grp = (pmc >> 1) & 1; |
| 366 | if (unit) { | 362 | if (unit) { |
| 367 | if (grp == (byte & 1)) | 363 | if (grp == (byte & 1)) |
| 368 | break; | 364 | break; |
| 369 | } else if (pmc_grp_use[grp] < 4) { | 365 | } else if (pmc_grp_use[grp] < 4) { |
| 370 | ++pmc_grp_use[grp]; | 366 | ++pmc_grp_use[grp]; |
| 371 | break; | 367 | break; |
| 372 | } | 368 | } |
| 373 | } | 369 | } |
| 374 | pmc_inuse |= 1 << pmc; | 370 | pmc_inuse |= 1 << pmc; |
| 375 | } else { | 371 | } else { |
| 376 | /* Direct event */ | 372 | /* Direct event */ |
| 377 | --pmc; | 373 | --pmc; |
| 378 | if (psel == 0 && (byte & 2)) | 374 | if (psel == 0 && (byte & 2)) |
| 379 | /* add events on higher-numbered bus */ | 375 | /* add events on higher-numbered bus */ |
| 380 | mmcr1 |= 1ull << mmcr1_adder_bits[pmc]; | 376 | mmcr1 |= 1ull << mmcr1_adder_bits[pmc]; |
| 381 | } | 377 | } |
| 382 | pmcsel[pmc] = psel; | 378 | pmcsel[pmc] = psel; |
| 383 | hwc[i] = pmc; | 379 | hwc[i] = pmc; |
| 384 | spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; | 380 | spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK; |
| 385 | mmcr1 |= spcsel; | 381 | mmcr1 |= spcsel; |
| 386 | if (p970_marked_instr_event(event[i])) | 382 | if (p970_marked_instr_event(event[i])) |
| 387 | mmcra |= MMCRA_SAMPLE_ENABLE; | 383 | mmcra |= MMCRA_SAMPLE_ENABLE; |
| 388 | } | 384 | } |
| 389 | for (pmc = 0; pmc < 2; ++pmc) | 385 | for (pmc = 0; pmc < 2; ++pmc) |
| 390 | mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc); | 386 | mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc); |
| 391 | for (; pmc < 8; ++pmc) | 387 | for (; pmc < 8; ++pmc) |
| 392 | mmcr1 |= (unsigned long)pmcsel[pmc] | 388 | mmcr1 |= (unsigned long)pmcsel[pmc] |
| 393 | << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); | 389 | << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)); |
| 394 | if (pmc_inuse & 1) | 390 | if (pmc_inuse & 1) |
| 395 | mmcr0 |= MMCR0_PMC1CE; | 391 | mmcr0 |= MMCR0_PMC1CE; |
| 396 | if (pmc_inuse & 0xfe) | 392 | if (pmc_inuse & 0xfe) |
| 397 | mmcr0 |= MMCR0_PMCjCE; | 393 | mmcr0 |= MMCR0_PMCjCE; |
| 398 | 394 | ||
| 399 | mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ | 395 | mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ |
| 400 | 396 | ||
| 401 | /* Return MMCRx values */ | 397 | /* Return MMCRx values */ |
| 402 | mmcr[0] = mmcr0; | 398 | mmcr[0] = mmcr0; |
| 403 | mmcr[1] = mmcr1; | 399 | mmcr[1] = mmcr1; |
| 404 | mmcr[2] = mmcra; | 400 | mmcr[2] = mmcra; |
| 405 | return 0; | 401 | return 0; |
| 406 | } | 402 | } |
| 407 | 403 | ||
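The second pass of p970_compute_mmcr() hands any-PMC events the first free counter whose group parity fits, tracking allocations in the pmc_inuse bitmap and deriving the PMC1/2/5/6 vs 3/4/7/8 group from (pmc >> 1) & 1. A standalone sketch of just that bitmap-allocation step (PMC indices are 0-based here, as in the loop above):

    #include <stdio.h>

    /* Pick the first free PMC, honouring the 1/2/5/6 vs 3/4/7/8 grouping;
     * want_grp < 0 means any group is acceptable. */
    static int alloc_pmc(unsigned int *inuse, int want_grp)
    {
            int pmc;

            for (pmc = 0; pmc < 8; ++pmc) {
                    if (*inuse & (1u << pmc))
                            continue;               /* already taken */
                    if (want_grp >= 0 && ((pmc >> 1) & 1) != want_grp)
                            continue;               /* wrong group */
                    *inuse |= 1u << pmc;
                    return pmc;
            }
            return -1;
    }

    int main(void)
    {
            unsigned int inuse = 0x03; /* PMC1 and PMC2 already taken */

            printf("%d\n", alloc_pmc(&inuse, 0)); /* 4, i.e. PMC5 (group 0) */
            printf("%d\n", alloc_pmc(&inuse, 1)); /* 2, i.e. PMC3 (group 1) */
            return 0;
    }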
| 408 | static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | 404 | static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[]) |
| 409 | { | 405 | { |
| 410 | int shift, i; | 406 | int shift, i; |
| 411 | 407 | ||
| 412 | if (pmc <= 1) { | 408 | if (pmc <= 1) { |
| 413 | shift = MMCR0_PMC1SEL_SH - 7 * pmc; | 409 | shift = MMCR0_PMC1SEL_SH - 7 * pmc; |
| 414 | i = 0; | 410 | i = 0; |
| 415 | } else { | 411 | } else { |
| 416 | shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2); | 412 | shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2); |
| 417 | i = 1; | 413 | i = 1; |
| 418 | } | 414 | } |
| 419 | /* | 415 | /* |
| 420 | * Setting the PMCxSEL field to 0x08 disables PMC x. | 416 | * Setting the PMCxSEL field to 0x08 disables PMC x. |
| 421 | */ | 417 | */ |
| 422 | mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift); | 418 | mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift); |
| 423 | } | 419 | } |
| 424 | 420 | ||
| 425 | static int ppc970_generic_events[] = { | 421 | static int ppc970_generic_events[] = { |
| 426 | [PERF_COUNT_HW_CPU_CYCLES] = 7, | 422 | [PERF_COUNT_HW_CPU_CYCLES] = 7, |
| 427 | [PERF_COUNT_HW_INSTRUCTIONS] = 1, | 423 | [PERF_COUNT_HW_INSTRUCTIONS] = 1, |
| 428 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */ | 424 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */ |
| 429 | [PERF_COUNT_HW_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */ | 425 | [PERF_COUNT_HW_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */ |
| 430 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */ | 426 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */ |
| 431 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */ | 427 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */ |
| 432 | }; | 428 | }; |
| 433 | 429 | ||
| 434 | #define C(x) PERF_COUNT_HW_CACHE_##x | 430 | #define C(x) PERF_COUNT_HW_CACHE_##x |
| 435 | 431 | ||
| 436 | /* | 432 | /* |
| 437 | * Table of generalized cache-related events. | 433 | * Table of generalized cache-related events. |
| 438 | * 0 means not supported, -1 means nonsensical, other values | 434 | * 0 means not supported, -1 means nonsensical, other values |
| 439 | * are event codes. | 435 | * are event codes. |
| 440 | */ | 436 | */ |
| 441 | static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | 437 | static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { |
| 442 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | 438 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 443 | [C(OP_READ)] = { 0x8810, 0x3810 }, | 439 | [C(OP_READ)] = { 0x8810, 0x3810 }, |
| 444 | [C(OP_WRITE)] = { 0x7810, 0x813 }, | 440 | [C(OP_WRITE)] = { 0x7810, 0x813 }, |
| 445 | [C(OP_PREFETCH)] = { 0x731, 0 }, | 441 | [C(OP_PREFETCH)] = { 0x731, 0 }, |
| 446 | }, | 442 | }, |
| 447 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ | 443 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 448 | [C(OP_READ)] = { 0, 0 }, | 444 | [C(OP_READ)] = { 0, 0 }, |
| 449 | [C(OP_WRITE)] = { -1, -1 }, | 445 | [C(OP_WRITE)] = { -1, -1 }, |
| 450 | [C(OP_PREFETCH)] = { 0, 0 }, | 446 | [C(OP_PREFETCH)] = { 0, 0 }, |
| 451 | }, | 447 | }, |
| 452 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | 448 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 453 | [C(OP_READ)] = { 0, 0 }, | 449 | [C(OP_READ)] = { 0, 0 }, |
| 454 | [C(OP_WRITE)] = { 0, 0 }, | 450 | [C(OP_WRITE)] = { 0, 0 }, |
| 455 | [C(OP_PREFETCH)] = { 0x733, 0 }, | 451 | [C(OP_PREFETCH)] = { 0x733, 0 }, |
| 456 | }, | 452 | }, |
| 457 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 453 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 458 | [C(OP_READ)] = { 0, 0x704 }, | 454 | [C(OP_READ)] = { 0, 0x704 }, |
| 459 | [C(OP_WRITE)] = { -1, -1 }, | 455 | [C(OP_WRITE)] = { -1, -1 }, |
| 460 | [C(OP_PREFETCH)] = { -1, -1 }, | 456 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 461 | }, | 457 | }, |
| 462 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ | 458 | [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 463 | [C(OP_READ)] = { 0, 0x700 }, | 459 | [C(OP_READ)] = { 0, 0x700 }, |
| 464 | [C(OP_WRITE)] = { -1, -1 }, | 460 | [C(OP_WRITE)] = { -1, -1 }, |
| 465 | [C(OP_PREFETCH)] = { -1, -1 }, | 461 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 466 | }, | 462 | }, |
| 467 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ | 463 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ |
| 468 | [C(OP_READ)] = { 0x431, 0x327 }, | 464 | [C(OP_READ)] = { 0x431, 0x327 }, |
| 469 | [C(OP_WRITE)] = { -1, -1 }, | 465 | [C(OP_WRITE)] = { -1, -1 }, |
| 470 | [C(OP_PREFETCH)] = { -1, -1 }, | 466 | [C(OP_PREFETCH)] = { -1, -1 }, |
| 471 | }, | 467 | }, |
| 472 | }; | 468 | }; |
| 473 | 469 | ||
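A sketch of how a consumer reads rows of this table, distinguishing the three cases the comment above names. The enum names are local stand-ins for the PERF_COUNT_HW_CACHE_* constants, and the row values are copied from the table:

    #include <stdio.h>

    enum { OP_READ, OP_WRITE, OP_PREFETCH };
    enum { RES_ACCESS, RES_MISS };

    /* Two rows in the style of ppc970_cache_events above. */
    static const int l1d[3][2] = {
            [OP_READ]     = { 0x8810, 0x3810 },
            [OP_WRITE]    = { 0x7810, 0x813 },
            [OP_PREFETCH] = { 0x731, 0 },
    };

    static const int dtlb[3][2] = {
            [OP_READ]     = { 0, 0x704 },
            [OP_WRITE]    = { -1, -1 },
            [OP_PREFETCH] = { -1, -1 },
    };

    static void resolve(const int row[3][2], int op, int result)
    {
            int code = row[op][result];

            if (code == -1)
                    puts("nonsensical combination");
            else if (code == 0)
                    puts("not supported on this CPU");
            else
                    printf("raw event code 0x%x\n", code);
    }

    int main(void)
    {
            resolve(l1d, OP_READ, RES_MISS);     /* raw event code 0x3810 */
            resolve(l1d, OP_PREFETCH, RES_MISS); /* not supported */
            resolve(dtlb, OP_WRITE, RES_ACCESS); /* nonsensical */
            return 0;
    }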
| 474 | static struct power_pmu ppc970_pmu = { | 470 | static struct power_pmu ppc970_pmu = { |
| 475 | .name = "PPC970/FX/MP", | 471 | .name = "PPC970/FX/MP", |
| 476 | .n_counter = 8, | 472 | .n_counter = 8, |
| 477 | .max_alternatives = 2, | 473 | .max_alternatives = 2, |
| 478 | .add_fields = 0x001100005555ull, | 474 | .add_fields = 0x001100005555ull, |
| 479 | .test_adder = 0x013300000000ull, | 475 | .test_adder = 0x013300000000ull, |
| 480 | .compute_mmcr = p970_compute_mmcr, | 476 | .compute_mmcr = p970_compute_mmcr, |
| 481 | .get_constraint = p970_get_constraint, | 477 | .get_constraint = p970_get_constraint, |
| 482 | .get_alternatives = p970_get_alternatives, | 478 | .get_alternatives = p970_get_alternatives, |
| 483 | .disable_pmc = p970_disable_pmc, | 479 | .disable_pmc = p970_disable_pmc, |
| 484 | .n_generic = ARRAY_SIZE(ppc970_generic_events), | 480 | .n_generic = ARRAY_SIZE(ppc970_generic_events), |
| 485 | .generic_events = ppc970_generic_events, | 481 | .generic_events = ppc970_generic_events, |
| 486 | .cache_events = &ppc970_cache_events, | 482 | .cache_events = &ppc970_cache_events, |
| 487 | }; | 483 | }; |
| 488 | 484 | ||
| 489 | static int init_ppc970_pmu(void) | 485 | static int init_ppc970_pmu(void) |
| 490 | { | 486 | { |
| 491 | if (!cur_cpu_spec->oprofile_cpu_type || | 487 | if (!cur_cpu_spec->oprofile_cpu_type || |
| 492 | (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970") | 488 | (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970") |
| 493 | && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP"))) | 489 | && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP"))) |
| 494 | return -ENODEV; | 490 | return -ENODEV; |
| 495 | 491 | ||
| 496 | return register_power_pmu(&ppc970_pmu); | 492 | return register_power_pmu(&ppc970_pmu); |
| 497 | } | 493 | } |
| 498 | 494 | ||
| 499 | arch_initcall(init_ppc970_pmu); | 495 | arch_initcall(init_ppc970_pmu); |
| 500 | 496 |
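The whole file follows one pattern: decide at boot whether this PMU driver matches the CPU, then register a table of callbacks. A userspace model of that flow, with invented names standing in for struct power_pmu, register_power_pmu() and the cur_cpu_spec lookup:

    #include <stdio.h>
    #include <string.h>

    /* Invented stand-ins for struct power_pmu / register_power_pmu(). */
    struct pmu_desc {
            const char *name;
            int n_counter;
    };

    static int register_pmu(const struct pmu_desc *pmu)
    {
            printf("registered %s (%d counters)\n", pmu->name, pmu->n_counter);
            return 0;
    }

    static int init_ppc970(const char *cpu_type)
    {
            static const struct pmu_desc ppc970 = { "PPC970/FX/MP", 8 };

            if (!cpu_type || (strcmp(cpu_type, "ppc64/970") &&
                              strcmp(cpu_type, "ppc64/970MP")))
                    return -1;      /* -ENODEV in the kernel */
            return register_pmu(&ppc970);
    }

    int main(void)
    {
            init_ppc970("ppc64/970MP");
            return 0;
    }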
arch/powerpc/kernel/setup-common.c
| 1 | /* | 1 | /* |
| 2 | * Common boot and setup code for both 32-bit and 64-bit. | 2 | * Common boot and setup code for both 32-bit and 64-bit. |
| 3 | * Extracted from arch/powerpc/kernel/setup_64.c. | 3 | * Extracted from arch/powerpc/kernel/setup_64.c. |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2001 PPC64 Team, IBM Corp | 5 | * Copyright (C) 2001 PPC64 Team, IBM Corp |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
| 8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
| 9 | * as published by the Free Software Foundation; either version | 9 | * as published by the Free Software Foundation; either version |
| 10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #undef DEBUG | 13 | #undef DEBUG |
| 14 | 14 | ||
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
| 17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
| 18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
| 19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
| 20 | #include <linux/reboot.h> | 20 | #include <linux/reboot.h> |
| 21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
| 22 | #include <linux/initrd.h> | 22 | #include <linux/initrd.h> |
| 23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
| 24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
| 25 | #include <linux/ioport.h> | 25 | #include <linux/ioport.h> |
| 26 | #include <linux/console.h> | 26 | #include <linux/console.h> |
| 27 | #include <linux/screen_info.h> | 27 | #include <linux/screen_info.h> |
| 28 | #include <linux/root_dev.h> | 28 | #include <linux/root_dev.h> |
| 29 | #include <linux/notifier.h> | 29 | #include <linux/notifier.h> |
| 30 | #include <linux/cpu.h> | 30 | #include <linux/cpu.h> |
| 31 | #include <linux/unistd.h> | 31 | #include <linux/unistd.h> |
| 32 | #include <linux/serial.h> | 32 | #include <linux/serial.h> |
| 33 | #include <linux/serial_8250.h> | 33 | #include <linux/serial_8250.h> |
| 34 | #include <linux/debugfs.h> | 34 | #include <linux/debugfs.h> |
| 35 | #include <linux/percpu.h> | 35 | #include <linux/percpu.h> |
| 36 | #include <linux/lmb.h> | 36 | #include <linux/lmb.h> |
| 37 | #include <linux/of_platform.h> | 37 | #include <linux/of_platform.h> |
| 38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
| 39 | #include <asm/prom.h> | 39 | #include <asm/prom.h> |
| 40 | #include <asm/processor.h> | 40 | #include <asm/processor.h> |
| 41 | #include <asm/vdso_datapage.h> | 41 | #include <asm/vdso_datapage.h> |
| 42 | #include <asm/pgtable.h> | 42 | #include <asm/pgtable.h> |
| 43 | #include <asm/smp.h> | 43 | #include <asm/smp.h> |
| 44 | #include <asm/elf.h> | 44 | #include <asm/elf.h> |
| 45 | #include <asm/machdep.h> | 45 | #include <asm/machdep.h> |
| 46 | #include <asm/time.h> | 46 | #include <asm/time.h> |
| 47 | #include <asm/cputable.h> | 47 | #include <asm/cputable.h> |
| 48 | #include <asm/sections.h> | 48 | #include <asm/sections.h> |
| 49 | #include <asm/firmware.h> | 49 | #include <asm/firmware.h> |
| 50 | #include <asm/btext.h> | 50 | #include <asm/btext.h> |
| 51 | #include <asm/nvram.h> | 51 | #include <asm/nvram.h> |
| 52 | #include <asm/setup.h> | 52 | #include <asm/setup.h> |
| 53 | #include <asm/system.h> | 53 | #include <asm/system.h> |
| 54 | #include <asm/rtas.h> | 54 | #include <asm/rtas.h> |
| 55 | #include <asm/iommu.h> | 55 | #include <asm/iommu.h> |
| 56 | #include <asm/serial.h> | 56 | #include <asm/serial.h> |
| 57 | #include <asm/cache.h> | 57 | #include <asm/cache.h> |
| 58 | #include <asm/page.h> | 58 | #include <asm/page.h> |
| 59 | #include <asm/mmu.h> | 59 | #include <asm/mmu.h> |
| 60 | #include <asm/xmon.h> | 60 | #include <asm/xmon.h> |
| 61 | #include <asm/cputhreads.h> | 61 | #include <asm/cputhreads.h> |
| 62 | #include <mm/mmu_decl.h> | 62 | #include <mm/mmu_decl.h> |
| 63 | 63 | ||
| 64 | #include "setup.h" | 64 | #include "setup.h" |
| 65 | 65 | ||
| 66 | #ifdef DEBUG | 66 | #ifdef DEBUG |
| 67 | #include <asm/udbg.h> | 67 | #include <asm/udbg.h> |
| 68 | #define DBG(fmt...) udbg_printf(fmt) | 68 | #define DBG(fmt...) udbg_printf(fmt) |
| 69 | #else | 69 | #else |
| 70 | #define DBG(fmt...) | 70 | #define DBG(fmt...) |
| 71 | #endif | 71 | #endif |
| 72 | 72 | ||
| 73 | /* The main machine-dep calls structure | 73 | /* The main machine-dep calls structure |
| 74 | */ | 74 | */ |
| 75 | struct machdep_calls ppc_md; | 75 | struct machdep_calls ppc_md; |
| 76 | EXPORT_SYMBOL(ppc_md); | 76 | EXPORT_SYMBOL(ppc_md); |
| 77 | struct machdep_calls *machine_id; | 77 | struct machdep_calls *machine_id; |
| 78 | EXPORT_SYMBOL(machine_id); | 78 | EXPORT_SYMBOL(machine_id); |
| 79 | 79 | ||
| 80 | unsigned long klimit = (unsigned long) _end; | 80 | unsigned long klimit = (unsigned long) _end; |
| 81 | 81 | ||
| 82 | char cmd_line[COMMAND_LINE_SIZE]; | 82 | char cmd_line[COMMAND_LINE_SIZE]; |
| 83 | 83 | ||
| 84 | /* | 84 | /* |
| 85 | * This still seems to be needed... -- paulus | 85 | * This still seems to be needed... -- paulus |
| 86 | */ | 86 | */ |
| 87 | struct screen_info screen_info = { | 87 | struct screen_info screen_info = { |
| 88 | .orig_x = 0, | 88 | .orig_x = 0, |
| 89 | .orig_y = 25, | 89 | .orig_y = 25, |
| 90 | .orig_video_cols = 80, | 90 | .orig_video_cols = 80, |
| 91 | .orig_video_lines = 25, | 91 | .orig_video_lines = 25, |
| 92 | .orig_video_isVGA = 1, | 92 | .orig_video_isVGA = 1, |
| 93 | .orig_video_points = 16 | 93 | .orig_video_points = 16 |
| 94 | }; | 94 | }; |
| 95 | 95 | ||
| 96 | #ifdef __DO_IRQ_CANON | 96 | #ifdef __DO_IRQ_CANON |
| 97 | /* XXX should go elsewhere eventually */ | 97 | /* XXX should go elsewhere eventually */ |
| 98 | int ppc_do_canonicalize_irqs; | 98 | int ppc_do_canonicalize_irqs; |
| 99 | EXPORT_SYMBOL(ppc_do_canonicalize_irqs); | 99 | EXPORT_SYMBOL(ppc_do_canonicalize_irqs); |
| 100 | #endif | 100 | #endif |
| 101 | 101 | ||
| 102 | /* also used by kexec */ | 102 | /* also used by kexec */ |
| 103 | void machine_shutdown(void) | 103 | void machine_shutdown(void) |
| 104 | { | 104 | { |
| 105 | if (ppc_md.machine_shutdown) | 105 | if (ppc_md.machine_shutdown) |
| 106 | ppc_md.machine_shutdown(); | 106 | ppc_md.machine_shutdown(); |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | void machine_restart(char *cmd) | 109 | void machine_restart(char *cmd) |
| 110 | { | 110 | { |
| 111 | machine_shutdown(); | 111 | machine_shutdown(); |
| 112 | if (ppc_md.restart) | 112 | if (ppc_md.restart) |
| 113 | ppc_md.restart(cmd); | 113 | ppc_md.restart(cmd); |
| 114 | #ifdef CONFIG_SMP | 114 | #ifdef CONFIG_SMP |
| 115 | smp_send_stop(); | 115 | smp_send_stop(); |
| 116 | #endif | 116 | #endif |
| 117 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); | 117 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); |
| 118 | local_irq_disable(); | 118 | local_irq_disable(); |
| 119 | while (1) ; | 119 | while (1) ; |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | void machine_power_off(void) | 122 | void machine_power_off(void) |
| 123 | { | 123 | { |
| 124 | machine_shutdown(); | 124 | machine_shutdown(); |
| 125 | if (ppc_md.power_off) | 125 | if (ppc_md.power_off) |
| 126 | ppc_md.power_off(); | 126 | ppc_md.power_off(); |
| 127 | #ifdef CONFIG_SMP | 127 | #ifdef CONFIG_SMP |
| 128 | smp_send_stop(); | 128 | smp_send_stop(); |
| 129 | #endif | 129 | #endif |
| 130 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); | 130 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); |
| 131 | local_irq_disable(); | 131 | local_irq_disable(); |
| 132 | while (1) ; | 132 | while (1) ; |
| 133 | } | 133 | } |
| 134 | /* Used by the G5 thermal driver */ | 134 | /* Used by the G5 thermal driver */ |
| 135 | EXPORT_SYMBOL_GPL(machine_power_off); | 135 | EXPORT_SYMBOL_GPL(machine_power_off); |
| 136 | 136 | ||
| 137 | void (*pm_power_off)(void) = machine_power_off; | 137 | void (*pm_power_off)(void) = machine_power_off; |
| 138 | EXPORT_SYMBOL_GPL(pm_power_off); | 138 | EXPORT_SYMBOL_GPL(pm_power_off); |
| 139 | 139 | ||
| 140 | void machine_halt(void) | 140 | void machine_halt(void) |
| 141 | { | 141 | { |
| 142 | machine_shutdown(); | 142 | machine_shutdown(); |
| 143 | if (ppc_md.halt) | 143 | if (ppc_md.halt) |
| 144 | ppc_md.halt(); | 144 | ppc_md.halt(); |
| 145 | #ifdef CONFIG_SMP | 145 | #ifdef CONFIG_SMP |
| 146 | smp_send_stop(); | 146 | smp_send_stop(); |
| 147 | #endif | 147 | #endif |
| 148 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); | 148 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); |
| 149 | local_irq_disable(); | 149 | local_irq_disable(); |
| 150 | while (1) ; | 150 | while (1) ; |
| 151 | } | 151 | } |
| 152 | 152 | ||
| 153 | 153 | ||
| 154 | #ifdef CONFIG_TAU | 154 | #ifdef CONFIG_TAU |
| 155 | extern u32 cpu_temp(unsigned long cpu); | 155 | extern u32 cpu_temp(unsigned long cpu); |
| 156 | extern u32 cpu_temp_both(unsigned long cpu); | 156 | extern u32 cpu_temp_both(unsigned long cpu); |
| 157 | #endif /* CONFIG_TAU */ | 157 | #endif /* CONFIG_TAU */ |
| 158 | 158 | ||
| 159 | #ifdef CONFIG_SMP | 159 | #ifdef CONFIG_SMP |
| 160 | DEFINE_PER_CPU(unsigned int, pvr); | 160 | DEFINE_PER_CPU(unsigned int, pvr); |
| 161 | #endif | 161 | #endif |
| 162 | 162 | ||
| 163 | static int show_cpuinfo(struct seq_file *m, void *v) | 163 | static int show_cpuinfo(struct seq_file *m, void *v) |
| 164 | { | 164 | { |
| 165 | unsigned long cpu_id = (unsigned long)v - 1; | 165 | unsigned long cpu_id = (unsigned long)v - 1; |
| 166 | unsigned int pvr; | 166 | unsigned int pvr; |
| 167 | unsigned short maj; | 167 | unsigned short maj; |
| 168 | unsigned short min; | 168 | unsigned short min; |
| 169 | 169 | ||
| 170 | if (cpu_id == NR_CPUS) { | 170 | if (cpu_id == NR_CPUS) { |
| 171 | struct device_node *root; | 171 | struct device_node *root; |
| 172 | const char *model = NULL; | 172 | const char *model = NULL; |
| 173 | #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) | 173 | #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) |
| 174 | unsigned long bogosum = 0; | 174 | unsigned long bogosum = 0; |
| 175 | int i; | 175 | int i; |
| 176 | for_each_online_cpu(i) | 176 | for_each_online_cpu(i) |
| 177 | bogosum += loops_per_jiffy; | 177 | bogosum += loops_per_jiffy; |
| 178 | seq_printf(m, "total bogomips\t: %lu.%02lu\n", | 178 | seq_printf(m, "total bogomips\t: %lu.%02lu\n", |
| 179 | bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); | 179 | bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); |
| 180 | #endif /* CONFIG_SMP && CONFIG_PPC32 */ | 180 | #endif /* CONFIG_SMP && CONFIG_PPC32 */ |
| 181 | seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); | 181 | seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); |
| 182 | if (ppc_md.name) | 182 | if (ppc_md.name) |
| 183 | seq_printf(m, "platform\t: %s\n", ppc_md.name); | 183 | seq_printf(m, "platform\t: %s\n", ppc_md.name); |
| 184 | root = of_find_node_by_path("/"); | 184 | root = of_find_node_by_path("/"); |
| 185 | if (root) | 185 | if (root) |
| 186 | model = of_get_property(root, "model", NULL); | 186 | model = of_get_property(root, "model", NULL); |
| 187 | if (model) | 187 | if (model) |
| 188 | seq_printf(m, "model\t\t: %s\n", model); | 188 | seq_printf(m, "model\t\t: %s\n", model); |
| 189 | of_node_put(root); | 189 | of_node_put(root); |
| 190 | 190 | ||
| 191 | if (ppc_md.show_cpuinfo != NULL) | 191 | if (ppc_md.show_cpuinfo != NULL) |
| 192 | ppc_md.show_cpuinfo(m); | 192 | ppc_md.show_cpuinfo(m); |
| 193 | 193 | ||
| 194 | #ifdef CONFIG_PPC32 | 194 | #ifdef CONFIG_PPC32 |
| 195 | /* Display the amount of memory */ | 195 | /* Display the amount of memory */ |
| 196 | seq_printf(m, "Memory\t\t: %d MB\n", | 196 | seq_printf(m, "Memory\t\t: %d MB\n", |
| 197 | (unsigned int)(total_memory / (1024 * 1024))); | 197 | (unsigned int)(total_memory / (1024 * 1024))); |
| 198 | #endif | 198 | #endif |
| 199 | 199 | ||
| 200 | return 0; | 200 | return 0; |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | /* We only show online cpus: disable preempt (overzealous, I | 203 | /* We only show online cpus: disable preempt (overzealous, I |
| 204 | * knew) to prevent cpu going down. */ | 204 | * knew) to prevent cpu going down. */ |
| 205 | preempt_disable(); | 205 | preempt_disable(); |
| 206 | if (!cpu_online(cpu_id)) { | 206 | if (!cpu_online(cpu_id)) { |
| 207 | preempt_enable(); | 207 | preempt_enable(); |
| 208 | return 0; | 208 | return 0; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | #ifdef CONFIG_SMP | 211 | #ifdef CONFIG_SMP |
| 212 | pvr = per_cpu(pvr, cpu_id); | 212 | pvr = per_cpu(pvr, cpu_id); |
| 213 | #else | 213 | #else |
| 214 | pvr = mfspr(SPRN_PVR); | 214 | pvr = mfspr(SPRN_PVR); |
| 215 | #endif | 215 | #endif |
| 216 | maj = (pvr >> 8) & 0xFF; | 216 | maj = (pvr >> 8) & 0xFF; |
| 217 | min = pvr & 0xFF; | 217 | min = pvr & 0xFF; |
| 218 | 218 | ||
| 219 | seq_printf(m, "processor\t: %lu\n", cpu_id); | 219 | seq_printf(m, "processor\t: %lu\n", cpu_id); |
| 220 | seq_printf(m, "cpu\t\t: "); | 220 | seq_printf(m, "cpu\t\t: "); |
| 221 | 221 | ||
| 222 | if (cur_cpu_spec->pvr_mask) | 222 | if (cur_cpu_spec->pvr_mask) |
| 223 | seq_printf(m, "%s", cur_cpu_spec->cpu_name); | 223 | seq_printf(m, "%s", cur_cpu_spec->cpu_name); |
| 224 | else | 224 | else |
| 225 | seq_printf(m, "unknown (%08x)", pvr); | 225 | seq_printf(m, "unknown (%08x)", pvr); |
| 226 | 226 | ||
| 227 | #ifdef CONFIG_ALTIVEC | 227 | #ifdef CONFIG_ALTIVEC |
| 228 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | 228 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
| 229 | seq_printf(m, ", altivec supported"); | 229 | seq_printf(m, ", altivec supported"); |
| 230 | #endif /* CONFIG_ALTIVEC */ | 230 | #endif /* CONFIG_ALTIVEC */ |
| 231 | 231 | ||
| 232 | seq_printf(m, "\n"); | 232 | seq_printf(m, "\n"); |
| 233 | 233 | ||
| 234 | #ifdef CONFIG_TAU | 234 | #ifdef CONFIG_TAU |
| 235 | if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) { | 235 | if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) { |
| 236 | #ifdef CONFIG_TAU_AVERAGE | 236 | #ifdef CONFIG_TAU_AVERAGE |
| 237 | /* more straightforward, but potentially misleading */ | 237 | /* more straightforward, but potentially misleading */ |
| 238 | seq_printf(m, "temperature \t: %u C (uncalibrated)\n", | 238 | seq_printf(m, "temperature \t: %u C (uncalibrated)\n", |
| 239 | cpu_temp(cpu_id)); | 239 | cpu_temp(cpu_id)); |
| 240 | #else | 240 | #else |
| 241 | /* show the actual temp sensor range */ | 241 | /* show the actual temp sensor range */ |
| 242 | u32 temp; | 242 | u32 temp; |
| 243 | temp = cpu_temp_both(cpu_id); | 243 | temp = cpu_temp_both(cpu_id); |
| 244 | seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n", | 244 | seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n", |
| 245 | temp & 0xff, temp >> 16); | 245 | temp & 0xff, temp >> 16); |
| 246 | #endif | 246 | #endif |
| 247 | } | 247 | } |
| 248 | #endif /* CONFIG_TAU */ | 248 | #endif /* CONFIG_TAU */ |
| 249 | 249 | ||
| 250 | /* | 250 | /* |
| 251 | * Assume here that all clock rates are the same in a | 251 | * Assume here that all clock rates are the same in a |
| 252 | * smp system. -- Cort | 252 | * smp system. -- Cort |
| 253 | */ | 253 | */ |
| 254 | if (ppc_proc_freq) | 254 | if (ppc_proc_freq) |
| 255 | seq_printf(m, "clock\t\t: %lu.%06luMHz\n", | 255 | seq_printf(m, "clock\t\t: %lu.%06luMHz\n", |
| 256 | ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); | 256 | ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); |
| 257 | 257 | ||
| 258 | if (ppc_md.show_percpuinfo != NULL) | 258 | if (ppc_md.show_percpuinfo != NULL) |
| 259 | ppc_md.show_percpuinfo(m, cpu_id); | 259 | ppc_md.show_percpuinfo(m, cpu_id); |
| 260 | 260 | ||
| 261 | /* If we are a Freescale core, do a simple check so | 261 | /* If we are a Freescale core, do a simple check so |
| 262 | * we don't have to keep adding cases in the future */ | 262 | * we don't have to keep adding cases in the future */ |
| 263 | if (PVR_VER(pvr) & 0x8000) { | 263 | if (PVR_VER(pvr) & 0x8000) { |
| 264 | switch (PVR_VER(pvr)) { | 264 | switch (PVR_VER(pvr)) { |
| 265 | case 0x8000: /* 7441/7450/7451, Voyager */ | 265 | case 0x8000: /* 7441/7450/7451, Voyager */ |
| 266 | case 0x8001: /* 7445/7455, Apollo 6 */ | 266 | case 0x8001: /* 7445/7455, Apollo 6 */ |
| 267 | case 0x8002: /* 7447/7457, Apollo 7 */ | 267 | case 0x8002: /* 7447/7457, Apollo 7 */ |
| 268 | case 0x8003: /* 7447A, Apollo 7 PM */ | 268 | case 0x8003: /* 7447A, Apollo 7 PM */ |
| 269 | case 0x8004: /* 7448, Apollo 8 */ | 269 | case 0x8004: /* 7448, Apollo 8 */ |
| 270 | case 0x800c: /* 7410, Nitro */ | 270 | case 0x800c: /* 7410, Nitro */ |
| 271 | maj = ((pvr >> 8) & 0xF); | 271 | maj = ((pvr >> 8) & 0xF); |
| 272 | min = PVR_MIN(pvr); | 272 | min = PVR_MIN(pvr); |
| 273 | break; | 273 | break; |
| 274 | default: /* e500/book-e */ | 274 | default: /* e500/book-e */ |
| 275 | maj = PVR_MAJ(pvr); | 275 | maj = PVR_MAJ(pvr); |
| 276 | min = PVR_MIN(pvr); | 276 | min = PVR_MIN(pvr); |
| 277 | break; | 277 | break; |
| 278 | } | 278 | } |
| 279 | } else { | 279 | } else { |
| 280 | switch (PVR_VER(pvr)) { | 280 | switch (PVR_VER(pvr)) { |
| 281 | case 0x0020: /* 403 family */ | 281 | case 0x0020: /* 403 family */ |
| 282 | maj = PVR_MAJ(pvr) + 1; | 282 | maj = PVR_MAJ(pvr) + 1; |
| 283 | min = PVR_MIN(pvr); | 283 | min = PVR_MIN(pvr); |
| 284 | break; | 284 | break; |
| 285 | case 0x1008: /* 740P/750P ?? */ | 285 | case 0x1008: /* 740P/750P ?? */ |
| 286 | maj = ((pvr >> 8) & 0xFF) - 1; | 286 | maj = ((pvr >> 8) & 0xFF) - 1; |
| 287 | min = pvr & 0xFF; | 287 | min = pvr & 0xFF; |
| 288 | break; | 288 | break; |
| 289 | default: | 289 | default: |
| 290 | maj = (pvr >> 8) & 0xFF; | 290 | maj = (pvr >> 8) & 0xFF; |
| 291 | min = pvr & 0xFF; | 291 | min = pvr & 0xFF; |
| 292 | break; | 292 | break; |
| 293 | } | 293 | } |
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n", | 296 | seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n", |
| 297 | maj, min, PVR_VER(pvr), PVR_REV(pvr)); | 297 | maj, min, PVR_VER(pvr), PVR_REV(pvr)); |
| 298 | 298 | ||
| 299 | #ifdef CONFIG_PPC32 | 299 | #ifdef CONFIG_PPC32 |
| 300 | seq_printf(m, "bogomips\t: %lu.%02lu\n", | 300 | seq_printf(m, "bogomips\t: %lu.%02lu\n", |
| 301 | loops_per_jiffy / (500000/HZ), | 301 | loops_per_jiffy / (500000/HZ), |
| 302 | (loops_per_jiffy / (5000/HZ)) % 100); | 302 | (loops_per_jiffy / (5000/HZ)) % 100); |
| 303 | #endif | 303 | #endif |
| 304 | 304 | ||
| 305 | #ifdef CONFIG_SMP | 305 | #ifdef CONFIG_SMP |
| 306 | seq_printf(m, "\n"); | 306 | seq_printf(m, "\n"); |
| 307 | #endif | 307 | #endif |
| 308 | 308 | ||
| 309 | preempt_enable(); | 309 | preempt_enable(); |
| 310 | return 0; | 310 | return 0; |
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | static void *c_start(struct seq_file *m, loff_t *pos) | 313 | static void *c_start(struct seq_file *m, loff_t *pos) |
| 314 | { | 314 | { |
| 315 | unsigned long i = *pos; | 315 | unsigned long i = *pos; |
| 316 | 316 | ||
| 317 | return i <= NR_CPUS ? (void *)(i + 1) : NULL; | 317 | return i <= NR_CPUS ? (void *)(i + 1) : NULL; |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | 320 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) |
| 321 | { | 321 | { |
| 322 | ++*pos; | 322 | ++*pos; |
| 323 | return c_start(m, pos); | 323 | return c_start(m, pos); |
| 324 | } | 324 | } |
| 325 | 325 | ||
| 326 | static void c_stop(struct seq_file *m, void *v) | 326 | static void c_stop(struct seq_file *m, void *v) |
| 327 | { | 327 | { |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | const struct seq_operations cpuinfo_op = { | 330 | const struct seq_operations cpuinfo_op = { |
| 331 | .start =c_start, | 331 | .start =c_start, |
| 332 | .next = c_next, | 332 | .next = c_next, |
| 333 | .stop = c_stop, | 333 | .stop = c_stop, |
| 334 | .show = show_cpuinfo, | 334 | .show = show_cpuinfo, |
| 335 | }; | 335 | }; |
| 336 | 336 | ||
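The four callbacks above implement the seq_file iterator contract: start() maps a file position to an opaque cursor, next() advances, show() emits one record, stop() cleans up. c_start() returns (void *)(i + 1) so position 0 is distinguishable from the NULL end-of-sequence value, and the <= (rather than <) admits one extra position, which show_cpuinfo() uses for the machine-wide summary block. A standalone imitation of the loop the kernel drives:

    #include <stdio.h>

    #define NR 4 /* stand-in for NR_CPUS */

    static void *start(long *pos)
    {
            long i = *pos;

            return i <= NR ? (void *)(i + 1) : NULL;
    }

    static void *next(long *pos)
    {
            ++*pos;
            return start(pos);
    }

    static void show(void *v)
    {
            printf("record %ld\n", (long)v - 1); /* undo the +1 offset */
    }

    int main(void)
    {
            long pos = 0;
            void *v;

            for (v = start(&pos); v != NULL; v = next(&pos))
                    show(v);
            return 0;
    }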
| 337 | void __init check_for_initrd(void) | 337 | void __init check_for_initrd(void) |
| 338 | { | 338 | { |
| 339 | #ifdef CONFIG_BLK_DEV_INITRD | 339 | #ifdef CONFIG_BLK_DEV_INITRD |
| 340 | DBG(" -> check_for_initrd() initrd_start=0x%lx initrd_end=0x%lx\n", | 340 | DBG(" -> check_for_initrd() initrd_start=0x%lx initrd_end=0x%lx\n", |
| 341 | initrd_start, initrd_end); | 341 | initrd_start, initrd_end); |
| 342 | 342 | ||
| 343 | /* If we were passed an initrd, set the ROOT_DEV properly if the values | 343 | /* If we were passed an initrd, set the ROOT_DEV properly if the values |
| 344 | * look sensible. If not, clear initrd reference. | 344 | * look sensible. If not, clear initrd reference. |
| 345 | */ | 345 | */ |
| 346 | if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) && | 346 | if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) && |
| 347 | initrd_end > initrd_start) | 347 | initrd_end > initrd_start) |
| 348 | ROOT_DEV = Root_RAM0; | 348 | ROOT_DEV = Root_RAM0; |
| 349 | else | 349 | else |
| 350 | initrd_start = initrd_end = 0; | 350 | initrd_start = initrd_end = 0; |
| 351 | 351 | ||
| 352 | if (initrd_start) | 352 | if (initrd_start) |
| 353 | printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end); | 353 | printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end); |
| 354 | 354 | ||
| 355 | DBG(" <- check_for_initrd()\n"); | 355 | DBG(" <- check_for_initrd()\n"); |
| 356 | #endif /* CONFIG_BLK_DEV_INITRD */ | 356 | #endif /* CONFIG_BLK_DEV_INITRD */ |
| 357 | } | 357 | } |
| 358 | 358 | ||
| 359 | #ifdef CONFIG_SMP | 359 | #ifdef CONFIG_SMP |
| 360 | 360 | ||
| 361 | int threads_per_core, threads_shift; | 361 | int threads_per_core, threads_shift; |
| 362 | cpumask_t threads_core_mask; | 362 | cpumask_t threads_core_mask; |
| 363 | 363 | ||
| 364 | static void __init cpu_init_thread_core_maps(int tpc) | 364 | static void __init cpu_init_thread_core_maps(int tpc) |
| 365 | { | 365 | { |
| 366 | int i; | 366 | int i; |
| 367 | 367 | ||
| 368 | threads_per_core = tpc; | 368 | threads_per_core = tpc; |
| 369 | threads_core_mask = CPU_MASK_NONE; | 369 | threads_core_mask = CPU_MASK_NONE; |
| 370 | 370 | ||
| 371 | /* This implementation only supports power of 2 number of threads | 371 | /* This implementation only supports power of 2 number of threads |
| 372 | * for simplicity and performance | 372 | * for simplicity and performance |
| 373 | */ | 373 | */ |
| 374 | threads_shift = ilog2(tpc); | 374 | threads_shift = ilog2(tpc); |
| 375 | BUG_ON(tpc != (1 << threads_shift)); | 375 | BUG_ON(tpc != (1 << threads_shift)); |
| 376 | 376 | ||
| 377 | for (i = 0; i < tpc; i++) | 377 | for (i = 0; i < tpc; i++) |
| 378 | cpu_set(i, threads_core_mask); | 378 | cpu_set(i, threads_core_mask); |
| 379 | 379 | ||
| 380 | printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", | 380 | printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", |
| 381 | tpc, tpc > 1 ? "s" : ""); | 381 | tpc, tpc > 1 ? "s" : ""); |
| 382 | printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift); | 382 | printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift); |
| 383 | } | 383 | } |
| 384 | 384 | ||
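The BUG_ON above is a round-trip check, tpc == 1 << ilog2(tpc), which holds only for powers of two; a non-power-of-two thread count would silently lose information in threads_shift. A userspace stand-in showing what it accepts and rejects:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's ilog2(): floor(log2(x)). */
    static int ilog2_u(unsigned int x)
    {
            int s = -1;

            while (x) {
                    x >>= 1;
                    ++s;
            }
            return s;
    }

    int main(void)
    {
            unsigned int tpc;

            for (tpc = 1; tpc <= 4; ++tpc) {
                    int shift = ilog2_u(tpc);

                    printf("tpc=%u shift=%d %s\n", tpc, shift,
                           tpc == (1u << shift) ? "ok" : "would BUG()");
            }
            return 0;
    }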
| 385 | 385 | ||
| 386 | /** | 386 | /** |
| 387 | * setup_cpu_maps - initialize the following cpu maps: | 387 | * setup_cpu_maps - initialize the following cpu maps: |
| 388 | * cpu_possible_map | 388 | * cpu_possible_map |
| 389 | * cpu_present_map | 389 | * cpu_present_map |
| 390 | * | 390 | * |
| 391 | * Having the possible map set up early allows us to restrict allocations | 391 | * Having the possible map set up early allows us to restrict allocations |
| 392 | * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. | 392 | * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. |
| 393 | * | 393 | * |
| 394 | * We do not initialize the online map here; cpus set their own bits in | 394 | * We do not initialize the online map here; cpus set their own bits in |
| 395 | * cpu_online_map as they come up. | 395 | * cpu_online_map as they come up. |
| 396 | * | 396 | * |
| 397 | * This function is valid only for Open Firmware systems. finish_device_tree | 397 | * This function is valid only for Open Firmware systems. finish_device_tree |
| 398 | * must be called before using this. | 398 | * must be called before using this. |
| 399 | * | 399 | * |
| 400 | * While we're here, we may as well set the "physical" cpu ids in the paca. | 400 | * While we're here, we may as well set the "physical" cpu ids in the paca. |
| 401 | * | 401 | * |
| 402 | * NOTE: This must match the parsing done in early_init_dt_scan_cpus. | 402 | * NOTE: This must match the parsing done in early_init_dt_scan_cpus. |
| 403 | */ | 403 | */ |
| 404 | void __init smp_setup_cpu_maps(void) | 404 | void __init smp_setup_cpu_maps(void) |
| 405 | { | 405 | { |
| 406 | struct device_node *dn = NULL; | 406 | struct device_node *dn = NULL; |
| 407 | int cpu = 0; | 407 | int cpu = 0; |
| 408 | int nthreads = 1; | 408 | int nthreads = 1; |
| 409 | 409 | ||
| 410 | DBG("smp_setup_cpu_maps()\n"); | 410 | DBG("smp_setup_cpu_maps()\n"); |
| 411 | 411 | ||
| 412 | while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { | 412 | while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { |
| 413 | const int *intserv; | 413 | const int *intserv; |
| 414 | int j, len; | 414 | int j, len; |
| 415 | 415 | ||
| 416 | DBG(" * %s...\n", dn->full_name); | 416 | DBG(" * %s...\n", dn->full_name); |
| 417 | 417 | ||
| 418 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", | 418 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", |
| 419 | &len); | 419 | &len); |
| 420 | if (intserv) { | 420 | if (intserv) { |
| 421 | nthreads = len / sizeof(int); | 421 | nthreads = len / sizeof(int); |
| 422 | DBG(" ibm,ppc-interrupt-server#s -> %d threads\n", | 422 | DBG(" ibm,ppc-interrupt-server#s -> %d threads\n", |
| 423 | nthreads); | 423 | nthreads); |
| 424 | } else { | 424 | } else { |
| 425 | DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n"); | 425 | DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n"); |
| 426 | intserv = of_get_property(dn, "reg", NULL); | 426 | intserv = of_get_property(dn, "reg", NULL); |
| 427 | if (!intserv) | 427 | if (!intserv) |
| 428 | intserv = &cpu; /* assume logical == phys */ | 428 | intserv = &cpu; /* assume logical == phys */ |
| 429 | } | 429 | } |
| 430 | 430 | ||
| 431 | for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { | 431 | for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { |
| 432 | DBG(" thread %d -> cpu %d (hard id %d)\n", | 432 | DBG(" thread %d -> cpu %d (hard id %d)\n", |
| 433 | j, cpu, intserv[j]); | 433 | j, cpu, intserv[j]); |
| 434 | set_cpu_present(cpu, true); | 434 | set_cpu_present(cpu, true); |
| 435 | set_hard_smp_processor_id(cpu, intserv[j]); | 435 | set_hard_smp_processor_id(cpu, intserv[j]); |
| 436 | set_cpu_possible(cpu, true); | 436 | set_cpu_possible(cpu, true); |
| 437 | cpu++; | 437 | cpu++; |
| 438 | } | 438 | } |
| 439 | } | 439 | } |
| 440 | 440 | ||
| 441 | /* If no SMT supported, nthreads is forced to 1 */ | 441 | /* If no SMT supported, nthreads is forced to 1 */ |
| 442 | if (!cpu_has_feature(CPU_FTR_SMT)) { | 442 | if (!cpu_has_feature(CPU_FTR_SMT)) { |
| 443 | DBG(" SMT disabled ! nthreads forced to 1\n"); | 443 | DBG(" SMT disabled ! nthreads forced to 1\n"); |
| 444 | nthreads = 1; | 444 | nthreads = 1; |
| 445 | } | 445 | } |
| 446 | 446 | ||
| 447 | #ifdef CONFIG_PPC64 | 447 | #ifdef CONFIG_PPC64 |
| 448 | /* | 448 | /* |
| 449 | * On pSeries LPAR, we need to know how many cpus | 449 | * On pSeries LPAR, we need to know how many cpus |
| 450 | * could possibly be added to this partition. | 450 | * could possibly be added to this partition. |
| 451 | */ | 451 | */ |
| 452 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && | 452 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && |
| 453 | (dn = of_find_node_by_path("/rtas"))) { | 453 | (dn = of_find_node_by_path("/rtas"))) { |
| 454 | int num_addr_cell, num_size_cell, maxcpus; | 454 | int num_addr_cell, num_size_cell, maxcpus; |
| 455 | const unsigned int *ireg; | 455 | const unsigned int *ireg; |
| 456 | 456 | ||
| 457 | num_addr_cell = of_n_addr_cells(dn); | 457 | num_addr_cell = of_n_addr_cells(dn); |
| 458 | num_size_cell = of_n_size_cells(dn); | 458 | num_size_cell = of_n_size_cells(dn); |
| 459 | 459 | ||
| 460 | ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL); | 460 | ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL); |
| 461 | 461 | ||
| 462 | if (!ireg) | 462 | if (!ireg) |
| 463 | goto out; | 463 | goto out; |
| 464 | 464 | ||
| 465 | maxcpus = ireg[num_addr_cell + num_size_cell]; | 465 | maxcpus = ireg[num_addr_cell + num_size_cell]; |
| 466 | 466 | ||
| 467 | /* Double maxcpus for processors which have SMT capability */ | 467 | /* Double maxcpus for processors which have SMT capability */ |
| 468 | if (cpu_has_feature(CPU_FTR_SMT)) | 468 | if (cpu_has_feature(CPU_FTR_SMT)) |
| 469 | maxcpus *= nthreads; | 469 | maxcpus *= nthreads; |
| 470 | 470 | ||
| 471 | if (maxcpus > NR_CPUS) { | 471 | if (maxcpus > NR_CPUS) { |
| 472 | printk(KERN_WARNING | 472 | printk(KERN_WARNING |
| 473 | "Partition configured for %d cpus, " | 473 | "Partition configured for %d cpus, " |
| 474 | "operating system maximum is %d.\n", | 474 | "operating system maximum is %d.\n", |
| 475 | maxcpus, NR_CPUS); | 475 | maxcpus, NR_CPUS); |
| 476 | maxcpus = NR_CPUS; | 476 | maxcpus = NR_CPUS; |
| 477 | } else | 477 | } else |
| 478 | printk(KERN_INFO "Partition configured for %d cpus.\n", | 478 | printk(KERN_INFO "Partition configured for %d cpus.\n", |
| 479 | maxcpus); | 479 | maxcpus); |
| 480 | 480 | ||
| 481 | for (cpu = 0; cpu < maxcpus; cpu++) | 481 | for (cpu = 0; cpu < maxcpus; cpu++) |
| 482 | set_cpu_possible(cpu, true); | 482 | set_cpu_possible(cpu, true); |
| 483 | out: | 483 | out: |
| 484 | of_node_put(dn); | 484 | of_node_put(dn); |
| 485 | } | 485 | } |
| 486 | vdso_data->processorCount = num_present_cpus(); | 486 | vdso_data->processorCount = num_present_cpus(); |
| 487 | #endif /* CONFIG_PPC64 */ | 487 | #endif /* CONFIG_PPC64 */ |
| 488 | 488 | ||
| 489 | /* Initialize CPU <=> thread mapping | 489 | /* Initialize CPU <=> thread mapping |
| 490 | * | 490 | * |
| 491 | * WARNING: We assume that the number of threads is the same for | 491 | * WARNING: We assume that the number of threads is the same for |
| 492 | * every CPU in the system. If that is not the case, then some code | 492 | * every CPU in the system. If that is not the case, then some code |
| 493 | * here will have to be reworked | 493 | * here will have to be reworked |
| 494 | */ | 494 | */ |
| 495 | cpu_init_thread_core_maps(nthreads); | 495 | cpu_init_thread_core_maps(nthreads); |
| 496 | } | 496 | } |
| 497 | #endif /* CONFIG_SMP */ | 497 | #endif /* CONFIG_SMP */ |
| 498 | 498 | ||
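The per-node loop in smp_setup_cpu_maps() derives the thread count from the byte length of the ibm,ppc-interrupt-server#s property, nthreads = len / sizeof(int), with one hard interrupt-server id per thread. A sketch with made-up property data:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical ibm,ppc-interrupt-server#s value: a 2-thread core. */
            const int intserv[] = { 4, 5 };
            int len = (int)sizeof(intserv);         /* property byte length */
            int nthreads = len / (int)sizeof(int);
            int j;

            for (j = 0; j < nthreads; ++j)
                    printf("thread %d -> hard id %d\n", j, intserv[j]);
            return 0;
    }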
| 499 | #ifdef CONFIG_PCSPKR_PLATFORM | 499 | #ifdef CONFIG_PCSPKR_PLATFORM |
| 500 | static __init int add_pcspkr(void) | 500 | static __init int add_pcspkr(void) |
| 501 | { | 501 | { |
| 502 | struct device_node *np; | 502 | struct device_node *np; |
| 503 | struct platform_device *pd; | 503 | struct platform_device *pd; |
| 504 | int ret; | 504 | int ret; |
| 505 | 505 | ||
| 506 | np = of_find_compatible_node(NULL, NULL, "pnpPNP,100"); | 506 | np = of_find_compatible_node(NULL, NULL, "pnpPNP,100"); |
| 507 | of_node_put(np); | 507 | of_node_put(np); |
| 508 | if (!np) | 508 | if (!np) |
| 509 | return -ENODEV; | 509 | return -ENODEV; |
| 510 | 510 | ||
| 511 | pd = platform_device_alloc("pcspkr", -1); | 511 | pd = platform_device_alloc("pcspkr", -1); |
| 512 | if (!pd) | 512 | if (!pd) |
| 513 | return -ENOMEM; | 513 | return -ENOMEM; |
| 514 | 514 | ||
| 515 | ret = platform_device_add(pd); | 515 | ret = platform_device_add(pd); |
| 516 | if (ret) | 516 | if (ret) |
| 517 | platform_device_put(pd); | 517 | platform_device_put(pd); |
| 518 | 518 | ||
| 519 | return ret; | 519 | return ret; |
| 520 | } | 520 | } |
| 521 | device_initcall(add_pcspkr); | 521 | device_initcall(add_pcspkr); |
| 522 | #endif /* CONFIG_PCSPKR_PLATFORM */ | 522 | #endif /* CONFIG_PCSPKR_PLATFORM */ |
| 523 | 523 | ||
| 524 | void probe_machine(void) | 524 | void probe_machine(void) |
| 525 | { | 525 | { |
| 526 | extern struct machdep_calls __machine_desc_start; | 526 | extern struct machdep_calls __machine_desc_start; |
| 527 | extern struct machdep_calls __machine_desc_end; | 527 | extern struct machdep_calls __machine_desc_end; |
| 528 | 528 | ||
| 529 | /* | 529 | /* |
| 530 | * Iterate all ppc_md structures until we find the proper | 530 | * Iterate all ppc_md structures until we find the proper |
| 531 | * one for the current machine type | 531 | * one for the current machine type |
| 532 | */ | 532 | */ |
| 533 | DBG("Probing machine type ...\n"); | 533 | DBG("Probing machine type ...\n"); |
| 534 | 534 | ||
| 535 | for (machine_id = &__machine_desc_start; | 535 | for (machine_id = &__machine_desc_start; |
| 536 | machine_id < &__machine_desc_end; | 536 | machine_id < &__machine_desc_end; |
| 537 | machine_id++) { | 537 | machine_id++) { |
| 538 | DBG(" %s ...", machine_id->name); | 538 | DBG(" %s ...", machine_id->name); |
| 539 | memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls)); | 539 | memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls)); |
| 540 | if (ppc_md.probe()) { | 540 | if (ppc_md.probe()) { |
| 541 | DBG(" match !\n"); | 541 | DBG(" match !\n"); |
| 542 | break; | 542 | break; |
| 543 | } | 543 | } |
| 544 | DBG("\n"); | 544 | DBG("\n"); |
| 545 | } | 545 | } |
| 546 | /* What can we do if we didn't find ? */ | 546 | /* What can we do if we didn't find ? */ |
| 547 | if (machine_id >= &__machine_desc_end) { | 547 | if (machine_id >= &__machine_desc_end) { |
| 548 | DBG("No suitable machine found !\n"); | 548 | DBG("No suitable machine found !\n"); |
| 549 | for (;;); | 549 | for (;;); |
| 550 | } | 550 | } |
| 551 | 551 | ||
| 552 | printk(KERN_INFO "Using %s machine description\n", ppc_md.name); | 552 | printk(KERN_INFO "Using %s machine description\n", ppc_md.name); |
| 553 | } | 553 | } |
| 554 | 554 | ||
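probe_machine() walks an array the linker assembles between __machine_desc_start and __machine_desc_end, copying each candidate into ppc_md and keeping the first whose probe() returns nonzero. The same first-match scan over a plain array, with invented board names and trivial probe callbacks:

    #include <stdio.h>

    struct machdesc {
            const char *name;
            int (*probe)(void);
    };

    static int probe_no(void)  { return 0; }
    static int probe_yes(void) { return 1; }

    static const struct machdesc machines[] = {
            { "chrp",    probe_no  },
            { "pseries", probe_yes },
            { "pmac",    probe_no  },
    };

    int main(void)
    {
            unsigned int i;

            for (i = 0; i < sizeof(machines) / sizeof(machines[0]); ++i) {
                    if (machines[i].probe()) {
                            printf("Using %s machine description\n",
                                   machines[i].name);
                            return 0;
                    }
            }
            puts("No suitable machine found !");
            return 1;
    }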
| 555 | /* Match a class of boards, not a specific device configuration. */ | 555 | /* Match a class of boards, not a specific device configuration. */ |
| 556 | int check_legacy_ioport(unsigned long base_port) | 556 | int check_legacy_ioport(unsigned long base_port) |
| 557 | { | 557 | { |
| 558 | struct device_node *parent, *np = NULL; | 558 | struct device_node *parent, *np = NULL; |
| 559 | int ret = -ENODEV; | 559 | int ret = -ENODEV; |
| 560 | 560 | ||
| 561 | switch(base_port) { | 561 | switch(base_port) { |
| 562 | case I8042_DATA_REG: | 562 | case I8042_DATA_REG: |
| 563 | if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303"))) | 563 | if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303"))) |
| 564 | np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03"); | 564 | np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03"); |
| 565 | if (np) { | 565 | if (np) { |
| 566 | parent = of_get_parent(np); | 566 | parent = of_get_parent(np); |
| 567 | of_node_put(np); | 567 | of_node_put(np); |
| 568 | np = parent; | 568 | np = parent; |
| 569 | break; | 569 | break; |
| 570 | } | 570 | } |
| 571 | np = of_find_node_by_type(NULL, "8042"); | 571 | np = of_find_node_by_type(NULL, "8042"); |
| 572 | /* Pegasos has no device_type on its 8042 node, look for the | 572 | /* Pegasos has no device_type on its 8042 node, look for the |
| 573 | * name instead */ | 573 | * name instead */ |
| 574 | if (!np) | 574 | if (!np) |
| 575 | np = of_find_node_by_name(NULL, "8042"); | 575 | np = of_find_node_by_name(NULL, "8042"); |
| 576 | break; | 576 | break; |
| 577 | case FDC_BASE: /* FDC1 */ | 577 | case FDC_BASE: /* FDC1 */ |
| 578 | np = of_find_node_by_type(NULL, "fdc"); | 578 | np = of_find_node_by_type(NULL, "fdc"); |
| 579 | break; | 579 | break; |
| 580 | #ifdef CONFIG_PPC_PREP | 580 | #ifdef CONFIG_PPC_PREP |
| 581 | case _PIDXR: | 581 | case _PIDXR: |
| 582 | case _PNPWRP: | 582 | case _PNPWRP: |
| 583 | case PNPBIOS_BASE: | 583 | case PNPBIOS_BASE: |
| 584 | /* implement me */ | 584 | /* implement me */ |
| 585 | #endif | 585 | #endif |
| 586 | default: | 586 | default: |
| 587 | /* ipmi is supposed to fail here */ | 587 | /* ipmi is supposed to fail here */ |
| 588 | break; | 588 | break; |
| 589 | } | 589 | } |
| 590 | if (!np) | 590 | if (!np) |
| 591 | return ret; | 591 | return ret; |
| 592 | parent = of_get_parent(np); | 592 | parent = of_get_parent(np); |
| 593 | if (parent) { | 593 | if (parent) { |
| 594 | if (strcmp(parent->type, "isa") == 0) | 594 | if (strcmp(parent->type, "isa") == 0) |
| 595 | ret = 0; | 595 | ret = 0; |
| 596 | of_node_put(parent); | 596 | of_node_put(parent); |
| 597 | } | 597 | } |
| 598 | of_node_put(np); | 598 | of_node_put(np); |
| 599 | return ret; | 599 | return ret; |
| 600 | } | 600 | } |
| 601 | EXPORT_SYMBOL(check_legacy_ioport); | 601 | EXPORT_SYMBOL(check_legacy_ioport); |
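check_legacy_ioport() gives drivers for legacy ISA devices a way to ask the device tree whether a port really exists before touching it: 0 means a matching node with an "isa" parent was found, -ENODEV means stay away. A minimal caller sketch (the function name and error path below are illustrative, not part of this diff):

    /* Hedged sketch: probe for an ISA 8042 keyboard controller before use. */
    static int example_i8042_probe(void)
    {
            if (check_legacy_ioport(I8042_DATA_REG))
                    return -ENODEV; /* no 8042 behind the legacy port */
            /* ... safe to request the region and talk to the chip ... */
            return 0;
    }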
| 602 | 602 | ||
| 603 | static int ppc_panic_event(struct notifier_block *this, | 603 | static int ppc_panic_event(struct notifier_block *this, |
| 604 | unsigned long event, void *ptr) | 604 | unsigned long event, void *ptr) |
| 605 | { | 605 | { |
| 606 | ppc_md.panic(ptr); /* May not return */ | 606 | ppc_md.panic(ptr); /* May not return */ |
| 607 | return NOTIFY_DONE; | 607 | return NOTIFY_DONE; |
| 608 | } | 608 | } |
| 609 | 609 | ||
| 610 | static struct notifier_block ppc_panic_block = { | 610 | static struct notifier_block ppc_panic_block = { |
| 611 | .notifier_call = ppc_panic_event, | 611 | .notifier_call = ppc_panic_event, |
| 612 | .priority = INT_MIN /* may not return; must be done last */ | 612 | .priority = INT_MIN /* may not return; must be done last */ |
| 613 | }; | 613 | }; |
| 614 | 614 | ||
| 615 | void __init setup_panic(void) | 615 | void __init setup_panic(void) |
| 616 | { | 616 | { |
| 617 | atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); | 617 | atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); |
| 618 | } | 618 | } |
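ppc_panic_block uses priority INT_MIN because ppc_md.panic() may never return: panic notifiers run in descending priority order, so every other handler must get its turn first. As an illustration only (names invented here), a notifier registered at the default priority 0 would be called before the block above:

    /* Illustrative sketch: a priority-0 notifier runs before INT_MIN. */
    static int example_panic_note(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
    {
            pr_emerg("panic: %s\n", (char *)ptr); /* ptr is the panic message */
            return NOTIFY_DONE;
    }

    static struct notifier_block example_panic_nb = {
            .notifier_call = example_panic_note, /* .priority defaults to 0 */
    };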
| 619 | 619 | ||
| 620 | #ifdef CONFIG_CHECK_CACHE_COHERENCY | 620 | #ifdef CONFIG_CHECK_CACHE_COHERENCY |
| 621 | /* | 621 | /* |
| 622 | * For platforms that have configurable cache-coherency. This function | 622 | * For platforms that have configurable cache-coherency. This function |
| 623 | * checks that the cache coherency setting of the kernel matches the setting | 623 | * checks that the cache coherency setting of the kernel matches the setting |
| 624 | * left by the firmware, as indicated in the device tree. Since a mismatch | 624 | * left by the firmware, as indicated in the device tree. Since a mismatch |
| 625 | * will eventually result in DMA failures, we print an error and call | 625 | * will eventually result in DMA failures, we print an error and call |
| 626 | * BUG() in that case. | 626 | * BUG() in that case. |
| 627 | */ | 627 | */ |
| 628 | 628 | ||
| 629 | #ifdef CONFIG_NOT_COHERENT_CACHE | 629 | #ifdef CONFIG_NOT_COHERENT_CACHE |
| 630 | #define KERNEL_COHERENCY 0 | 630 | #define KERNEL_COHERENCY 0 |
| 631 | #else | 631 | #else |
| 632 | #define KERNEL_COHERENCY 1 | 632 | #define KERNEL_COHERENCY 1 |
| 633 | #endif | 633 | #endif |
| 634 | 634 | ||
| 635 | static int __init check_cache_coherency(void) | 635 | static int __init check_cache_coherency(void) |
| 636 | { | 636 | { |
| 637 | struct device_node *np; | 637 | struct device_node *np; |
| 638 | const void *prop; | 638 | const void *prop; |
| 639 | int devtree_coherency; | 639 | int devtree_coherency; |
| 640 | 640 | ||
| 641 | np = of_find_node_by_path("/"); | 641 | np = of_find_node_by_path("/"); |
| 642 | prop = of_get_property(np, "coherency-off", NULL); | 642 | prop = of_get_property(np, "coherency-off", NULL); |
| 643 | of_node_put(np); | 643 | of_node_put(np); |
| 644 | 644 | ||
| 645 | devtree_coherency = prop ? 0 : 1; | 645 | devtree_coherency = prop ? 0 : 1; |
| 646 | 646 | ||
| 647 | if (devtree_coherency != KERNEL_COHERENCY) { | 647 | if (devtree_coherency != KERNEL_COHERENCY) { |
| 648 | printk(KERN_ERR | 648 | printk(KERN_ERR |
| 649 | "kernel coherency:%s != device tree_coherency:%s\n", | 649 | "kernel coherency:%s != device tree_coherency:%s\n", |
| 650 | KERNEL_COHERENCY ? "on" : "off", | 650 | KERNEL_COHERENCY ? "on" : "off", |
| 651 | devtree_coherency ? "on" : "off"); | 651 | devtree_coherency ? "on" : "off"); |
| 652 | BUG(); | 652 | BUG(); |
| 653 | } | 653 | } |
| 654 | 654 | ||
| 655 | return 0; | 655 | return 0; |
| 656 | } | 656 | } |
| 657 | 657 | ||
| 658 | late_initcall(check_cache_coherency); | 658 | late_initcall(check_cache_coherency); |
| 659 | #endif /* CONFIG_CHECK_CACHE_COHERENCY */ | 659 | #endif /* CONFIG_CHECK_CACHE_COHERENCY */ |
| 660 | 660 | ||
| 661 | #ifdef CONFIG_DEBUG_FS | 661 | #ifdef CONFIG_DEBUG_FS |
| 662 | struct dentry *powerpc_debugfs_root; | 662 | struct dentry *powerpc_debugfs_root; |
| 663 | EXPORT_SYMBOL(powerpc_debugfs_root); | ||
| 663 | 664 | ||
| 664 | static int powerpc_debugfs_init(void) | 665 | static int powerpc_debugfs_init(void) |
| 665 | { | 666 | { |
| 666 | powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL); | 667 | powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL); |
| 667 | 668 | ||
| 668 | return powerpc_debugfs_root == NULL; | 669 | return powerpc_debugfs_root == NULL; |
| 669 | } | 670 | } |
| 670 | arch_initcall(powerpc_debugfs_init); | 671 | arch_initcall(powerpc_debugfs_init); |
| 671 | #endif | 672 | #endif |
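powerpc_debugfs_root gives arch/powerpc code a shared parent directory in debugfs; the hcall instrumentation elsewhere in this commit creates its files beneath it. A hedged sketch of such a client (the file name and fops here are placeholders):

    /* Sketch: expose /sys/kernel/debug/powerpc/example under the shared root. */
    static const struct file_operations example_fops; /* placeholder fops */

    static int __init example_debugfs_init(void)
    {
            if (!powerpc_debugfs_root)
                    return -ENODEV;
            debugfs_create_file("example", 0444, powerpc_debugfs_root,
                                NULL, &example_fops);
            return 0;
    }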
| 672 | 673 | ||
| 673 | static int ppc_dflt_bus_notify(struct notifier_block *nb, | 674 | static int ppc_dflt_bus_notify(struct notifier_block *nb, |
| 674 | unsigned long action, void *data) | 675 | unsigned long action, void *data) |
| 675 | { | 676 | { |
| 676 | struct device *dev = data; | 677 | struct device *dev = data; |
| 677 | 678 | ||
| 678 | /* We are only interested in device addition */ | 679 | /* We are only interested in device addition */ |
| 679 | if (action != BUS_NOTIFY_ADD_DEVICE) | 680 | if (action != BUS_NOTIFY_ADD_DEVICE) |
| 680 | return 0; | 681 | return 0; |
| 681 | 682 | ||
| 682 | set_dma_ops(dev, &dma_direct_ops); | 683 | set_dma_ops(dev, &dma_direct_ops); |
| 683 | 684 | ||
| 684 | return NOTIFY_DONE; | 685 | return NOTIFY_DONE; |
| 685 | } | 686 | } |
| 686 | 687 | ||
| 687 | static struct notifier_block ppc_dflt_plat_bus_notifier = { | 688 | static struct notifier_block ppc_dflt_plat_bus_notifier = { |
| 688 | .notifier_call = ppc_dflt_bus_notify, | 689 | .notifier_call = ppc_dflt_bus_notify, |
| 689 | .priority = INT_MAX, | 690 | .priority = INT_MAX, |
| 690 | }; | 691 | }; |
| 691 | 692 | ||
| 692 | static struct notifier_block ppc_dflt_of_bus_notifier = { | 693 | static struct notifier_block ppc_dflt_of_bus_notifier = { |
| 693 | .notifier_call = ppc_dflt_bus_notify, | 694 | .notifier_call = ppc_dflt_bus_notify, |
| 694 | .priority = INT_MAX, | 695 | .priority = INT_MAX, |
| 695 | }; | 696 | }; |
| 696 | 697 | ||
| 697 | static int __init setup_bus_notifier(void) | 698 | static int __init setup_bus_notifier(void) |
| 698 | { | 699 | { |
| 699 | bus_register_notifier(&platform_bus_type, &ppc_dflt_plat_bus_notifier); | 700 | bus_register_notifier(&platform_bus_type, &ppc_dflt_plat_bus_notifier); |
| 700 | bus_register_notifier(&of_platform_bus_type, &ppc_dflt_of_bus_notifier); | 701 | bus_register_notifier(&of_platform_bus_type, &ppc_dflt_of_bus_notifier); |
| 701 | 702 | ||
| 702 | return 0; | 703 | return 0; |
| 703 | } | 704 | } |
| 704 | 705 | ||
| 705 | arch_initcall(setup_bus_notifier); | 706 | arch_initcall(setup_bus_notifier); |
| 706 | 707 |
arch/powerpc/kernel/time.c
| 1 | /* | 1 | /* |
| 2 | * Common time routines among all ppc machines. | 2 | * Common time routines among all ppc machines. |
| 3 | * | 3 | * |
| 4 | * Written by Cort Dougan (cort@cs.nmt.edu) to merge | 4 | * Written by Cort Dougan (cort@cs.nmt.edu) to merge |
| 5 | * Paul Mackerras' version and mine for PReP and Pmac. | 5 | * Paul Mackerras' version and mine for PReP and Pmac. |
| 6 | * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net). | 6 | * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net). |
| 7 | * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com) | 7 | * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com) |
| 8 | * | 8 | * |
| 9 | * First round of bugfixes by Gabriel Paubert (paubert@iram.es) | 9 | * First round of bugfixes by Gabriel Paubert (paubert@iram.es) |
| 10 | * to make clock more stable (2.4.0-test5). The only thing | 10 | * to make clock more stable (2.4.0-test5). The only thing |
| 11 | * that this code assumes is that the timebases have been synchronized | 11 | * that this code assumes is that the timebases have been synchronized |
| 12 | * by firmware on SMP and are never stopped (never do sleep | 12 | * by firmware on SMP and are never stopped (never do sleep |
| 13 | * on SMP then, nap and doze are OK). | 13 | * on SMP then, nap and doze are OK). |
| 14 | * | 14 | * |
| 15 | * Speeded up do_gettimeofday by getting rid of references to | 15 | * Speeded up do_gettimeofday by getting rid of references to |
| 16 | * xtime (which required locks for consistency). (mikejc@us.ibm.com) | 16 | * xtime (which required locks for consistency). (mikejc@us.ibm.com) |
| 17 | * | 17 | * |
| 18 | * TODO (not necessarily in this file): | 18 | * TODO (not necessarily in this file): |
| 19 | * - improve precision and reproducibility of timebase frequency | 19 | * - improve precision and reproducibility of timebase frequency |
| 20 | * measurement at boot time. (for iSeries, we calibrate the timebase | 20 | * measurement at boot time. (for iSeries, we calibrate the timebase |
| 21 | * against the Titan chip's clock.) | 21 | * against the Titan chip's clock.) |
| 22 | * - for astronomical applications: add a new function to get | 22 | * - for astronomical applications: add a new function to get |
| 23 | * non ambiguous timestamps even around leap seconds. This needs | 23 | * non ambiguous timestamps even around leap seconds. This needs |
| 24 | * a new timestamp format and a good name. | 24 | * a new timestamp format and a good name. |
| 25 | * | 25 | * |
| 26 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 | 26 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 |
| 27 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | 27 | * "A Kernel Model for Precision Timekeeping" by Dave Mills |
| 28 | * | 28 | * |
| 29 | * This program is free software; you can redistribute it and/or | 29 | * This program is free software; you can redistribute it and/or |
| 30 | * modify it under the terms of the GNU General Public License | 30 | * modify it under the terms of the GNU General Public License |
| 31 | * as published by the Free Software Foundation; either version | 31 | * as published by the Free Software Foundation; either version |
| 32 | * 2 of the License, or (at your option) any later version. | 32 | * 2 of the License, or (at your option) any later version. |
| 33 | */ | 33 | */ |
| 34 | 34 | ||
| 35 | #include <linux/errno.h> | 35 | #include <linux/errno.h> |
| 36 | #include <linux/module.h> | 36 | #include <linux/module.h> |
| 37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
| 38 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
| 39 | #include <linux/param.h> | 39 | #include <linux/param.h> |
| 40 | #include <linux/string.h> | 40 | #include <linux/string.h> |
| 41 | #include <linux/mm.h> | 41 | #include <linux/mm.h> |
| 42 | #include <linux/interrupt.h> | 42 | #include <linux/interrupt.h> |
| 43 | #include <linux/timex.h> | 43 | #include <linux/timex.h> |
| 44 | #include <linux/kernel_stat.h> | 44 | #include <linux/kernel_stat.h> |
| 45 | #include <linux/time.h> | 45 | #include <linux/time.h> |
| 46 | #include <linux/init.h> | 46 | #include <linux/init.h> |
| 47 | #include <linux/profile.h> | 47 | #include <linux/profile.h> |
| 48 | #include <linux/cpu.h> | 48 | #include <linux/cpu.h> |
| 49 | #include <linux/security.h> | 49 | #include <linux/security.h> |
| 50 | #include <linux/percpu.h> | 50 | #include <linux/percpu.h> |
| 51 | #include <linux/rtc.h> | 51 | #include <linux/rtc.h> |
| 52 | #include <linux/jiffies.h> | 52 | #include <linux/jiffies.h> |
| 53 | #include <linux/posix-timers.h> | 53 | #include <linux/posix-timers.h> |
| 54 | #include <linux/irq.h> | 54 | #include <linux/irq.h> |
| 55 | #include <linux/delay.h> | 55 | #include <linux/delay.h> |
| 56 | #include <linux/perf_event.h> | 56 | #include <linux/perf_event.h> |
| 57 | #include <asm/trace.h> | ||
| 57 | 58 | ||
| 58 | #include <asm/io.h> | 59 | #include <asm/io.h> |
| 59 | #include <asm/processor.h> | 60 | #include <asm/processor.h> |
| 60 | #include <asm/nvram.h> | 61 | #include <asm/nvram.h> |
| 61 | #include <asm/cache.h> | 62 | #include <asm/cache.h> |
| 62 | #include <asm/machdep.h> | 63 | #include <asm/machdep.h> |
| 63 | #include <asm/uaccess.h> | 64 | #include <asm/uaccess.h> |
| 64 | #include <asm/time.h> | 65 | #include <asm/time.h> |
| 65 | #include <asm/prom.h> | 66 | #include <asm/prom.h> |
| 66 | #include <asm/irq.h> | 67 | #include <asm/irq.h> |
| 67 | #include <asm/div64.h> | 68 | #include <asm/div64.h> |
| 68 | #include <asm/smp.h> | 69 | #include <asm/smp.h> |
| 69 | #include <asm/vdso_datapage.h> | 70 | #include <asm/vdso_datapage.h> |
| 70 | #include <asm/firmware.h> | 71 | #include <asm/firmware.h> |
| 71 | #include <asm/cputime.h> | 72 | #include <asm/cputime.h> |
| 72 | #ifdef CONFIG_PPC_ISERIES | 73 | #ifdef CONFIG_PPC_ISERIES |
| 73 | #include <asm/iseries/it_lp_queue.h> | 74 | #include <asm/iseries/it_lp_queue.h> |
| 74 | #include <asm/iseries/hv_call_xm.h> | 75 | #include <asm/iseries/hv_call_xm.h> |
| 75 | #endif | 76 | #endif |
| 76 | 77 | ||
| 77 | /* powerpc clocksource/clockevent code */ | 78 | /* powerpc clocksource/clockevent code */ |
| 78 | 79 | ||
| 79 | #include <linux/clockchips.h> | 80 | #include <linux/clockchips.h> |
| 80 | #include <linux/clocksource.h> | 81 | #include <linux/clocksource.h> |
| 81 | 82 | ||
| 82 | static cycle_t rtc_read(struct clocksource *); | 83 | static cycle_t rtc_read(struct clocksource *); |
| 83 | static struct clocksource clocksource_rtc = { | 84 | static struct clocksource clocksource_rtc = { |
| 84 | .name = "rtc", | 85 | .name = "rtc", |
| 85 | .rating = 400, | 86 | .rating = 400, |
| 86 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 87 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
| 87 | .mask = CLOCKSOURCE_MASK(64), | 88 | .mask = CLOCKSOURCE_MASK(64), |
| 88 | .shift = 22, | 89 | .shift = 22, |
| 89 | .mult = 0, /* To be filled in */ | 90 | .mult = 0, /* To be filled in */ |
| 90 | .read = rtc_read, | 91 | .read = rtc_read, |
| 91 | }; | 92 | }; |
| 92 | 93 | ||
| 93 | static cycle_t timebase_read(struct clocksource *); | 94 | static cycle_t timebase_read(struct clocksource *); |
| 94 | static struct clocksource clocksource_timebase = { | 95 | static struct clocksource clocksource_timebase = { |
| 95 | .name = "timebase", | 96 | .name = "timebase", |
| 96 | .rating = 400, | 97 | .rating = 400, |
| 97 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 98 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
| 98 | .mask = CLOCKSOURCE_MASK(64), | 99 | .mask = CLOCKSOURCE_MASK(64), |
| 99 | .shift = 22, | 100 | .shift = 22, |
| 100 | .mult = 0, /* To be filled in */ | 101 | .mult = 0, /* To be filled in */ |
| 101 | .read = timebase_read, | 102 | .read = timebase_read, |
| 102 | }; | 103 | }; |
| 103 | 104 | ||
| 104 | #define DECREMENTER_MAX 0x7fffffff | 105 | #define DECREMENTER_MAX 0x7fffffff |
| 105 | 106 | ||
| 106 | static int decrementer_set_next_event(unsigned long evt, | 107 | static int decrementer_set_next_event(unsigned long evt, |
| 107 | struct clock_event_device *dev); | 108 | struct clock_event_device *dev); |
| 108 | static void decrementer_set_mode(enum clock_event_mode mode, | 109 | static void decrementer_set_mode(enum clock_event_mode mode, |
| 109 | struct clock_event_device *dev); | 110 | struct clock_event_device *dev); |
| 110 | 111 | ||
| 111 | static struct clock_event_device decrementer_clockevent = { | 112 | static struct clock_event_device decrementer_clockevent = { |
| 112 | .name = "decrementer", | 113 | .name = "decrementer", |
| 113 | .rating = 200, | 114 | .rating = 200, |
| 114 | .shift = 0, /* To be filled in */ | 115 | .shift = 0, /* To be filled in */ |
| 115 | .mult = 0, /* To be filled in */ | 116 | .mult = 0, /* To be filled in */ |
| 116 | .irq = 0, | 117 | .irq = 0, |
| 117 | .set_next_event = decrementer_set_next_event, | 118 | .set_next_event = decrementer_set_next_event, |
| 118 | .set_mode = decrementer_set_mode, | 119 | .set_mode = decrementer_set_mode, |
| 119 | .features = CLOCK_EVT_FEAT_ONESHOT, | 120 | .features = CLOCK_EVT_FEAT_ONESHOT, |
| 120 | }; | 121 | }; |
| 121 | 122 | ||
| 122 | struct decrementer_clock { | 123 | struct decrementer_clock { |
| 123 | struct clock_event_device event; | 124 | struct clock_event_device event; |
| 124 | u64 next_tb; | 125 | u64 next_tb; |
| 125 | }; | 126 | }; |
| 126 | 127 | ||
| 127 | static DEFINE_PER_CPU(struct decrementer_clock, decrementers); | 128 | static DEFINE_PER_CPU(struct decrementer_clock, decrementers); |
| 128 | 129 | ||
| 129 | #ifdef CONFIG_PPC_ISERIES | 130 | #ifdef CONFIG_PPC_ISERIES |
| 130 | static unsigned long __initdata iSeries_recal_titan; | 131 | static unsigned long __initdata iSeries_recal_titan; |
| 131 | static signed long __initdata iSeries_recal_tb; | 132 | static signed long __initdata iSeries_recal_tb; |
| 132 | 133 | ||
| 133 | /* Forward declaration is only needed for iSeries compiles */ | 134 | /* Forward declaration is only needed for iSeries compiles */ |
| 134 | static void __init clocksource_init(void); | 135 | static void __init clocksource_init(void); |
| 135 | #endif | 136 | #endif |
| 136 | 137 | ||
| 137 | #define XSEC_PER_SEC (1024*1024) | 138 | #define XSEC_PER_SEC (1024*1024) |
| 138 | 139 | ||
| 139 | #ifdef CONFIG_PPC64 | 140 | #ifdef CONFIG_PPC64 |
| 140 | #define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC) | 141 | #define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC) |
| 141 | #else | 142 | #else |
| 142 | /* compute ((xsec << 12) * max) >> 32 */ | 143 | /* compute ((xsec << 12) * max) >> 32 */ |
| 143 | #define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max) | 144 | #define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max) |
| 144 | #endif | 145 | #endif |
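Both SCALE_XSEC variants compute xsec * max / 2^20, since XSEC_PER_SEC is 1024*1024 = 2^20. The 32-bit form pre-shifts xsec left by 12 so that mulhwu's implicit >> 32 nets out to the same >> 20. A hedged equivalence sketch, valid for xsec < XSEC_PER_SEC (treating mulhwu(a, b) as ((u64)a * b) >> 32):

    /* Sketch: the two SCALE_XSEC forms agree for xsec < 2^20. */
    static unsigned long scale_xsec_64(unsigned long xsec, unsigned long max)
    {
            return (xsec * max) / (1024 * 1024);        /* xsec * max >> 20 */
    }

    static unsigned long scale_xsec_32(unsigned long xsec, unsigned long max)
    {
            /* (xsec << 12) still fits in 32 bits when xsec < 2^20 */
            return ((unsigned long long)(xsec << 12) * max) >> 32;
    }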
| 145 | 146 | ||
| 146 | unsigned long tb_ticks_per_jiffy; | 147 | unsigned long tb_ticks_per_jiffy; |
| 147 | unsigned long tb_ticks_per_usec = 100; /* sane default */ | 148 | unsigned long tb_ticks_per_usec = 100; /* sane default */ |
| 148 | EXPORT_SYMBOL(tb_ticks_per_usec); | 149 | EXPORT_SYMBOL(tb_ticks_per_usec); |
| 149 | unsigned long tb_ticks_per_sec; | 150 | unsigned long tb_ticks_per_sec; |
| 150 | EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */ | 151 | EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */ |
| 151 | u64 tb_to_xs; | 152 | u64 tb_to_xs; |
| 152 | unsigned tb_to_us; | 153 | unsigned tb_to_us; |
| 153 | 154 | ||
| 154 | #define TICKLEN_SCALE NTP_SCALE_SHIFT | 155 | #define TICKLEN_SCALE NTP_SCALE_SHIFT |
| 155 | static u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ | 156 | static u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ |
| 156 | static u64 ticklen_to_xs; /* 0.64 fraction */ | 157 | static u64 ticklen_to_xs; /* 0.64 fraction */ |
| 157 | 158 | ||
| 158 | /* If last_tick_len corresponds to about 1/HZ seconds, then | 159 | /* If last_tick_len corresponds to about 1/HZ seconds, then |
| 159 | last_tick_len << TICKLEN_SHIFT will be about 2^63. */ | 160 | last_tick_len << TICKLEN_SHIFT will be about 2^63. */ |
| 160 | #define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) | 161 | #define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) |
| 161 | 162 | ||
| 162 | DEFINE_SPINLOCK(rtc_lock); | 163 | DEFINE_SPINLOCK(rtc_lock); |
| 163 | EXPORT_SYMBOL_GPL(rtc_lock); | 164 | EXPORT_SYMBOL_GPL(rtc_lock); |
| 164 | 165 | ||
| 165 | static u64 tb_to_ns_scale __read_mostly; | 166 | static u64 tb_to_ns_scale __read_mostly; |
| 166 | static unsigned tb_to_ns_shift __read_mostly; | 167 | static unsigned tb_to_ns_shift __read_mostly; |
| 167 | static unsigned long boot_tb __read_mostly; | 168 | static unsigned long boot_tb __read_mostly; |
| 168 | 169 | ||
| 169 | extern struct timezone sys_tz; | 170 | extern struct timezone sys_tz; |
| 170 | static long timezone_offset; | 171 | static long timezone_offset; |
| 171 | 172 | ||
| 172 | unsigned long ppc_proc_freq; | 173 | unsigned long ppc_proc_freq; |
| 173 | EXPORT_SYMBOL(ppc_proc_freq); | 174 | EXPORT_SYMBOL(ppc_proc_freq); |
| 174 | unsigned long ppc_tb_freq; | 175 | unsigned long ppc_tb_freq; |
| 175 | 176 | ||
| 176 | static u64 tb_last_jiffy __cacheline_aligned_in_smp; | 177 | static u64 tb_last_jiffy __cacheline_aligned_in_smp; |
| 177 | static DEFINE_PER_CPU(u64, last_jiffy); | 178 | static DEFINE_PER_CPU(u64, last_jiffy); |
| 178 | 179 | ||
| 179 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 180 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
| 180 | /* | 181 | /* |
| 181 | * Factors for converting from cputime_t (timebase ticks) to | 182 | * Factors for converting from cputime_t (timebase ticks) to |
| 182 | * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds). | 183 | * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds). |
| 183 | * These are all stored as 0.64 fixed-point binary fractions. | 184 | * These are all stored as 0.64 fixed-point binary fractions. |
| 184 | */ | 185 | */ |
| 185 | u64 __cputime_jiffies_factor; | 186 | u64 __cputime_jiffies_factor; |
| 186 | EXPORT_SYMBOL(__cputime_jiffies_factor); | 187 | EXPORT_SYMBOL(__cputime_jiffies_factor); |
| 187 | u64 __cputime_msec_factor; | 188 | u64 __cputime_msec_factor; |
| 188 | EXPORT_SYMBOL(__cputime_msec_factor); | 189 | EXPORT_SYMBOL(__cputime_msec_factor); |
| 189 | u64 __cputime_sec_factor; | 190 | u64 __cputime_sec_factor; |
| 190 | EXPORT_SYMBOL(__cputime_sec_factor); | 191 | EXPORT_SYMBOL(__cputime_sec_factor); |
| 191 | u64 __cputime_clockt_factor; | 192 | u64 __cputime_clockt_factor; |
| 192 | EXPORT_SYMBOL(__cputime_clockt_factor); | 193 | EXPORT_SYMBOL(__cputime_clockt_factor); |
| 193 | DEFINE_PER_CPU(unsigned long, cputime_last_delta); | 194 | DEFINE_PER_CPU(unsigned long, cputime_last_delta); |
| 194 | DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); | 195 | DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); |
| 195 | 196 | ||
| 196 | cputime_t cputime_one_jiffy; | 197 | cputime_t cputime_one_jiffy; |
| 197 | 198 | ||
| 198 | static void calc_cputime_factors(void) | 199 | static void calc_cputime_factors(void) |
| 199 | { | 200 | { |
| 200 | struct div_result res; | 201 | struct div_result res; |
| 201 | 202 | ||
| 202 | div128_by_32(HZ, 0, tb_ticks_per_sec, &res); | 203 | div128_by_32(HZ, 0, tb_ticks_per_sec, &res); |
| 203 | __cputime_jiffies_factor = res.result_low; | 204 | __cputime_jiffies_factor = res.result_low; |
| 204 | div128_by_32(1000, 0, tb_ticks_per_sec, &res); | 205 | div128_by_32(1000, 0, tb_ticks_per_sec, &res); |
| 205 | __cputime_msec_factor = res.result_low; | 206 | __cputime_msec_factor = res.result_low; |
| 206 | div128_by_32(1, 0, tb_ticks_per_sec, &res); | 207 | div128_by_32(1, 0, tb_ticks_per_sec, &res); |
| 207 | __cputime_sec_factor = res.result_low; | 208 | __cputime_sec_factor = res.result_low; |
| 208 | div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); | 209 | div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); |
| 209 | __cputime_clockt_factor = res.result_low; | 210 | __cputime_clockt_factor = res.result_low; |
| 210 | } | 211 | } |
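The factors are 0.64 fixed-point fractions: div128_by_32(HZ, 0, tb_ticks_per_sec, &res) divides the 128-bit value HZ << 64 by tb_ticks_per_sec, so __cputime_jiffies_factor = (HZ << 64) / tb_ticks_per_sec. Conversion then needs only a high-word multiply and no runtime division: mulhdu(ticks, __cputime_jiffies_factor) == (ticks * factor) >> 64 == ticks * HZ / tb_ticks_per_sec.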
| 211 | 212 | ||
| 212 | /* | 213 | /* |
| 213 | * Read the PURR on systems that have it, otherwise the timebase. | 214 | * Read the PURR on systems that have it, otherwise the timebase. |
| 214 | */ | 215 | */ |
| 215 | static u64 read_purr(void) | 216 | static u64 read_purr(void) |
| 216 | { | 217 | { |
| 217 | if (cpu_has_feature(CPU_FTR_PURR)) | 218 | if (cpu_has_feature(CPU_FTR_PURR)) |
| 218 | return mfspr(SPRN_PURR); | 219 | return mfspr(SPRN_PURR); |
| 219 | return mftb(); | 220 | return mftb(); |
| 220 | } | 221 | } |
| 221 | 222 | ||
| 222 | /* | 223 | /* |
| 223 | * Read the SPURR on systems that have it, otherwise the PURR. | 224 | * Read the SPURR on systems that have it, otherwise the PURR. |
| 224 | */ | 225 | */ |
| 225 | static u64 read_spurr(u64 purr) | 226 | static u64 read_spurr(u64 purr) |
| 226 | { | 227 | { |
| 227 | /* | 228 | /* |
| 228 | * cpus without PURR won't have a SPURR | 229 | * cpus without PURR won't have a SPURR |
| 229 | * We already know the former when we use this, so tell gcc | 230 | * We already know the former when we use this, so tell gcc |
| 230 | */ | 231 | */ |
| 231 | if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR)) | 232 | if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR)) |
| 232 | return mfspr(SPRN_SPURR); | 233 | return mfspr(SPRN_SPURR); |
| 233 | return purr; | 234 | return purr; |
| 234 | } | 235 | } |
| 235 | 236 | ||
| 236 | /* | 237 | /* |
| 237 | * Account time for a transition between system, hard irq | 238 | * Account time for a transition between system, hard irq |
| 238 | * or soft irq state. | 239 | * or soft irq state. |
| 239 | */ | 240 | */ |
| 240 | void account_system_vtime(struct task_struct *tsk) | 241 | void account_system_vtime(struct task_struct *tsk) |
| 241 | { | 242 | { |
| 242 | u64 now, nowscaled, delta, deltascaled, sys_time; | 243 | u64 now, nowscaled, delta, deltascaled, sys_time; |
| 243 | unsigned long flags; | 244 | unsigned long flags; |
| 244 | 245 | ||
| 245 | local_irq_save(flags); | 246 | local_irq_save(flags); |
| 246 | now = read_purr(); | 247 | now = read_purr(); |
| 247 | nowscaled = read_spurr(now); | 248 | nowscaled = read_spurr(now); |
| 248 | delta = now - get_paca()->startpurr; | 249 | delta = now - get_paca()->startpurr; |
| 249 | deltascaled = nowscaled - get_paca()->startspurr; | 250 | deltascaled = nowscaled - get_paca()->startspurr; |
| 250 | get_paca()->startpurr = now; | 251 | get_paca()->startpurr = now; |
| 251 | get_paca()->startspurr = nowscaled; | 252 | get_paca()->startspurr = nowscaled; |
| 252 | if (!in_interrupt()) { | 253 | if (!in_interrupt()) { |
| 253 | /* deltascaled includes both user and system time. | 254 | /* deltascaled includes both user and system time. |
| 254 | * Hence scale it based on the purr ratio to estimate | 255 | * Hence scale it based on the purr ratio to estimate |
| 255 | * the system time */ | 256 | * the system time */ |
| 256 | sys_time = get_paca()->system_time; | 257 | sys_time = get_paca()->system_time; |
| 257 | if (get_paca()->user_time) | 258 | if (get_paca()->user_time) |
| 258 | deltascaled = deltascaled * sys_time / | 259 | deltascaled = deltascaled * sys_time / |
| 259 | (sys_time + get_paca()->user_time); | 260 | (sys_time + get_paca()->user_time); |
| 260 | delta += sys_time; | 261 | delta += sys_time; |
| 261 | get_paca()->system_time = 0; | 262 | get_paca()->system_time = 0; |
| 262 | } | 263 | } |
| 263 | if (in_irq() || idle_task(smp_processor_id()) != tsk) | 264 | if (in_irq() || idle_task(smp_processor_id()) != tsk) |
| 264 | account_system_time(tsk, 0, delta, deltascaled); | 265 | account_system_time(tsk, 0, delta, deltascaled); |
| 265 | else | 266 | else |
| 266 | account_idle_time(delta); | 267 | account_idle_time(delta); |
| 267 | per_cpu(cputime_last_delta, smp_processor_id()) = delta; | 268 | per_cpu(cputime_last_delta, smp_processor_id()) = delta; |
| 268 | per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; | 269 | per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; |
| 269 | local_irq_restore(flags); | 270 | local_irq_restore(flags); |
| 270 | } | 271 | } |
| 271 | 272 | ||
| 272 | /* | 273 | /* |
| 273 | * Transfer the user and system times accumulated in the paca | 274 | * Transfer the user and system times accumulated in the paca |
| 274 | * by the exception entry and exit code to the generic process | 275 | * by the exception entry and exit code to the generic process |
| 275 | * user and system time records. | 276 | * user and system time records. |
| 276 | * Must be called with interrupts disabled. | 277 | * Must be called with interrupts disabled. |
| 277 | */ | 278 | */ |
| 278 | void account_process_tick(struct task_struct *tsk, int user_tick) | 279 | void account_process_tick(struct task_struct *tsk, int user_tick) |
| 279 | { | 280 | { |
| 280 | cputime_t utime, utimescaled; | 281 | cputime_t utime, utimescaled; |
| 281 | 282 | ||
| 282 | utime = get_paca()->user_time; | 283 | utime = get_paca()->user_time; |
| 283 | get_paca()->user_time = 0; | 284 | get_paca()->user_time = 0; |
| 284 | utimescaled = cputime_to_scaled(utime); | 285 | utimescaled = cputime_to_scaled(utime); |
| 285 | account_user_time(tsk, utime, utimescaled); | 286 | account_user_time(tsk, utime, utimescaled); |
| 286 | } | 287 | } |
| 287 | 288 | ||
| 288 | /* | 289 | /* |
| 289 | * Stuff for accounting stolen time. | 290 | * Stuff for accounting stolen time. |
| 290 | */ | 291 | */ |
| 291 | struct cpu_purr_data { | 292 | struct cpu_purr_data { |
| 292 | int initialized; /* thread is running */ | 293 | int initialized; /* thread is running */ |
| 293 | u64 tb; /* last TB value read */ | 294 | u64 tb; /* last TB value read */ |
| 294 | u64 purr; /* last PURR value read */ | 295 | u64 purr; /* last PURR value read */ |
| 295 | u64 spurr; /* last SPURR value read */ | 296 | u64 spurr; /* last SPURR value read */ |
| 296 | }; | 297 | }; |
| 297 | 298 | ||
| 298 | /* | 299 | /* |
| 299 | * Each entry in the cpu_purr_data array is manipulated only by its | 300 | * Each entry in the cpu_purr_data array is manipulated only by its |
| 300 | * "owner" cpu -- usually in the timer interrupt but also occasionally | 301 | * "owner" cpu -- usually in the timer interrupt but also occasionally |
| 301 | * in process context for cpu online. As long as cpus do not touch | 302 | * in process context for cpu online. As long as cpus do not touch |
| 302 | * each other's cpu_purr_data, disabling local interrupts is | 303 | * each other's cpu_purr_data, disabling local interrupts is |
| 303 | * sufficient to serialize accesses. | 304 | * sufficient to serialize accesses. |
| 304 | */ | 305 | */ |
| 305 | static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data); | 306 | static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data); |
| 306 | 307 | ||
| 307 | static void snapshot_tb_and_purr(void *data) | 308 | static void snapshot_tb_and_purr(void *data) |
| 308 | { | 309 | { |
| 309 | unsigned long flags; | 310 | unsigned long flags; |
| 310 | struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data); | 311 | struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data); |
| 311 | 312 | ||
| 312 | local_irq_save(flags); | 313 | local_irq_save(flags); |
| 313 | p->tb = get_tb_or_rtc(); | 314 | p->tb = get_tb_or_rtc(); |
| 314 | p->purr = mfspr(SPRN_PURR); | 315 | p->purr = mfspr(SPRN_PURR); |
| 315 | wmb(); | 316 | wmb(); |
| 316 | p->initialized = 1; | 317 | p->initialized = 1; |
| 317 | local_irq_restore(flags); | 318 | local_irq_restore(flags); |
| 318 | } | 319 | } |
| 319 | 320 | ||
| 320 | /* | 321 | /* |
| 321 | * Called during boot when all cpus have come up. | 322 | * Called during boot when all cpus have come up. |
| 322 | */ | 323 | */ |
| 323 | void snapshot_timebases(void) | 324 | void snapshot_timebases(void) |
| 324 | { | 325 | { |
| 325 | if (!cpu_has_feature(CPU_FTR_PURR)) | 326 | if (!cpu_has_feature(CPU_FTR_PURR)) |
| 326 | return; | 327 | return; |
| 327 | on_each_cpu(snapshot_tb_and_purr, NULL, 1); | 328 | on_each_cpu(snapshot_tb_and_purr, NULL, 1); |
| 328 | } | 329 | } |
| 329 | 330 | ||
| 330 | /* | 331 | /* |
| 331 | * Must be called with interrupts disabled. | 332 | * Must be called with interrupts disabled. |
| 332 | */ | 333 | */ |
| 333 | void calculate_steal_time(void) | 334 | void calculate_steal_time(void) |
| 334 | { | 335 | { |
| 335 | u64 tb, purr; | 336 | u64 tb, purr; |
| 336 | s64 stolen; | 337 | s64 stolen; |
| 337 | struct cpu_purr_data *pme; | 338 | struct cpu_purr_data *pme; |
| 338 | 339 | ||
| 339 | pme = &__get_cpu_var(cpu_purr_data); | 340 | pme = &__get_cpu_var(cpu_purr_data); |
| 340 | if (!pme->initialized) | 341 | if (!pme->initialized) |
| 341 | return; /* !CPU_FTR_PURR or early in early boot */ | 342 | return; /* !CPU_FTR_PURR or early in early boot */ |
| 342 | tb = mftb(); | 343 | tb = mftb(); |
| 343 | purr = mfspr(SPRN_PURR); | 344 | purr = mfspr(SPRN_PURR); |
| 344 | stolen = (tb - pme->tb) - (purr - pme->purr); | 345 | stolen = (tb - pme->tb) - (purr - pme->purr); |
| 345 | if (stolen > 0) { | 346 | if (stolen > 0) { |
| 346 | if (idle_task(smp_processor_id()) != current) | 347 | if (idle_task(smp_processor_id()) != current) |
| 347 | account_steal_time(stolen); | 348 | account_steal_time(stolen); |
| 348 | else | 349 | else |
| 349 | account_idle_time(stolen); | 350 | account_idle_time(stolen); |
| 350 | } | 351 | } |
| 351 | pme->tb = tb; | 352 | pme->tb = tb; |
| 352 | pme->purr = purr; | 353 | pme->purr = purr; |
| 353 | } | 354 | } |
| 354 | 355 | ||
| 355 | #ifdef CONFIG_PPC_SPLPAR | 356 | #ifdef CONFIG_PPC_SPLPAR |
| 356 | /* | 357 | /* |
| 357 | * Must be called before the cpu is added to the online map when | 358 | * Must be called before the cpu is added to the online map when |
| 358 | * a cpu is being brought up at runtime. | 359 | * a cpu is being brought up at runtime. |
| 359 | */ | 360 | */ |
| 360 | static void snapshot_purr(void) | 361 | static void snapshot_purr(void) |
| 361 | { | 362 | { |
| 362 | struct cpu_purr_data *pme; | 363 | struct cpu_purr_data *pme; |
| 363 | unsigned long flags; | 364 | unsigned long flags; |
| 364 | 365 | ||
| 365 | if (!cpu_has_feature(CPU_FTR_PURR)) | 366 | if (!cpu_has_feature(CPU_FTR_PURR)) |
| 366 | return; | 367 | return; |
| 367 | local_irq_save(flags); | 368 | local_irq_save(flags); |
| 368 | pme = &__get_cpu_var(cpu_purr_data); | 369 | pme = &__get_cpu_var(cpu_purr_data); |
| 369 | pme->tb = mftb(); | 370 | pme->tb = mftb(); |
| 370 | pme->purr = mfspr(SPRN_PURR); | 371 | pme->purr = mfspr(SPRN_PURR); |
| 371 | pme->initialized = 1; | 372 | pme->initialized = 1; |
| 372 | local_irq_restore(flags); | 373 | local_irq_restore(flags); |
| 373 | } | 374 | } |
| 374 | 375 | ||
| 375 | #endif /* CONFIG_PPC_SPLPAR */ | 376 | #endif /* CONFIG_PPC_SPLPAR */ |
| 376 | 377 | ||
| 377 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ | 378 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ |
| 378 | #define calc_cputime_factors() | 379 | #define calc_cputime_factors() |
| 379 | #define calculate_steal_time() do { } while (0) | 380 | #define calculate_steal_time() do { } while (0) |
| 380 | #endif | 381 | #endif |
| 381 | 382 | ||
| 382 | #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)) | 383 | #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)) |
| 383 | #define snapshot_purr() do { } while (0) | 384 | #define snapshot_purr() do { } while (0) |
| 384 | #endif | 385 | #endif |
| 385 | 386 | ||
| 386 | /* | 387 | /* |
| 387 | * Called when a cpu comes up after the system has finished booting, | 388 | * Called when a cpu comes up after the system has finished booting, |
| 388 | * i.e. as a result of a hotplug cpu action. | 389 | * i.e. as a result of a hotplug cpu action. |
| 389 | */ | 390 | */ |
| 390 | void snapshot_timebase(void) | 391 | void snapshot_timebase(void) |
| 391 | { | 392 | { |
| 392 | __get_cpu_var(last_jiffy) = get_tb_or_rtc(); | 393 | __get_cpu_var(last_jiffy) = get_tb_or_rtc(); |
| 393 | snapshot_purr(); | 394 | snapshot_purr(); |
| 394 | } | 395 | } |
| 395 | 396 | ||
| 396 | void __delay(unsigned long loops) | 397 | void __delay(unsigned long loops) |
| 397 | { | 398 | { |
| 398 | unsigned long start; | 399 | unsigned long start; |
| 399 | int diff; | 400 | int diff; |
| 400 | 401 | ||
| 401 | if (__USE_RTC()) { | 402 | if (__USE_RTC()) { |
| 402 | start = get_rtcl(); | 403 | start = get_rtcl(); |
| 403 | do { | 404 | do { |
| 404 | /* the RTCL register wraps at 1000000000 */ | 405 | /* the RTCL register wraps at 1000000000 */ |
| 405 | diff = get_rtcl() - start; | 406 | diff = get_rtcl() - start; |
| 406 | if (diff < 0) | 407 | if (diff < 0) |
| 407 | diff += 1000000000; | 408 | diff += 1000000000; |
| 408 | } while (diff < loops); | 409 | } while (diff < loops); |
| 409 | } else { | 410 | } else { |
| 410 | start = get_tbl(); | 411 | start = get_tbl(); |
| 411 | while (get_tbl() - start < loops) | 412 | while (get_tbl() - start < loops) |
| 412 | HMT_low(); | 413 | HMT_low(); |
| 413 | HMT_medium(); | 414 | HMT_medium(); |
| 414 | } | 415 | } |
| 415 | } | 416 | } |
| 416 | EXPORT_SYMBOL(__delay); | 417 | EXPORT_SYMBOL(__delay); |
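The RTCL branch is simple modular arithmetic around the register's wrap at 10^9: if start = 999999900 and a later get_rtcl() returns 50, then diff = 50 - 999999900 = -999999850, and adding the 1000000000 wrap period recovers the true 150 elapsed ticks.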
| 417 | 418 | ||
| 418 | void udelay(unsigned long usecs) | 419 | void udelay(unsigned long usecs) |
| 419 | { | 420 | { |
| 420 | __delay(tb_ticks_per_usec * usecs); | 421 | __delay(tb_ticks_per_usec * usecs); |
| 421 | } | 422 | } |
| 422 | EXPORT_SYMBOL(udelay); | 423 | EXPORT_SYMBOL(udelay); |
| 423 | 424 | ||
| 424 | static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, | 425 | static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, |
| 425 | u64 new_tb_to_xs) | 426 | u64 new_tb_to_xs) |
| 426 | { | 427 | { |
| 427 | /* | 428 | /* |
| 428 | * tb_update_count is used to allow the userspace gettimeofday code | 429 | * tb_update_count is used to allow the userspace gettimeofday code |
| 429 | * to assure itself that it sees a consistent view of the tb_to_xs and | 430 | * to assure itself that it sees a consistent view of the tb_to_xs and |
| 430 | * stamp_xsec variables. It reads the tb_update_count, then reads | 431 | * stamp_xsec variables. It reads the tb_update_count, then reads |
| 431 | * tb_to_xs and stamp_xsec and then reads tb_update_count again. If | 432 | * tb_to_xs and stamp_xsec and then reads tb_update_count again. If |
| 432 | * the two values of tb_update_count match and are even then the | 433 | * the two values of tb_update_count match and are even then the |
| 433 | * tb_to_xs and stamp_xsec values are consistent. If not, then it | 434 | * tb_to_xs and stamp_xsec values are consistent. If not, then it |
| 434 | * loops back and reads them again until this criterion is met. | 435 | * loops back and reads them again until this criterion is met. |
| 435 | * We expect the caller to have done the first increment of | 436 | * We expect the caller to have done the first increment of |
| 436 | * vdso_data->tb_update_count already. | 437 | * vdso_data->tb_update_count already. |
| 437 | */ | 438 | */ |
| 438 | vdso_data->tb_orig_stamp = new_tb_stamp; | 439 | vdso_data->tb_orig_stamp = new_tb_stamp; |
| 439 | vdso_data->stamp_xsec = new_stamp_xsec; | 440 | vdso_data->stamp_xsec = new_stamp_xsec; |
| 440 | vdso_data->tb_to_xs = new_tb_to_xs; | 441 | vdso_data->tb_to_xs = new_tb_to_xs; |
| 441 | vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; | 442 | vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; |
| 442 | vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; | 443 | vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; |
| 443 | vdso_data->stamp_xtime = xtime; | 444 | vdso_data->stamp_xtime = xtime; |
| 444 | smp_wmb(); | 445 | smp_wmb(); |
| 445 | ++(vdso_data->tb_update_count); | 446 | ++(vdso_data->tb_update_count); |
| 446 | } | 447 | } |
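The comment above describes a seqcount-style protocol; a hedged sketch of the reader side it implies (types and field access simplified relative to the real vdso code):

    /* Sketch: lock-free reader; retry while an update is in flight (odd
     * count) or the count changed underneath us.
     */
    static void read_gtod_snapshot(u64 *tb_to_xs_out, u64 *stamp_xsec_out)
    {
            unsigned long seq;

            do {
                    seq = vdso_data->tb_update_count;
                    smp_rmb();          /* pairs with the writer's smp_wmb() */
                    *tb_to_xs_out = vdso_data->tb_to_xs;
                    *stamp_xsec_out = vdso_data->stamp_xsec;
                    smp_rmb();
            } while ((seq & 1) || seq != vdso_data->tb_update_count);
    }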
| 447 | 448 | ||
| 448 | #ifdef CONFIG_SMP | 449 | #ifdef CONFIG_SMP |
| 449 | unsigned long profile_pc(struct pt_regs *regs) | 450 | unsigned long profile_pc(struct pt_regs *regs) |
| 450 | { | 451 | { |
| 451 | unsigned long pc = instruction_pointer(regs); | 452 | unsigned long pc = instruction_pointer(regs); |
| 452 | 453 | ||
| 453 | if (in_lock_functions(pc)) | 454 | if (in_lock_functions(pc)) |
| 454 | return regs->link; | 455 | return regs->link; |
| 455 | 456 | ||
| 456 | return pc; | 457 | return pc; |
| 457 | } | 458 | } |
| 458 | EXPORT_SYMBOL(profile_pc); | 459 | EXPORT_SYMBOL(profile_pc); |
| 459 | #endif | 460 | #endif |
| 460 | 461 | ||
| 461 | #ifdef CONFIG_PPC_ISERIES | 462 | #ifdef CONFIG_PPC_ISERIES |
| 462 | 463 | ||
| 463 | /* | 464 | /* |
| 464 | * This function recalibrates the timebase based on the 49-bit time-of-day | 465 | * This function recalibrates the timebase based on the 49-bit time-of-day |
| 465 | * value in the Titan chip. The Titan is much more accurate than the value | 466 | * value in the Titan chip. The Titan is much more accurate than the value |
| 466 | * returned by the service processor for the timebase frequency. | 467 | * returned by the service processor for the timebase frequency. |
| 467 | */ | 468 | */ |
| 468 | 469 | ||
| 469 | static int __init iSeries_tb_recal(void) | 470 | static int __init iSeries_tb_recal(void) |
| 470 | { | 471 | { |
| 471 | struct div_result divres; | 472 | struct div_result divres; |
| 472 | unsigned long titan, tb; | 473 | unsigned long titan, tb; |
| 473 | 474 | ||
| 474 | /* Make sure we only run on iSeries */ | 475 | /* Make sure we only run on iSeries */ |
| 475 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 476 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
| 476 | return -ENODEV; | 477 | return -ENODEV; |
| 477 | 478 | ||
| 478 | tb = get_tb(); | 479 | tb = get_tb(); |
| 479 | titan = HvCallXm_loadTod(); | 480 | titan = HvCallXm_loadTod(); |
| 480 | if ( iSeries_recal_titan ) { | 481 | if ( iSeries_recal_titan ) { |
| 481 | unsigned long tb_ticks = tb - iSeries_recal_tb; | 482 | unsigned long tb_ticks = tb - iSeries_recal_tb; |
| 482 | unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12; | 483 | unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12; |
| 483 | unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec; | 484 | unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec; |
| 484 | unsigned long new_tb_ticks_per_jiffy = | 485 | unsigned long new_tb_ticks_per_jiffy = |
| 485 | DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ); | 486 | DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ); |
| 486 | long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy; | 487 | long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy; |
| 487 | char sign = '+'; | 488 | char sign = '+'; |
| 488 | /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */ | 489 | /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */ |
| 489 | new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ; | 490 | new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ; |
| 490 | 491 | ||
| 491 | if ( tick_diff < 0 ) { | 492 | if ( tick_diff < 0 ) { |
| 492 | tick_diff = -tick_diff; | 493 | tick_diff = -tick_diff; |
| 493 | sign = '-'; | 494 | sign = '-'; |
| 494 | } | 495 | } |
| 495 | if ( tick_diff ) { | 496 | if ( tick_diff ) { |
| 496 | if ( tick_diff < tb_ticks_per_jiffy/25 ) { | 497 | if ( tick_diff < tb_ticks_per_jiffy/25 ) { |
| 497 | printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n", | 498 | printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n", |
| 498 | new_tb_ticks_per_jiffy, sign, tick_diff ); | 499 | new_tb_ticks_per_jiffy, sign, tick_diff ); |
| 499 | tb_ticks_per_jiffy = new_tb_ticks_per_jiffy; | 500 | tb_ticks_per_jiffy = new_tb_ticks_per_jiffy; |
| 500 | tb_ticks_per_sec = new_tb_ticks_per_sec; | 501 | tb_ticks_per_sec = new_tb_ticks_per_sec; |
| 501 | calc_cputime_factors(); | 502 | calc_cputime_factors(); |
| 502 | div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); | 503 | div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); |
| 503 | tb_to_xs = divres.result_low; | 504 | tb_to_xs = divres.result_low; |
| 504 | vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; | 505 | vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; |
| 505 | vdso_data->tb_to_xs = tb_to_xs; | 506 | vdso_data->tb_to_xs = tb_to_xs; |
| 506 | setup_cputime_one_jiffy(); | 507 | setup_cputime_one_jiffy(); |
| 507 | } | 508 | } |
| 508 | else { | 509 | else { |
| 509 | printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" | 510 | printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" |
| 510 | " new tb_ticks_per_jiffy = %lu\n" | 511 | " new tb_ticks_per_jiffy = %lu\n" |
| 511 | " old tb_ticks_per_jiffy = %lu\n", | 512 | " old tb_ticks_per_jiffy = %lu\n", |
| 512 | new_tb_ticks_per_jiffy, tb_ticks_per_jiffy ); | 513 | new_tb_ticks_per_jiffy, tb_ticks_per_jiffy ); |
| 513 | } | 514 | } |
| 514 | } | 515 | } |
| 515 | } | 516 | } |
| 516 | iSeries_recal_titan = titan; | 517 | iSeries_recal_titan = titan; |
| 517 | iSeries_recal_tb = tb; | 518 | iSeries_recal_tb = tb; |
| 518 | 519 | ||
| 519 | /* Called here as now we know accurate values for the timebase */ | 520 | /* Called here as now we know accurate values for the timebase */ |
| 520 | clocksource_init(); | 521 | clocksource_init(); |
| 521 | return 0; | 522 | return 0; |
| 522 | } | 523 | } |
| 523 | late_initcall(iSeries_tb_recal); | 524 | late_initcall(iSeries_tb_recal); |
| 524 | 525 | ||
| 525 | /* Called from platform early init */ | 526 | /* Called from platform early init */ |
| 526 | void __init iSeries_time_init_early(void) | 527 | void __init iSeries_time_init_early(void) |
| 527 | { | 528 | { |
| 528 | iSeries_recal_tb = get_tb(); | 529 | iSeries_recal_tb = get_tb(); |
| 529 | iSeries_recal_titan = HvCallXm_loadTod(); | 530 | iSeries_recal_titan = HvCallXm_loadTod(); |
| 530 | } | 531 | } |
| 531 | #endif /* CONFIG_PPC_ISERIES */ | 532 | #endif /* CONFIG_PPC_ISERIES */ |
| 532 | 533 | ||
| 533 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32) | 534 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32) |
| 534 | DEFINE_PER_CPU(u8, perf_event_pending); | 535 | DEFINE_PER_CPU(u8, perf_event_pending); |
| 535 | 536 | ||
| 536 | void set_perf_event_pending(void) | 537 | void set_perf_event_pending(void) |
| 537 | { | 538 | { |
| 538 | get_cpu_var(perf_event_pending) = 1; | 539 | get_cpu_var(perf_event_pending) = 1; |
| 539 | set_dec(1); | 540 | set_dec(1); |
| 540 | put_cpu_var(perf_event_pending); | 541 | put_cpu_var(perf_event_pending); |
| 541 | } | 542 | } |
| 542 | 543 | ||
| 543 | #define test_perf_event_pending() __get_cpu_var(perf_event_pending) | 544 | #define test_perf_event_pending() __get_cpu_var(perf_event_pending) |
| 544 | #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 | 545 | #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 |
| 545 | 546 | ||
| 546 | #else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ | 547 | #else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ |
| 547 | 548 | ||
| 548 | #define test_perf_event_pending() 0 | 549 | #define test_perf_event_pending() 0 |
| 549 | #define clear_perf_event_pending() | 550 | #define clear_perf_event_pending() |
| 550 | 551 | ||
| 551 | #endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ | 552 | #endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ |
| 552 | 553 | ||
| 553 | /* | 554 | /* |
| 554 | * For iSeries shared processors, we have to let the hypervisor | 555 | * For iSeries shared processors, we have to let the hypervisor |
| 555 | * set the hardware decrementer. We set a virtual decrementer | 556 | * set the hardware decrementer. We set a virtual decrementer |
| 556 | * in the lppaca and call the hypervisor if the virtual | 557 | * in the lppaca and call the hypervisor if the virtual |
| 557 | * decrementer is less than the current value in the hardware | 558 | * decrementer is less than the current value in the hardware |
| 558 | * decrementer. (almost always the new decrementer value will | 559 | * decrementer. (almost always the new decrementer value will |
| 559 | * be greater than the current hardware decrementer so the hypervisor | 560 | * be greater than the current hardware decrementer so the hypervisor |
| 560 | * call will not be needed) | 561 | * call will not be needed) |
| 561 | */ | 562 | */ |
| 562 | 563 | ||
| 563 | /* | 564 | /* |
| 564 | * timer_interrupt - gets called when the decrementer overflows, | 565 | * timer_interrupt - gets called when the decrementer overflows, |
| 565 | * with interrupts disabled. | 566 | * with interrupts disabled. |
| 566 | */ | 567 | */ |
| 567 | void timer_interrupt(struct pt_regs * regs) | 568 | void timer_interrupt(struct pt_regs * regs) |
| 568 | { | 569 | { |
| 569 | struct pt_regs *old_regs; | 570 | struct pt_regs *old_regs; |
| 570 | struct decrementer_clock *decrementer = &__get_cpu_var(decrementers); | 571 | struct decrementer_clock *decrementer = &__get_cpu_var(decrementers); |
| 571 | struct clock_event_device *evt = &decrementer->event; | 572 | struct clock_event_device *evt = &decrementer->event; |
| 572 | u64 now; | 573 | u64 now; |
| 573 | 574 | ||
| 575 | trace_timer_interrupt_entry(regs); | ||
| 576 | |||
| 574 | /* Ensure a positive value is written to the decrementer, or else | 577 | /* Ensure a positive value is written to the decrementer, or else |
| 574 | /* Ensure a positive value is written to the decrementer, or else | 577 | /* Ensure a positive value is written to the decrementer, or else |
| 575 | * some CPUs will continue to take decrementer exceptions */ | 578 | * some CPUs will continue to take decrementer exceptions */ |
| 576 | set_dec(DECREMENTER_MAX); | 579 | set_dec(DECREMENTER_MAX); |
| 577 | 580 | ||
| 578 | #ifdef CONFIG_PPC32 | 581 | #ifdef CONFIG_PPC32 |
| 579 | if (test_perf_event_pending()) { | 582 | if (test_perf_event_pending()) { |
| 580 | clear_perf_event_pending(); | 583 | clear_perf_event_pending(); |
| 581 | perf_event_do_pending(); | 584 | perf_event_do_pending(); |
| 582 | } | 585 | } |
| 583 | if (atomic_read(&ppc_n_lost_interrupts) != 0) | 586 | if (atomic_read(&ppc_n_lost_interrupts) != 0) |
| 584 | do_IRQ(regs); | 587 | do_IRQ(regs); |
| 585 | #endif | 588 | #endif |
| 586 | 589 | ||
| 587 | now = get_tb_or_rtc(); | 590 | now = get_tb_or_rtc(); |
| 588 | if (now < decrementer->next_tb) { | 591 | if (now < decrementer->next_tb) { |
| 589 | /* not time for this event yet */ | 592 | /* not time for this event yet */ |
| 590 | now = decrementer->next_tb - now; | 593 | now = decrementer->next_tb - now; |
| 591 | if (now <= DECREMENTER_MAX) | 594 | if (now <= DECREMENTER_MAX) |
| 592 | set_dec((int)now); | 595 | set_dec((int)now); |
| 596 | trace_timer_interrupt_exit(regs); | ||
| 593 | return; | 597 | return; |
| 594 | } | 598 | } |
| 595 | old_regs = set_irq_regs(regs); | 599 | old_regs = set_irq_regs(regs); |
| 596 | irq_enter(); | 600 | irq_enter(); |
| 597 | 601 | ||
| 598 | calculate_steal_time(); | 602 | calculate_steal_time(); |
| 599 | 603 | ||
| 600 | #ifdef CONFIG_PPC_ISERIES | 604 | #ifdef CONFIG_PPC_ISERIES |
| 601 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | 605 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
| 602 | get_lppaca()->int_dword.fields.decr_int = 0; | 606 | get_lppaca()->int_dword.fields.decr_int = 0; |
| 603 | #endif | 607 | #endif |
| 604 | 608 | ||
| 605 | if (evt->event_handler) | 609 | if (evt->event_handler) |
| 606 | evt->event_handler(evt); | 610 | evt->event_handler(evt); |
| 607 | 611 | ||
| 608 | #ifdef CONFIG_PPC_ISERIES | 612 | #ifdef CONFIG_PPC_ISERIES |
| 609 | if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) | 613 | if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) |
| 610 | process_hvlpevents(); | 614 | process_hvlpevents(); |
| 611 | #endif | 615 | #endif |
| 612 | 616 | ||
| 613 | #ifdef CONFIG_PPC64 | 617 | #ifdef CONFIG_PPC64 |
| 614 | /* collect purr register values often, for accurate calculations */ | 618 | /* collect purr register values often, for accurate calculations */ |
| 615 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 619 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
| 616 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); | 620 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); |
| 617 | cu->current_tb = mfspr(SPRN_PURR); | 621 | cu->current_tb = mfspr(SPRN_PURR); |
| 618 | } | 622 | } |
| 619 | #endif | 623 | #endif |
| 620 | 624 | ||
| 621 | irq_exit(); | 625 | irq_exit(); |
| 622 | set_irq_regs(old_regs); | 626 | set_irq_regs(old_regs); |
| 627 | |||
| 628 | trace_timer_interrupt_exit(regs); | ||
| 623 | } | 629 | } |
| 624 | 630 | ||
| 625 | void wakeup_decrementer(void) | 631 | void wakeup_decrementer(void) |
| 626 | { | 632 | { |
| 627 | unsigned long ticks; | 633 | unsigned long ticks; |
| 628 | 634 | ||
| 629 | /* | 635 | /* |
| 630 | * The timebase gets saved on sleep and restored on wakeup, | 636 | * The timebase gets saved on sleep and restored on wakeup, |
| 631 | * so all we need to do is to reset the decrementer. | 637 | * so all we need to do is to reset the decrementer. |
| 632 | */ | 638 | */ |
| 633 | ticks = tb_ticks_since(__get_cpu_var(last_jiffy)); | 639 | ticks = tb_ticks_since(__get_cpu_var(last_jiffy)); |
| 634 | if (ticks < tb_ticks_per_jiffy) | 640 | if (ticks < tb_ticks_per_jiffy) |
| 635 | ticks = tb_ticks_per_jiffy - ticks; | 641 | ticks = tb_ticks_per_jiffy - ticks; |
| 636 | else | 642 | else |
| 637 | ticks = 1; | 643 | ticks = 1; |
| 638 | set_dec(ticks); | 644 | set_dec(ticks); |
| 639 | } | 645 | } |
| 640 | 646 | ||
| 641 | #ifdef CONFIG_SUSPEND | 647 | #ifdef CONFIG_SUSPEND |
| 642 | void generic_suspend_disable_irqs(void) | 648 | void generic_suspend_disable_irqs(void) |
| 643 | { | 649 | { |
| 644 | preempt_disable(); | 650 | preempt_disable(); |
| 645 | 651 | ||
| 646 | /* Disable the decrementer, so that it doesn't interfere | 652 | /* Disable the decrementer, so that it doesn't interfere |
| 647 | * with suspending. | 653 | * with suspending. |
| 648 | */ | 654 | */ |
| 649 | 655 | ||
| 650 | set_dec(0x7fffffff); | 656 | set_dec(0x7fffffff); |
| 651 | local_irq_disable(); | 657 | local_irq_disable(); |
| 652 | set_dec(0x7fffffff); | 658 | set_dec(0x7fffffff); |
| 653 | } | 659 | } |
| 654 | 660 | ||
| 655 | void generic_suspend_enable_irqs(void) | 661 | void generic_suspend_enable_irqs(void) |
| 656 | { | 662 | { |
| 657 | wakeup_decrementer(); | 663 | wakeup_decrementer(); |
| 658 | 664 | ||
| 659 | local_irq_enable(); | 665 | local_irq_enable(); |
| 660 | preempt_enable(); | 666 | preempt_enable(); |
| 661 | } | 667 | } |
| 662 | 668 | ||
| 663 | /* Overrides the weak version in kernel/power/main.c */ | 669 | /* Overrides the weak version in kernel/power/main.c */ |
| 664 | void arch_suspend_disable_irqs(void) | 670 | void arch_suspend_disable_irqs(void) |
| 665 | { | 671 | { |
| 666 | if (ppc_md.suspend_disable_irqs) | 672 | if (ppc_md.suspend_disable_irqs) |
| 667 | ppc_md.suspend_disable_irqs(); | 673 | ppc_md.suspend_disable_irqs(); |
| 668 | generic_suspend_disable_irqs(); | 674 | generic_suspend_disable_irqs(); |
| 669 | } | 675 | } |
| 670 | 676 | ||
| 671 | /* Overrides the weak version in kernel/power/main.c */ | 677 | /* Overrides the weak version in kernel/power/main.c */ |
| 672 | void arch_suspend_enable_irqs(void) | 678 | void arch_suspend_enable_irqs(void) |
| 673 | { | 679 | { |
| 674 | generic_suspend_enable_irqs(); | 680 | generic_suspend_enable_irqs(); |
| 675 | if (ppc_md.suspend_enable_irqs) | 681 | if (ppc_md.suspend_enable_irqs) |
| 676 | ppc_md.suspend_enable_irqs(); | 682 | ppc_md.suspend_enable_irqs(); |
| 677 | } | 683 | } |
| 678 | #endif | 684 | #endif |
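"Overrides the weak version" works through the linker: kernel/power/main.c defines these hooks with a weak attribute, and the strong definitions above replace them at link time. A hedged sketch of the generic fallbacks being overridden (abbreviated, not copied from this diff):

    /* Sketch of the weak fallbacks in kernel/power/main.c. */
    void __weak arch_suspend_disable_irqs(void)
    {
            local_irq_disable();
    }

    void __weak arch_suspend_enable_irqs(void)
    {
            local_irq_enable();
    }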
| 679 | 685 | ||
| 680 | #ifdef CONFIG_SMP | 686 | #ifdef CONFIG_SMP |
| 681 | void __init smp_space_timers(unsigned int max_cpus) | 687 | void __init smp_space_timers(unsigned int max_cpus) |
| 682 | { | 688 | { |
| 683 | int i; | 689 | int i; |
| 684 | u64 previous_tb = per_cpu(last_jiffy, boot_cpuid); | 690 | u64 previous_tb = per_cpu(last_jiffy, boot_cpuid); |
| 685 | 691 | ||
| 686 | /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */ | 692 | /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */ |
| 687 | previous_tb -= tb_ticks_per_jiffy; | 693 | previous_tb -= tb_ticks_per_jiffy; |
| 688 | 694 | ||
| 689 | for_each_possible_cpu(i) { | 695 | for_each_possible_cpu(i) { |
| 690 | if (i == boot_cpuid) | 696 | if (i == boot_cpuid) |
| 691 | continue; | 697 | continue; |
| 692 | per_cpu(last_jiffy, i) = previous_tb; | 698 | per_cpu(last_jiffy, i) = previous_tb; |
| 693 | } | 699 | } |
| 694 | } | 700 | } |
| 695 | #endif | 701 | #endif |
| 696 | 702 | ||
| 697 | /* | 703 | /* |
| 698 | * Scheduler clock - returns current time in nanosec units. | 704 | * Scheduler clock - returns current time in nanosec units. |
| 699 | * | 705 | * |
| 700 | * Note: mulhdu(a, b) (multiply high double unsigned) returns | 706 | * Note: mulhdu(a, b) (multiply high double unsigned) returns |
| 701 | * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b | 707 | * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b |
| 702 | * are 64-bit unsigned numbers. | 708 | * are 64-bit unsigned numbers. |
| 703 | */ | 709 | */ |
| 704 | unsigned long long sched_clock(void) | 710 | unsigned long long sched_clock(void) |
| 705 | { | 711 | { |
| 706 | if (__USE_RTC()) | 712 | if (__USE_RTC()) |
| 707 | return get_rtc(); | 713 | return get_rtc(); |
| 708 | return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; | 714 | return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; |
| 709 | } | 715 | } |
| 710 | 716 | ||
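sched_clock()'s conversion leans on mulhdu(): tb_to_ns_scale and tb_to_ns_shift are precomputed so that mulhdu(delta, scale) << shift yields nanoseconds, i.e. delta * scale >> (64 - shift). A hedged sketch of mulhdu's semantics using GCC's 128-bit integers (the kernel's real version is hand-written assembly on 32-bit):

    /* Sketch: high 64 bits of a 64x64-bit multiply, i.e. (a * b) >> 64. */
    static inline unsigned long long mulhdu(unsigned long long a,
                                            unsigned long long b)
    {
            return (unsigned long long)(((unsigned __int128)a * b) >> 64);
    }

    /* ns = mulhdu(tb_delta, tb_to_ns_scale) << tb_to_ns_shift
     *    = tb_delta * tb_to_ns_scale >> (64 - tb_to_ns_shift)
     */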
| 711 | static int __init get_freq(char *name, int cells, unsigned long *val) | 717 | static int __init get_freq(char *name, int cells, unsigned long *val) |
| 712 | { | 718 | { |
| 713 | struct device_node *cpu; | 719 | struct device_node *cpu; |
| 714 | const unsigned int *fp; | 720 | const unsigned int *fp; |
| 715 | int found = 0; | 721 | int found = 0; |
| 716 | 722 | ||
| 717 | /* The cpu node should have timebase and clock frequency properties */ | 723 | /* The cpu node should have timebase and clock frequency properties */ |
| 718 | cpu = of_find_node_by_type(NULL, "cpu"); | 724 | cpu = of_find_node_by_type(NULL, "cpu"); |
| 719 | 725 | ||
| 720 | if (cpu) { | 726 | if (cpu) { |
| 721 | fp = of_get_property(cpu, name, NULL); | 727 | fp = of_get_property(cpu, name, NULL); |
| 722 | if (fp) { | 728 | if (fp) { |
| 723 | found = 1; | 729 | found = 1; |
| 724 | *val = of_read_ulong(fp, cells); | 730 | *val = of_read_ulong(fp, cells); |
| 725 | } | 731 | } |
| 726 | 732 | ||
| 727 | of_node_put(cpu); | 733 | of_node_put(cpu); |
| 728 | } | 734 | } |
| 729 | 735 | ||
| 730 | return found; | 736 | return found; |
| 731 | } | 737 | } |
| 732 | 738 | ||
| 733 | /* should become __cpuinit when secondary_cpu_time_init also is */ | 739 | /* should become __cpuinit when secondary_cpu_time_init also is */ |
| 734 | void start_cpu_decrementer(void) | 740 | void start_cpu_decrementer(void) |
| 735 | { | 741 | { |
| 736 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | 742 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) |
| 737 | /* Clear any pending timer interrupts */ | 743 | /* Clear any pending timer interrupts */ |
| 738 | mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); | 744 | mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); |
| 739 | 745 | ||
| 740 | /* Enable decrementer interrupt */ | 746 | /* Enable decrementer interrupt */ |
| 741 | mtspr(SPRN_TCR, TCR_DIE); | 747 | mtspr(SPRN_TCR, TCR_DIE); |
| 742 | #endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */ | 748 | #endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */ |
| 743 | } | 749 | } |
| 744 | 750 | ||
| 745 | void __init generic_calibrate_decr(void) | 751 | void __init generic_calibrate_decr(void) |
| 746 | { | 752 | { |
| 747 | ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ | 753 | ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ |
| 748 | 754 | ||
| 749 | if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) && | 755 | if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) && |
| 750 | !get_freq("timebase-frequency", 1, &ppc_tb_freq)) { | 756 | !get_freq("timebase-frequency", 1, &ppc_tb_freq)) { |
| 751 | 757 | ||
| 752 | printk(KERN_ERR "WARNING: Estimating decrementer frequency " | 758 | printk(KERN_ERR "WARNING: Estimating decrementer frequency " |
| 753 | "(not found)\n"); | 759 | "(not found)\n"); |
| 754 | } | 760 | } |
| 755 | 761 | ||
| 756 | ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */ | 762 | ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */ |
| 757 | 763 | ||
| 758 | if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) && | 764 | if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) && |
| 759 | !get_freq("clock-frequency", 1, &ppc_proc_freq)) { | 765 | !get_freq("clock-frequency", 1, &ppc_proc_freq)) { |
| 760 | 766 | ||
| 761 | printk(KERN_ERR "WARNING: Estimating processor frequency " | 767 | printk(KERN_ERR "WARNING: Estimating processor frequency " |
| 762 | "(not found)\n"); | 768 | "(not found)\n"); |
| 763 | } | 769 | } |
| 764 | } | 770 | } |
| 765 | 771 | ||
| 766 | int update_persistent_clock(struct timespec now) | 772 | int update_persistent_clock(struct timespec now) |
| 767 | { | 773 | { |
| 768 | struct rtc_time tm; | 774 | struct rtc_time tm; |
| 769 | 775 | ||
| 770 | if (!ppc_md.set_rtc_time) | 776 | if (!ppc_md.set_rtc_time) |
| 771 | return 0; | 777 | return 0; |
| 772 | 778 | ||
| 773 | to_tm(now.tv_sec + 1 + timezone_offset, &tm); | 779 | to_tm(now.tv_sec + 1 + timezone_offset, &tm); |
| 774 | tm.tm_year -= 1900; | 780 | tm.tm_year -= 1900; |
| 775 | tm.tm_mon -= 1; | 781 | tm.tm_mon -= 1; |
| 776 | 782 | ||
| 777 | return ppc_md.set_rtc_time(&tm); | 783 | return ppc_md.set_rtc_time(&tm); |
| 778 | } | 784 | } |
| 779 | 785 | ||
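The -1900 and -1 adjustments are the struct rtc_time conventions (years counted from 1900, months 0-11), while to_tm() further down fills in full calendar values. A tiny illustrative check, not kernel code:

#include <assert.h>

struct rtc_time_sketch { int tm_year, tm_mon, tm_mday; };

int main(void)
{
	/* to_tm()-style calendar values for 9 Nov 2009 ... */
	struct rtc_time_sketch tm = { .tm_year = 2009, .tm_mon = 11, .tm_mday = 9 };

	/* ... become rtc_time conventions after the adjustments above */
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;
	assert(tm.tm_year == 109 && tm.tm_mon == 10);
	return 0;
}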
| 780 | static void __read_persistent_clock(struct timespec *ts) | 786 | static void __read_persistent_clock(struct timespec *ts) |
| 781 | { | 787 | { |
| 782 | struct rtc_time tm; | 788 | struct rtc_time tm; |
| 783 | static int first = 1; | 789 | static int first = 1; |
| 784 | 790 | ||
| 785 | ts->tv_nsec = 0; | 791 | ts->tv_nsec = 0; |
| 786 | /* XXX this is a little fragile but will work okay in the short term */ | 792 | /* XXX this is a little fragile but will work okay in the short term */ |
| 787 | if (first) { | 793 | if (first) { |
| 788 | first = 0; | 794 | first = 0; |
| 789 | if (ppc_md.time_init) | 795 | if (ppc_md.time_init) |
| 790 | timezone_offset = ppc_md.time_init(); | 796 | timezone_offset = ppc_md.time_init(); |
| 791 | 797 | ||
| 792 | /* get_boot_time() isn't guaranteed to be safe to call late */ | 798 | /* get_boot_time() isn't guaranteed to be safe to call late */ |
| 793 | if (ppc_md.get_boot_time) { | 799 | if (ppc_md.get_boot_time) { |
| 794 | ts->tv_sec = ppc_md.get_boot_time() - timezone_offset; | 800 | ts->tv_sec = ppc_md.get_boot_time() - timezone_offset; |
| 795 | return; | 801 | return; |
| 796 | } | 802 | } |
| 797 | } | 803 | } |
| 798 | if (!ppc_md.get_rtc_time) { | 804 | if (!ppc_md.get_rtc_time) { |
| 799 | ts->tv_sec = 0; | 805 | ts->tv_sec = 0; |
| 800 | return; | 806 | return; |
| 801 | } | 807 | } |
| 802 | ppc_md.get_rtc_time(&tm); | 808 | ppc_md.get_rtc_time(&tm); |
| 803 | 809 | ||
| 804 | ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, | 810 | ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, |
| 805 | tm.tm_hour, tm.tm_min, tm.tm_sec); | 811 | tm.tm_hour, tm.tm_min, tm.tm_sec); |
| 806 | } | 812 | } |
| 807 | 813 | ||
| 808 | void read_persistent_clock(struct timespec *ts) | 814 | void read_persistent_clock(struct timespec *ts) |
| 809 | { | 815 | { |
| 810 | __read_persistent_clock(ts); | 816 | __read_persistent_clock(ts); |
| 811 | 817 | ||
| 812 | /* Sanitize it in case real time clock is set below EPOCH */ | 818 | /* Sanitize it in case real time clock is set below EPOCH */ |
| 813 | if (ts->tv_sec < 0) { | 819 | if (ts->tv_sec < 0) { |
| 814 | ts->tv_sec = 0; | 820 | ts->tv_sec = 0; |
| 815 | ts->tv_nsec = 0; | 821 | ts->tv_nsec = 0; |
| 816 | } | 822 | } |
| 817 | 823 | ||
| 818 | } | 824 | } |
| 819 | 825 | ||
| 820 | /* clocksource code */ | 826 | /* clocksource code */ |
| 821 | static cycle_t rtc_read(struct clocksource *cs) | 827 | static cycle_t rtc_read(struct clocksource *cs) |
| 822 | { | 828 | { |
| 823 | return (cycle_t)get_rtc(); | 829 | return (cycle_t)get_rtc(); |
| 824 | } | 830 | } |
| 825 | 831 | ||
| 826 | static cycle_t timebase_read(struct clocksource *cs) | 832 | static cycle_t timebase_read(struct clocksource *cs) |
| 827 | { | 833 | { |
| 828 | return (cycle_t)get_tb(); | 834 | return (cycle_t)get_tb(); |
| 829 | } | 835 | } |
| 830 | 836 | ||
| 831 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | 837 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) |
| 832 | { | 838 | { |
| 833 | u64 t2x, stamp_xsec; | 839 | u64 t2x, stamp_xsec; |
| 834 | 840 | ||
| 835 | if (clock != &clocksource_timebase) | 841 | if (clock != &clocksource_timebase) |
| 836 | return; | 842 | return; |
| 837 | 843 | ||
| 838 | /* Make userspace gettimeofday spin until we're done. */ | 844 | /* Make userspace gettimeofday spin until we're done. */ |
| 839 | ++vdso_data->tb_update_count; | 845 | ++vdso_data->tb_update_count; |
| 840 | smp_mb(); | 846 | smp_mb(); |
| 841 | 847 | ||
| 842 | /* XXX this assumes clock->shift == 22 */ | 848 | /* XXX this assumes clock->shift == 22 */ |
| 843 | /* 4611686018 ~= 2^(20+64-22) / 1e9 */ | 849 | /* 4611686018 ~= 2^(20+64-22) / 1e9 */ |
| 844 | t2x = (u64) clock->mult * 4611686018ULL; | 850 | t2x = (u64) clock->mult * 4611686018ULL; |
| 845 | stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; | 851 | stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; |
| 846 | do_div(stamp_xsec, 1000000000); | 852 | do_div(stamp_xsec, 1000000000); |
| 847 | stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; | 853 | stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; |
| 848 | update_gtod(clock->cycle_last, stamp_xsec, t2x); | 854 | update_gtod(clock->cycle_last, stamp_xsec, t2x); |
| 849 | } | 855 | } |
| 850 | 856 | ||
| 851 | void update_vsyscall_tz(void) | 857 | void update_vsyscall_tz(void) |
| 852 | { | 858 | { |
| 853 | /* Make userspace gettimeofday spin until we're done. */ | 859 | /* Make userspace gettimeofday spin until we're done. */ |
| 854 | ++vdso_data->tb_update_count; | 860 | ++vdso_data->tb_update_count; |
| 855 | smp_mb(); | 861 | smp_mb(); |
| 856 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; | 862 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; |
| 857 | vdso_data->tz_dsttime = sys_tz.tz_dsttime; | 863 | vdso_data->tz_dsttime = sys_tz.tz_dsttime; |
| 858 | smp_mb(); | 864 | smp_mb(); |
| 859 | ++vdso_data->tb_update_count; | 865 | ++vdso_data->tb_update_count; |
| 860 | } | 866 | } |
| 861 | 867 | ||
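Both updaters bump tb_update_count once before and once after writing, so the count is odd exactly while an update is in flight. A reader-side sketch of the protocol the vDSO is expected to follow (C11 atomics stand in for the kernel's barriers; the struct and names are illustrative):

#include <stdatomic.h>

struct vdso_tz_sketch {
	_Atomic unsigned long tb_update_count;
	int tz_minuteswest;
	int tz_dsttime;
};

static void read_tz(struct vdso_tz_sketch *v, int *west, int *dst)
{
	unsigned long seq;

	do {
		seq = atomic_load_explicit(&v->tb_update_count,
					   memory_order_acquire);
		*west = v->tz_minuteswest;
		*dst = v->tz_dsttime;
		atomic_thread_fence(memory_order_acquire);
	} while ((seq & 1) ||	/* writer mid-update: count is odd */
		 seq != atomic_load_explicit(&v->tb_update_count,
					     memory_order_relaxed));
}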
| 862 | static void __init clocksource_init(void) | 868 | static void __init clocksource_init(void) |
| 863 | { | 869 | { |
| 864 | struct clocksource *clock; | 870 | struct clocksource *clock; |
| 865 | 871 | ||
| 866 | if (__USE_RTC()) | 872 | if (__USE_RTC()) |
| 867 | clock = &clocksource_rtc; | 873 | clock = &clocksource_rtc; |
| 868 | else | 874 | else |
| 869 | clock = &clocksource_timebase; | 875 | clock = &clocksource_timebase; |
| 870 | 876 | ||
| 871 | clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift); | 877 | clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift); |
| 872 | 878 | ||
| 873 | if (clocksource_register(clock)) { | 879 | if (clocksource_register(clock)) { |
| 874 | printk(KERN_ERR "clocksource: %s is already registered\n", | 880 | printk(KERN_ERR "clocksource: %s is already registered\n", |
| 875 | clock->name); | 881 | clock->name); |
| 876 | return; | 882 | return; |
| 877 | } | 883 | } |
| 878 | 884 | ||
| 879 | printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n", | 885 | printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n", |
| 880 | clock->name, clock->mult, clock->shift); | 886 | clock->name, clock->mult, clock->shift); |
| 881 | } | 887 | } |
| 882 | 888 | ||
| 883 | static int decrementer_set_next_event(unsigned long evt, | 889 | static int decrementer_set_next_event(unsigned long evt, |
| 884 | struct clock_event_device *dev) | 890 | struct clock_event_device *dev) |
| 885 | { | 891 | { |
| 886 | __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt; | 892 | __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt; |
| 887 | set_dec(evt); | 893 | set_dec(evt); |
| 888 | return 0; | 894 | return 0; |
| 889 | } | 895 | } |
| 890 | 896 | ||
| 891 | static void decrementer_set_mode(enum clock_event_mode mode, | 897 | static void decrementer_set_mode(enum clock_event_mode mode, |
| 892 | struct clock_event_device *dev) | 898 | struct clock_event_device *dev) |
| 893 | { | 899 | { |
| 894 | if (mode != CLOCK_EVT_MODE_ONESHOT) | 900 | if (mode != CLOCK_EVT_MODE_ONESHOT) |
| 895 | decrementer_set_next_event(DECREMENTER_MAX, dev); | 901 | decrementer_set_next_event(DECREMENTER_MAX, dev); |
| 896 | } | 902 | } |
| 897 | 903 | ||
| 898 | static void __init setup_clockevent_multiplier(unsigned long hz) | 904 | static void __init setup_clockevent_multiplier(unsigned long hz) |
| 899 | { | 905 | { |
| 900 | u64 mult, shift = 32; | 906 | u64 mult, shift = 32; |
| 901 | 907 | ||
| 902 | while (1) { | 908 | while (1) { |
| 903 | mult = div_sc(hz, NSEC_PER_SEC, shift); | 909 | mult = div_sc(hz, NSEC_PER_SEC, shift); |
| 904 | if (mult && (mult >> 32UL) == 0UL) | 910 | if (mult && (mult >> 32UL) == 0UL) |
| 905 | break; | 911 | break; |
| 906 | 912 | ||
| 907 | shift--; | 913 | shift--; |
| 908 | } | 914 | } |
| 909 | 915 | ||
| 910 | decrementer_clockevent.shift = shift; | 916 | decrementer_clockevent.shift = shift; |
| 911 | decrementer_clockevent.mult = mult; | 917 | decrementer_clockevent.mult = mult; |
| 912 | } | 918 | } |
| 913 | 919 | ||
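div_sc(hz, NSEC_PER_SEC, shift) is (hz << shift) / 1e9, so the loop picks the largest shift whose multiplier still fits in 32 bits; the clockevents core later converts a nanosecond delta to decrementer ticks as (ns * mult) >> shift. A standalone worked example, assuming an illustrative 512 MHz timebase:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hz = 512000000, mult;	/* assumed example timebase frequency */
	unsigned shift = 32;

	while (1) {
		mult = (hz << shift) / 1000000000ULL;	/* div_sc() equivalent */
		if (mult && (mult >> 32) == 0)
			break;
		shift--;
	}
	/* a 1 ms delta programmed into the decrementer (~512000 ticks) */
	printf("shift=%u mult=%llu ticks=%llu\n", shift,
	       (unsigned long long)mult,
	       (unsigned long long)((1000000ULL * mult) >> shift));
	return 0;
}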
| 914 | static void register_decrementer_clockevent(int cpu) | 920 | static void register_decrementer_clockevent(int cpu) |
| 915 | { | 921 | { |
| 916 | struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; | 922 | struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; |
| 917 | 923 | ||
| 918 | *dec = decrementer_clockevent; | 924 | *dec = decrementer_clockevent; |
| 919 | dec->cpumask = cpumask_of(cpu); | 925 | dec->cpumask = cpumask_of(cpu); |
| 920 | 926 | ||
| 921 | printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", | 927 | printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", |
| 922 | dec->name, dec->mult, dec->shift, cpu); | 928 | dec->name, dec->mult, dec->shift, cpu); |
| 923 | 929 | ||
| 924 | clockevents_register_device(dec); | 930 | clockevents_register_device(dec); |
| 925 | } | 931 | } |
| 926 | 932 | ||
| 927 | static void __init init_decrementer_clockevent(void) | 933 | static void __init init_decrementer_clockevent(void) |
| 928 | { | 934 | { |
| 929 | int cpu = smp_processor_id(); | 935 | int cpu = smp_processor_id(); |
| 930 | 936 | ||
| 931 | setup_clockevent_multiplier(ppc_tb_freq); | 937 | setup_clockevent_multiplier(ppc_tb_freq); |
| 932 | decrementer_clockevent.max_delta_ns = | 938 | decrementer_clockevent.max_delta_ns = |
| 933 | clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); | 939 | clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); |
| 934 | decrementer_clockevent.min_delta_ns = | 940 | decrementer_clockevent.min_delta_ns = |
| 935 | clockevent_delta2ns(2, &decrementer_clockevent); | 941 | clockevent_delta2ns(2, &decrementer_clockevent); |
| 936 | 942 | ||
| 937 | register_decrementer_clockevent(cpu); | 943 | register_decrementer_clockevent(cpu); |
| 938 | } | 944 | } |
| 939 | 945 | ||
| 940 | void secondary_cpu_time_init(void) | 946 | void secondary_cpu_time_init(void) |
| 941 | { | 947 | { |
| 942 | /* Start the decrementer on CPUs that have manual control | 948 | /* Start the decrementer on CPUs that have manual control |
| 943 | * such as BookE | 949 | * such as BookE |
| 944 | */ | 950 | */ |
| 945 | start_cpu_decrementer(); | 951 | start_cpu_decrementer(); |
| 946 | 952 | ||
| 947 | /* FIXME: Should make unrelated change to move snapshot_timebase | 953 | /* FIXME: Should make unrelated change to move snapshot_timebase |
| 948 | * call here! */ | 954 | * call here! */ |
| 949 | register_decrementer_clockevent(smp_processor_id()); | 955 | register_decrementer_clockevent(smp_processor_id()); |
| 950 | } | 956 | } |
| 951 | 957 | ||
| 952 | /* This function is only called on the boot processor */ | 958 | /* This function is only called on the boot processor */ |
| 953 | void __init time_init(void) | 959 | void __init time_init(void) |
| 954 | { | 960 | { |
| 955 | unsigned long flags; | 961 | unsigned long flags; |
| 956 | struct div_result res; | 962 | struct div_result res; |
| 957 | u64 scale, x; | 963 | u64 scale, x; |
| 958 | unsigned shift; | 964 | unsigned shift; |
| 959 | 965 | ||
| 960 | if (__USE_RTC()) { | 966 | if (__USE_RTC()) { |
| 961 | /* 601 processor: dec counts down by 128 every 128ns */ | 967 | /* 601 processor: dec counts down by 128 every 128ns */ |
| 962 | ppc_tb_freq = 1000000000; | 968 | ppc_tb_freq = 1000000000; |
| 963 | tb_last_jiffy = get_rtcl(); | 969 | tb_last_jiffy = get_rtcl(); |
| 964 | } else { | 970 | } else { |
| 965 | /* Normal PowerPC with timebase register */ | 971 | /* Normal PowerPC with timebase register */ |
| 966 | ppc_md.calibrate_decr(); | 972 | ppc_md.calibrate_decr(); |
| 967 | printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", | 973 | printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", |
| 968 | ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); | 974 | ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); |
| 969 | printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", | 975 | printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", |
| 970 | ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); | 976 | ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); |
| 971 | tb_last_jiffy = get_tb(); | 977 | tb_last_jiffy = get_tb(); |
| 972 | } | 978 | } |
| 973 | 979 | ||
| 974 | tb_ticks_per_jiffy = ppc_tb_freq / HZ; | 980 | tb_ticks_per_jiffy = ppc_tb_freq / HZ; |
| 975 | tb_ticks_per_sec = ppc_tb_freq; | 981 | tb_ticks_per_sec = ppc_tb_freq; |
| 976 | tb_ticks_per_usec = ppc_tb_freq / 1000000; | 982 | tb_ticks_per_usec = ppc_tb_freq / 1000000; |
| 977 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); | 983 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); |
| 978 | calc_cputime_factors(); | 984 | calc_cputime_factors(); |
| 979 | setup_cputime_one_jiffy(); | 985 | setup_cputime_one_jiffy(); |
| 980 | 986 | ||
| 981 | /* | 987 | /* |
| 982 | * Calculate the length of each tick in ns. It will not be | 988 | * Calculate the length of each tick in ns. It will not be |
| 983 | * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ. | 989 | * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ. |
| 984 | * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq, | 990 | * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq, |
| 985 | * rounded up. | 991 | * rounded up. |
| 986 | */ | 992 | */ |
| 987 | x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1; | 993 | x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1; |
| 988 | do_div(x, ppc_tb_freq); | 994 | do_div(x, ppc_tb_freq); |
| 989 | tick_nsec = x; | 995 | tick_nsec = x; |
| 990 | last_tick_len = x << TICKLEN_SCALE; | 996 | last_tick_len = x << TICKLEN_SCALE; |
| 991 | 997 | ||
| 992 | /* | 998 | /* |
| 993 | * Compute ticklen_to_xs, which is a factor which gets multiplied | 999 | * Compute ticklen_to_xs, which is a factor which gets multiplied |
| 994 | * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value. | 1000 | * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value. |
| 995 | * It is computed as: | 1001 | * It is computed as: |
| 996 | * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9) | 1002 | * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9) |
| 997 | * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT | 1003 | * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT |
| 998 | * which turns out to be N = 51 - SHIFT_HZ. | 1004 | * which turns out to be N = 51 - SHIFT_HZ. |
| 999 | * This gives the result as a 0.64 fixed-point fraction. | 1005 | * This gives the result as a 0.64 fixed-point fraction. |
| 1000 | * That value is reduced by an offset amounting to 1 xsec per | 1006 | * That value is reduced by an offset amounting to 1 xsec per |
| 1001 | * 2^31 timebase ticks to avoid problems with time going backwards | 1007 | * 2^31 timebase ticks to avoid problems with time going backwards |
| 1002 | * by 1 xsec when we do timer_recalc_offset due to losing the | 1008 | * by 1 xsec when we do timer_recalc_offset due to losing the |
| 1003 | * fractional xsec. That offset is equal to ppc_tb_freq/2^51 | 1009 | * fractional xsec. That offset is equal to ppc_tb_freq/2^51 |
| 1004 | * since there are 2^20 xsec in a second. | 1010 | * since there are 2^20 xsec in a second. |
| 1005 | */ | 1011 | */ |
| 1006 | div128_by_32((1ULL << 51) - ppc_tb_freq, 0, | 1012 | div128_by_32((1ULL << 51) - ppc_tb_freq, 0, |
| 1007 | tb_ticks_per_jiffy << SHIFT_HZ, &res); | 1013 | tb_ticks_per_jiffy << SHIFT_HZ, &res); |
| 1008 | div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res); | 1014 | div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res); |
| 1009 | ticklen_to_xs = res.result_low; | 1015 | ticklen_to_xs = res.result_low; |
| 1010 | 1016 | ||
| 1011 | /* Compute tb_to_xs from tick_nsec */ | 1017 | /* Compute tb_to_xs from tick_nsec */ |
| 1012 | tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs); | 1018 | tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs); |
| 1013 | 1019 | ||
| 1014 | /* | 1020 | /* |
| 1015 | * Compute scale factor for sched_clock. | 1021 | * Compute scale factor for sched_clock. |
| 1016 | * The calibrate_decr() function has set tb_ticks_per_sec, | 1022 | * The calibrate_decr() function has set tb_ticks_per_sec, |
| 1017 | * which is the timebase frequency. | 1023 | * which is the timebase frequency. |
| 1018 | * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret | 1024 | * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret |
| 1019 | * the 128-bit result as a 64.64 fixed-point number. | 1025 | * the 128-bit result as a 64.64 fixed-point number. |
| 1020 | * We then shift that number right until it is less than 1.0, | 1026 | * We then shift that number right until it is less than 1.0, |
| 1021 | * giving us the scale factor and shift count to use in | 1027 | * giving us the scale factor and shift count to use in |
| 1022 | * sched_clock(). | 1028 | * sched_clock(). |
| 1023 | */ | 1029 | */ |
| 1024 | div128_by_32(1000000000, 0, tb_ticks_per_sec, &res); | 1030 | div128_by_32(1000000000, 0, tb_ticks_per_sec, &res); |
| 1025 | scale = res.result_low; | 1031 | scale = res.result_low; |
| 1026 | for (shift = 0; res.result_high != 0; ++shift) { | 1032 | for (shift = 0; res.result_high != 0; ++shift) { |
| 1027 | scale = (scale >> 1) | (res.result_high << 63); | 1033 | scale = (scale >> 1) | (res.result_high << 63); |
| 1028 | res.result_high >>= 1; | 1034 | res.result_high >>= 1; |
| 1029 | } | 1035 | } |
| 1030 | tb_to_ns_scale = scale; | 1036 | tb_to_ns_scale = scale; |
| 1031 | tb_to_ns_shift = shift; | 1037 | tb_to_ns_shift = shift; |
| 1032 | /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ | 1038 | /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ |
| 1033 | boot_tb = get_tb_or_rtc(); | 1039 | boot_tb = get_tb_or_rtc(); |
| 1034 | 1040 | ||
| 1035 | write_seqlock_irqsave(&xtime_lock, flags); | 1041 | write_seqlock_irqsave(&xtime_lock, flags); |
| 1036 | 1042 | ||
| 1037 | /* If platform provided a timezone (pmac), we correct the time */ | 1043 | /* If platform provided a timezone (pmac), we correct the time */ |
| 1038 | if (timezone_offset) { | 1044 | if (timezone_offset) { |
| 1039 | sys_tz.tz_minuteswest = -timezone_offset / 60; | 1045 | sys_tz.tz_minuteswest = -timezone_offset / 60; |
| 1040 | sys_tz.tz_dsttime = 0; | 1046 | sys_tz.tz_dsttime = 0; |
| 1041 | } | 1047 | } |
| 1042 | 1048 | ||
| 1043 | vdso_data->tb_orig_stamp = tb_last_jiffy; | 1049 | vdso_data->tb_orig_stamp = tb_last_jiffy; |
| 1044 | vdso_data->tb_update_count = 0; | 1050 | vdso_data->tb_update_count = 0; |
| 1045 | vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; | 1051 | vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; |
| 1046 | vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; | 1052 | vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; |
| 1047 | vdso_data->tb_to_xs = tb_to_xs; | 1053 | vdso_data->tb_to_xs = tb_to_xs; |
| 1048 | 1054 | ||
| 1049 | write_sequnlock_irqrestore(&xtime_lock, flags); | 1055 | write_sequnlock_irqrestore(&xtime_lock, flags); |
| 1050 | 1056 | ||
| 1051 | /* Start the decrementer on CPUs that have manual control | 1057 | /* Start the decrementer on CPUs that have manual control |
| 1052 | * such as BookE | 1058 | * such as BookE |
| 1053 | */ | 1059 | */ |
| 1054 | start_cpu_decrementer(); | 1060 | start_cpu_decrementer(); |
| 1055 | 1061 | ||
| 1056 | /* Register the clocksource, if we're not running on iSeries */ | 1062 | /* Register the clocksource, if we're not running on iSeries */ |
| 1057 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 1063 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
| 1058 | clocksource_init(); | 1064 | clocksource_init(); |
| 1059 | 1065 | ||
| 1060 | init_decrementer_clockevent(); | 1066 | init_decrementer_clockevent(); |
| 1061 | } | 1067 | } |
| 1062 | 1068 | ||
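The tick-length computation in time_init() is a round-up division, tick_nsec = ceil(1e9 * tb_ticks_per_jiffy / ppc_tb_freq). A small worked example under assumed values (512 MHz timebase, HZ = 250):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_freq = 512000000, hz = 250;		/* assumed example values */
	uint64_t ticks_per_jiffy = tb_freq / hz;	/* 2048000 */
	/* 1e9 * ticks_per_jiffy / tb_freq, rounded up, as in time_init() */
	uint64_t tick_nsec = (1000000000ULL * ticks_per_jiffy + tb_freq - 1)
				/ tb_freq;

	printf("%llu ns per tick\n", (unsigned long long)tick_nsec);	/* 4000000 */
	return 0;
}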
| 1063 | 1069 | ||
| 1064 | #define FEBRUARY 2 | 1070 | #define FEBRUARY 2 |
| 1065 | #define STARTOFTIME 1970 | 1071 | #define STARTOFTIME 1970 |
| 1066 | #define SECDAY 86400L | 1072 | #define SECDAY 86400L |
| 1067 | #define SECYR (SECDAY * 365) | 1073 | #define SECYR (SECDAY * 365) |
| 1068 | #define leapyear(year) ((year) % 4 == 0 && \ | 1074 | #define leapyear(year) ((year) % 4 == 0 && \ |
| 1069 | ((year) % 100 != 0 || (year) % 400 == 0)) | 1075 | ((year) % 100 != 0 || (year) % 400 == 0)) |
| 1070 | #define days_in_year(a) (leapyear(a) ? 366 : 365) | 1076 | #define days_in_year(a) (leapyear(a) ? 366 : 365) |
| 1071 | #define days_in_month(a) (month_days[(a) - 1]) | 1077 | #define days_in_month(a) (month_days[(a) - 1]) |
| 1072 | 1078 | ||
| 1073 | static int month_days[12] = { | 1079 | static int month_days[12] = { |
| 1074 | 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 | 1080 | 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 |
| 1075 | }; | 1081 | }; |
| 1076 | 1082 | ||
| 1077 | /* | 1083 | /* |
| 1078 | * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) | 1084 | * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) |
| 1079 | */ | 1085 | */ |
| 1080 | void GregorianDay(struct rtc_time * tm) | 1086 | void GregorianDay(struct rtc_time * tm) |
| 1081 | { | 1087 | { |
| 1082 | int leapsToDate; | 1088 | int leapsToDate; |
| 1083 | int lastYear; | 1089 | int lastYear; |
| 1084 | int day; | 1090 | int day; |
| 1085 | int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; | 1091 | int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; |
| 1086 | 1092 | ||
| 1087 | lastYear = tm->tm_year - 1; | 1093 | lastYear = tm->tm_year - 1; |
| 1088 | 1094 | ||
| 1089 | /* | 1095 | /* |
| 1090 | * Number of leap corrections to apply up to end of last year | 1096 | * Number of leap corrections to apply up to end of last year |
| 1091 | */ | 1097 | */ |
| 1092 | leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; | 1098 | leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; |
| 1093 | 1099 | ||
| 1094 | /* | 1100 | /* |
| 1095 | * This year is a leap year if it is divisible by 4 except when it is | 1101 | * This year is a leap year if it is divisible by 4 except when it is |
| 1096 | * divisible by 100 unless it is divisible by 400 | 1102 | * divisible by 100 unless it is divisible by 400 |
| 1097 | * | 1103 | * |
| 1098 | * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was | 1104 | * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was |
| 1099 | */ | 1105 | */ |
| 1100 | day = tm->tm_mon > 2 && leapyear(tm->tm_year); | 1106 | day = tm->tm_mon > 2 && leapyear(tm->tm_year); |
| 1101 | 1107 | ||
| 1102 | day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + | 1108 | day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + |
| 1103 | tm->tm_mday; | 1109 | tm->tm_mday; |
| 1104 | 1110 | ||
| 1105 | tm->tm_wday = day % 7; | 1111 | tm->tm_wday = day % 7; |
| 1106 | } | 1112 | } |
| 1107 | 1113 | ||
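The arithmetic can be sanity-checked against a known date: 9 Nov 2009 was a Monday, and with tm_wday counted from 0 = Sunday the day % 7 result should be 1. An illustrative standalone check of the same formula:

#include <assert.h>

int main(void)
{
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181,
			      212, 243, 273, 304, 334 };
	int year = 2009, mon = 11, mday = 9;	/* 9 Nov 2009, a Monday */
	int lastYear = year - 1;
	int leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
	int leap = year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
	int day = (mon > 2 && leap) + lastYear * 365 + leapsToDate
		  + MonthOffset[mon - 1] + mday;

	assert(day % 7 == 1);	/* 0 = Sunday, so Monday is 1 */
	return 0;
}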
| 1108 | void to_tm(int tim, struct rtc_time * tm) | 1114 | void to_tm(int tim, struct rtc_time * tm) |
| 1109 | { | 1115 | { |
| 1110 | register int i; | 1116 | register int i; |
| 1111 | register long hms, day; | 1117 | register long hms, day; |
| 1112 | 1118 | ||
| 1113 | day = tim / SECDAY; | 1119 | day = tim / SECDAY; |
| 1114 | hms = tim % SECDAY; | 1120 | hms = tim % SECDAY; |
| 1115 | 1121 | ||
| 1116 | /* Hours, minutes, seconds are easy */ | 1122 | /* Hours, minutes, seconds are easy */ |
| 1117 | tm->tm_hour = hms / 3600; | 1123 | tm->tm_hour = hms / 3600; |
| 1118 | tm->tm_min = (hms % 3600) / 60; | 1124 | tm->tm_min = (hms % 3600) / 60; |
| 1119 | tm->tm_sec = (hms % 3600) % 60; | 1125 | tm->tm_sec = (hms % 3600) % 60; |
| 1120 | 1126 | ||
| 1121 | /* Number of years in days */ | 1127 | /* Number of years in days */ |
| 1122 | for (i = STARTOFTIME; day >= days_in_year(i); i++) | 1128 | for (i = STARTOFTIME; day >= days_in_year(i); i++) |
| 1123 | day -= days_in_year(i); | 1129 | day -= days_in_year(i); |
| 1124 | tm->tm_year = i; | 1130 | tm->tm_year = i; |
| 1125 | 1131 | ||
| 1126 | /* Number of months in days left */ | 1132 | /* Number of months in days left */ |
| 1127 | if (leapyear(tm->tm_year)) | 1133 | if (leapyear(tm->tm_year)) |
| 1128 | days_in_month(FEBRUARY) = 29; | 1134 | days_in_month(FEBRUARY) = 29; |
| 1129 | for (i = 1; day >= days_in_month(i); i++) | 1135 | for (i = 1; day >= days_in_month(i); i++) |
| 1130 | day -= days_in_month(i); | 1136 | day -= days_in_month(i); |
| 1131 | days_in_month(FEBRUARY) = 28; | 1137 | days_in_month(FEBRUARY) = 28; |
| 1132 | tm->tm_mon = i; | 1138 | tm->tm_mon = i; |
| 1133 | 1139 | ||
| 1134 | /* Days are what is left over (+1) from all that. */ | 1140 | /* Days are what is left over (+1) from all that. */ |
| 1135 | tm->tm_mday = day + 1; | 1141 | tm->tm_mday = day + 1; |
| 1136 | 1142 | ||
| 1137 | /* | 1143 | /* |
| 1138 | * Determine the day of week | 1144 | * Determine the day of week |
| 1139 | */ | 1145 | */ |
| 1140 | GregorianDay(tm); | 1146 | GregorianDay(tm); |
| 1141 | } | 1147 | } |
| 1142 | 1148 | ||
| 1143 | /* Auxiliary function to compute scaling factors */ | 1149 | /* Auxiliary function to compute scaling factors */ |
| 1144 | /* Actually the choice of a timebase running at 1/4 of the bus | 1150 | /* Actually the choice of a timebase running at 1/4 of the bus |
| 1145 | * frequency giving resolution of a few tens of nanoseconds is quite nice. | 1151 | * frequency giving resolution of a few tens of nanoseconds is quite nice. |
| 1146 | * It makes this computation very precise (27-28 bits typically), which | 1152 | * It makes this computation very precise (27-28 bits typically), which |
| 1147 | * is optimistic considering the stability of most processor clock | 1153 | * is optimistic considering the stability of most processor clock |
| 1148 | * oscillators and the precision with which the timebase frequency | 1154 | * oscillators and the precision with which the timebase frequency |
| 1149 | * is measured, but it does no harm. | 1155 | * is measured, but it does no harm. |
| 1150 | */ | 1156 | */ |
| 1151 | unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) | 1157 | unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) |
| 1152 | { | 1158 | { |
| 1153 | unsigned mlt=0, tmp, err; | 1159 | unsigned mlt=0, tmp, err; |
| 1154 | /* No concern for performance, it's done once: use a stupid | 1160 | /* No concern for performance, it's done once: use a stupid |
| 1155 | * but safe and compact method to find the multiplier. | 1161 | * but safe and compact method to find the multiplier. |
| 1156 | */ | 1162 | */ |
| 1157 | 1163 | ||
| 1158 | for (tmp = 1U<<31; tmp != 0; tmp >>= 1) { | 1164 | for (tmp = 1U<<31; tmp != 0; tmp >>= 1) { |
| 1159 | if (mulhwu(inscale, mlt|tmp) < outscale) | 1165 | if (mulhwu(inscale, mlt|tmp) < outscale) |
| 1160 | mlt |= tmp; | 1166 | mlt |= tmp; |
| 1161 | } | 1167 | } |
| 1162 | 1168 | ||
| 1163 | /* We might still be off by 1 for the best approximation. | 1169 | /* We might still be off by 1 for the best approximation. |
| 1164 | * A side effect of this is that if outscale is too large | 1170 | * A side effect of this is that if outscale is too large |
| 1165 | * the returned value will be zero. | 1171 | * the returned value will be zero. |
| 1166 | * Many corner cases have been checked and seem to work, | 1172 | * Many corner cases have been checked and seem to work, |
| 1167 | * though some might have been missed in testing. | 1173 | * though some might have been missed in testing. |
| 1168 | */ | 1174 | */ |
| 1169 | 1175 | ||
| 1170 | err = inscale * (mlt+1); | 1176 | err = inscale * (mlt+1); |
| 1171 | if (err <= inscale/2) | 1177 | if (err <= inscale/2) |
| 1172 | mlt++; | 1178 | mlt++; |
| 1173 | return mlt; | 1179 | return mlt; |
| 1174 | } | 1180 | } |
| 1175 | 1181 | ||
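The loop is a most-significant-bit-first greedy search for mlt ~= outscale * 2^32 / inscale, after which mulhwu(value, mlt) rescales a value in a single multiply; time_init() uses it to build tb_to_us for tick-to-microsecond conversion. A portable sketch of the search plus its rounding step, under an assumed 512 MHz timebase:

#include <assert.h>
#include <stdint.h>

static uint32_t mulhwu(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> 32);	/* high word of a * b */
}

int main(void)
{
	uint32_t tb_freq = 512000000;	/* assumed example timebase frequency */
	uint32_t tb_to_us = 0, bit, err;

	/* MSB-first greedy search for mlt ~= 1e6 * 2^32 / tb_freq */
	for (bit = 1u << 31; bit != 0; bit >>= 1)
		if (mulhwu(tb_freq, tb_to_us | bit) < 1000000)
			tb_to_us |= bit;

	/* the off-by-one correction from the function above */
	err = tb_freq * (tb_to_us + 1);	/* low word only; wraps mod 2^32 */
	if (err <= tb_freq / 2)
		tb_to_us++;

	/* one jiffy at HZ=250 is 2048000 ticks == 4000 microseconds */
	assert(mulhwu(2048000, tb_to_us) == 4000);
	return 0;
}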
| 1176 | /* | 1182 | /* |
| 1177 | * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit | 1183 | * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit |
| 1178 | * result. | 1184 | * result. |
| 1179 | */ | 1185 | */ |
| 1180 | void div128_by_32(u64 dividend_high, u64 dividend_low, | 1186 | void div128_by_32(u64 dividend_high, u64 dividend_low, |
| 1181 | unsigned divisor, struct div_result *dr) | 1187 | unsigned divisor, struct div_result *dr) |
| 1182 | { | 1188 | { |
| 1183 | unsigned long a, b, c, d; | 1189 | unsigned long a, b, c, d; |
| 1184 | unsigned long w, x, y, z; | 1190 | unsigned long w, x, y, z; |
| 1185 | u64 ra, rb, rc; | 1191 | u64 ra, rb, rc; |
| 1186 | 1192 | ||
| 1187 | a = dividend_high >> 32; | 1193 | a = dividend_high >> 32; |
| 1188 | b = dividend_high & 0xffffffff; | 1194 | b = dividend_high & 0xffffffff; |
| 1189 | c = dividend_low >> 32; | 1195 | c = dividend_low >> 32; |
| 1190 | d = dividend_low & 0xffffffff; | 1196 | d = dividend_low & 0xffffffff; |
| 1191 | 1197 | ||
| 1192 | w = a / divisor; | 1198 | w = a / divisor; |
| 1193 | ra = ((u64)(a - (w * divisor)) << 32) + b; | 1199 | ra = ((u64)(a - (w * divisor)) << 32) + b; |
| 1194 | 1200 | ||
| 1195 | rb = ((u64) do_div(ra, divisor) << 32) + c; | 1201 | rb = ((u64) do_div(ra, divisor) << 32) + c; |
| 1196 | x = ra; | 1202 | x = ra; |
| 1197 | 1203 | ||
| 1198 | rc = ((u64) do_div(rb, divisor) << 32) + d; | 1204 | rc = ((u64) do_div(rb, divisor) << 32) + d; |
| 1199 | y = rb; | 1205 | y = rb; |
| 1200 | 1206 | ||
| 1201 | do_div(rc, divisor); | 1207 | do_div(rc, divisor); |
| 1202 | z = rc; | 1208 | z = rc; |
| 1203 | 1209 | ||
| 1204 | dr->result_high = ((u64)w << 32) + x; | 1210 | dr->result_high = ((u64)w << 32) + x; |
| 1205 | dr->result_low = ((u64)y << 32) + z; | 1211 | dr->result_low = ((u64)y << 32) + z; |
| 1206 | 1212 | ||
| 1207 | } | 1213 | } |
| 1208 | 1214 | ||
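This is schoolbook long division in base 2^32: each 32-bit limb of the dividend is brought down beside the running remainder, producing one quotient limb per step. A self-contained reference version checked against unsigned __int128 (assumes GCC or Clang):

#include <assert.h>
#include <stdint.h>

static void div128_by_32_ref(uint64_t hi, uint64_t lo, uint32_t divisor,
			     uint64_t *qhi, uint64_t *qlo)
{
	uint32_t limb[4] = { hi >> 32, (uint32_t)hi, lo >> 32, (uint32_t)lo };
	uint64_t q[4], rem = 0;
	int i;

	for (i = 0; i < 4; i++) {	/* one base-2^32 digit per step */
		uint64_t cur = (rem << 32) | limb[i];
		q[i] = cur / divisor;	/* each quotient limb fits in 32 bits */
		rem = cur % divisor;
	}
	*qhi = (q[0] << 32) | q[1];
	*qlo = (q[2] << 32) | q[3];
}

int main(void)
{
	uint64_t hi = (1ULL << 51) - 512000000, lo = 0, qhi, qlo;
	uint32_t divisor = 1000000000;
	unsigned __int128 n = ((unsigned __int128)hi << 64) | lo;

	div128_by_32_ref(hi, lo, divisor, &qhi, &qlo);
	assert(n / divisor == (((unsigned __int128)qhi << 64) | qlo));
	return 0;
}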
| 1209 | /* We don't need to calibrate delay, we use the CPU timebase for that */ | 1215 | /* We don't need to calibrate delay, we use the CPU timebase for that */ |
| 1210 | void calibrate_delay(void) | 1216 | void calibrate_delay(void) |
| 1211 | { | 1217 | { |
| 1212 | /* Some generic code (such as spinlock debug) uses loops_per_jiffy | 1218 | /* Some generic code (such as spinlock debug) uses loops_per_jiffy |
| 1213 | * as the number of __delay(1) calls in a jiffy, so make it so | 1219 | * as the number of __delay(1) calls in a jiffy, so make it so |
| 1214 | */ | 1220 | */ |
| 1215 | loops_per_jiffy = tb_ticks_per_jiffy; | 1221 | loops_per_jiffy = tb_ticks_per_jiffy; |
| 1216 | } | 1222 | } |
| 1217 | 1223 | ||
| 1218 | static int __init rtc_init(void) | 1224 | static int __init rtc_init(void) |
| 1219 | { | 1225 | { |
| 1220 | struct platform_device *pdev; | 1226 | struct platform_device *pdev; |
| 1221 | 1227 | ||
| 1222 | if (!ppc_md.get_rtc_time) | 1228 | if (!ppc_md.get_rtc_time) |
| 1223 | return -ENODEV; | 1229 | return -ENODEV; |
| 1224 | 1230 | ||
| 1225 | pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); | 1231 | pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); |
| 1226 | if (IS_ERR(pdev)) | 1232 | if (IS_ERR(pdev)) |
| 1227 | return PTR_ERR(pdev); | 1233 | return PTR_ERR(pdev); |
| 1228 | 1234 | ||
| 1229 | return 0; | 1235 | return 0; |
| 1230 | } | 1236 | } |
| 1231 | 1237 | ||
| 1232 | module_init(rtc_init); | 1238 | module_init(rtc_init); |
| 1233 | 1239 |
arch/powerpc/kernel/traps.c
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 2 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
| 5 | * modify it under the terms of the GNU General Public License | 5 | * modify it under the terms of the GNU General Public License |
| 6 | * as published by the Free Software Foundation; either version | 6 | * as published by the Free Software Foundation; either version |
| 7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
| 8 | * | 8 | * |
| 9 | * Modified by Cort Dougan (cort@cs.nmt.edu) | 9 | * Modified by Cort Dougan (cort@cs.nmt.edu) |
| 10 | * and Paul Mackerras (paulus@samba.org) | 10 | * and Paul Mackerras (paulus@samba.org) |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | /* | 13 | /* |
| 14 | * This file handles the architecture-dependent parts of hardware exceptions | 14 | * This file handles the architecture-dependent parts of hardware exceptions |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
| 18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
| 19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
| 20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
| 21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
| 22 | #include <linux/unistd.h> | 22 | #include <linux/unistd.h> |
| 23 | #include <linux/ptrace.h> | 23 | #include <linux/ptrace.h> |
| 24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 25 | #include <linux/user.h> | 25 | #include <linux/user.h> |
| 26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
| 27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
| 28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
| 29 | #include <linux/prctl.h> | 29 | #include <linux/prctl.h> |
| 30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
| 31 | #include <linux/kprobes.h> | 31 | #include <linux/kprobes.h> |
| 32 | #include <linux/kexec.h> | 32 | #include <linux/kexec.h> |
| 33 | #include <linux/backlight.h> | 33 | #include <linux/backlight.h> |
| 34 | #include <linux/bug.h> | 34 | #include <linux/bug.h> |
| 35 | #include <linux/kdebug.h> | 35 | #include <linux/kdebug.h> |
| 36 | #include <linux/debugfs.h> | 36 | #include <linux/debugfs.h> |
| 37 | 37 | ||
| 38 | #include <asm/emulated_ops.h> | 38 | #include <asm/emulated_ops.h> |
| 39 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
| 40 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
| 41 | #include <asm/system.h> | 41 | #include <asm/system.h> |
| 42 | #include <asm/io.h> | 42 | #include <asm/io.h> |
| 43 | #include <asm/machdep.h> | 43 | #include <asm/machdep.h> |
| 44 | #include <asm/rtas.h> | 44 | #include <asm/rtas.h> |
| 45 | #include <asm/pmc.h> | 45 | #include <asm/pmc.h> |
| 46 | #ifdef CONFIG_PPC32 | 46 | #ifdef CONFIG_PPC32 |
| 47 | #include <asm/reg.h> | 47 | #include <asm/reg.h> |
| 48 | #endif | 48 | #endif |
| 49 | #ifdef CONFIG_PMAC_BACKLIGHT | 49 | #ifdef CONFIG_PMAC_BACKLIGHT |
| 50 | #include <asm/backlight.h> | 50 | #include <asm/backlight.h> |
| 51 | #endif | 51 | #endif |
| 52 | #ifdef CONFIG_PPC64 | 52 | #ifdef CONFIG_PPC64 |
| 53 | #include <asm/firmware.h> | 53 | #include <asm/firmware.h> |
| 54 | #include <asm/processor.h> | 54 | #include <asm/processor.h> |
| 55 | #endif | 55 | #endif |
| 56 | #include <asm/kexec.h> | 56 | #include <asm/kexec.h> |
| 57 | #include <asm/ppc-opcode.h> | 57 | #include <asm/ppc-opcode.h> |
| 58 | #ifdef CONFIG_FSL_BOOKE | 58 | #ifdef CONFIG_FSL_BOOKE |
| 59 | #include <asm/dbell.h> | 59 | #include <asm/dbell.h> |
| 60 | #endif | 60 | #endif |
| 61 | 61 | ||
| 62 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 62 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
| 63 | int (*__debugger)(struct pt_regs *regs); | 63 | int (*__debugger)(struct pt_regs *regs); |
| 64 | int (*__debugger_ipi)(struct pt_regs *regs); | 64 | int (*__debugger_ipi)(struct pt_regs *regs); |
| 65 | int (*__debugger_bpt)(struct pt_regs *regs); | 65 | int (*__debugger_bpt)(struct pt_regs *regs); |
| 66 | int (*__debugger_sstep)(struct pt_regs *regs); | 66 | int (*__debugger_sstep)(struct pt_regs *regs); |
| 67 | int (*__debugger_iabr_match)(struct pt_regs *regs); | 67 | int (*__debugger_iabr_match)(struct pt_regs *regs); |
| 68 | int (*__debugger_dabr_match)(struct pt_regs *regs); | 68 | int (*__debugger_dabr_match)(struct pt_regs *regs); |
| 69 | int (*__debugger_fault_handler)(struct pt_regs *regs); | 69 | int (*__debugger_fault_handler)(struct pt_regs *regs); |
| 70 | 70 | ||
| 71 | EXPORT_SYMBOL(__debugger); | 71 | EXPORT_SYMBOL(__debugger); |
| 72 | EXPORT_SYMBOL(__debugger_ipi); | 72 | EXPORT_SYMBOL(__debugger_ipi); |
| 73 | EXPORT_SYMBOL(__debugger_bpt); | 73 | EXPORT_SYMBOL(__debugger_bpt); |
| 74 | EXPORT_SYMBOL(__debugger_sstep); | 74 | EXPORT_SYMBOL(__debugger_sstep); |
| 75 | EXPORT_SYMBOL(__debugger_iabr_match); | 75 | EXPORT_SYMBOL(__debugger_iabr_match); |
| 76 | EXPORT_SYMBOL(__debugger_dabr_match); | 76 | EXPORT_SYMBOL(__debugger_dabr_match); |
| 77 | EXPORT_SYMBOL(__debugger_fault_handler); | 77 | EXPORT_SYMBOL(__debugger_fault_handler); |
| 78 | #endif | 78 | #endif |
| 79 | 79 | ||
| 80 | /* | 80 | /* |
| 81 | * Trap & Exception support | 81 | * Trap & Exception support |
| 82 | */ | 82 | */ |
| 83 | 83 | ||
| 84 | #ifdef CONFIG_PMAC_BACKLIGHT | 84 | #ifdef CONFIG_PMAC_BACKLIGHT |
| 85 | static void pmac_backlight_unblank(void) | 85 | static void pmac_backlight_unblank(void) |
| 86 | { | 86 | { |
| 87 | mutex_lock(&pmac_backlight_mutex); | 87 | mutex_lock(&pmac_backlight_mutex); |
| 88 | if (pmac_backlight) { | 88 | if (pmac_backlight) { |
| 89 | struct backlight_properties *props; | 89 | struct backlight_properties *props; |
| 90 | 90 | ||
| 91 | props = &pmac_backlight->props; | 91 | props = &pmac_backlight->props; |
| 92 | props->brightness = props->max_brightness; | 92 | props->brightness = props->max_brightness; |
| 93 | props->power = FB_BLANK_UNBLANK; | 93 | props->power = FB_BLANK_UNBLANK; |
| 94 | backlight_update_status(pmac_backlight); | 94 | backlight_update_status(pmac_backlight); |
| 95 | } | 95 | } |
| 96 | mutex_unlock(&pmac_backlight_mutex); | 96 | mutex_unlock(&pmac_backlight_mutex); |
| 97 | } | 97 | } |
| 98 | #else | 98 | #else |
| 99 | static inline void pmac_backlight_unblank(void) { } | 99 | static inline void pmac_backlight_unblank(void) { } |
| 100 | #endif | 100 | #endif |
| 101 | 101 | ||
| 102 | int die(const char *str, struct pt_regs *regs, long err) | 102 | int die(const char *str, struct pt_regs *regs, long err) |
| 103 | { | 103 | { |
| 104 | static struct { | 104 | static struct { |
| 105 | spinlock_t lock; | 105 | spinlock_t lock; |
| 106 | u32 lock_owner; | 106 | u32 lock_owner; |
| 107 | int lock_owner_depth; | 107 | int lock_owner_depth; |
| 108 | } die = { | 108 | } die = { |
| 109 | .lock = __SPIN_LOCK_UNLOCKED(die.lock), | 109 | .lock = __SPIN_LOCK_UNLOCKED(die.lock), |
| 110 | .lock_owner = -1, | 110 | .lock_owner = -1, |
| 111 | .lock_owner_depth = 0 | 111 | .lock_owner_depth = 0 |
| 112 | }; | 112 | }; |
| 113 | static int die_counter; | 113 | static int die_counter; |
| 114 | unsigned long flags; | 114 | unsigned long flags; |
| 115 | 115 | ||
| 116 | if (debugger(regs)) | 116 | if (debugger(regs)) |
| 117 | return 1; | 117 | return 1; |
| 118 | 118 | ||
| 119 | oops_enter(); | 119 | oops_enter(); |
| 120 | 120 | ||
| 121 | if (die.lock_owner != raw_smp_processor_id()) { | 121 | if (die.lock_owner != raw_smp_processor_id()) { |
| 122 | console_verbose(); | 122 | console_verbose(); |
| 123 | spin_lock_irqsave(&die.lock, flags); | 123 | spin_lock_irqsave(&die.lock, flags); |
| 124 | die.lock_owner = smp_processor_id(); | 124 | die.lock_owner = smp_processor_id(); |
| 125 | die.lock_owner_depth = 0; | 125 | die.lock_owner_depth = 0; |
| 126 | bust_spinlocks(1); | 126 | bust_spinlocks(1); |
| 127 | if (machine_is(powermac)) | 127 | if (machine_is(powermac)) |
| 128 | pmac_backlight_unblank(); | 128 | pmac_backlight_unblank(); |
| 129 | } else { | 129 | } else { |
| 130 | local_save_flags(flags); | 130 | local_save_flags(flags); |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | if (++die.lock_owner_depth < 3) { | 133 | if (++die.lock_owner_depth < 3) { |
| 134 | printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); | 134 | printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); |
| 135 | #ifdef CONFIG_PREEMPT | 135 | #ifdef CONFIG_PREEMPT |
| 136 | printk("PREEMPT "); | 136 | printk("PREEMPT "); |
| 137 | #endif | 137 | #endif |
| 138 | #ifdef CONFIG_SMP | 138 | #ifdef CONFIG_SMP |
| 139 | printk("SMP NR_CPUS=%d ", NR_CPUS); | 139 | printk("SMP NR_CPUS=%d ", NR_CPUS); |
| 140 | #endif | 140 | #endif |
| 141 | #ifdef CONFIG_DEBUG_PAGEALLOC | 141 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 142 | printk("DEBUG_PAGEALLOC "); | 142 | printk("DEBUG_PAGEALLOC "); |
| 143 | #endif | 143 | #endif |
| 144 | #ifdef CONFIG_NUMA | 144 | #ifdef CONFIG_NUMA |
| 145 | printk("NUMA "); | 145 | printk("NUMA "); |
| 146 | #endif | 146 | #endif |
| 147 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); | 147 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); |
| 148 | 148 | ||
| 149 | print_modules(); | 149 | print_modules(); |
| 150 | show_regs(regs); | 150 | show_regs(regs); |
| 151 | } else { | 151 | } else { |
| 152 | printk("Recursive die() failure, output suppressed\n"); | 152 | printk("Recursive die() failure, output suppressed\n"); |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | bust_spinlocks(0); | 155 | bust_spinlocks(0); |
| 156 | die.lock_owner = -1; | 156 | die.lock_owner = -1; |
| 157 | add_taint(TAINT_DIE); | 157 | add_taint(TAINT_DIE); |
| 158 | spin_unlock_irqrestore(&die.lock, flags); | 158 | spin_unlock_irqrestore(&die.lock, flags); |
| 159 | 159 | ||
| 160 | if (kexec_should_crash(current) || | 160 | if (kexec_should_crash(current) || |
| 161 | kexec_sr_activated(smp_processor_id())) | 161 | kexec_sr_activated(smp_processor_id())) |
| 162 | crash_kexec(regs); | 162 | crash_kexec(regs); |
| 163 | crash_kexec_secondary(regs); | 163 | crash_kexec_secondary(regs); |
| 164 | 164 | ||
| 165 | if (in_interrupt()) | 165 | if (in_interrupt()) |
| 166 | panic("Fatal exception in interrupt"); | 166 | panic("Fatal exception in interrupt"); |
| 167 | 167 | ||
| 168 | if (panic_on_oops) | 168 | if (panic_on_oops) |
| 169 | panic("Fatal exception"); | 169 | panic("Fatal exception"); |
| 170 | 170 | ||
| 171 | oops_exit(); | 171 | oops_exit(); |
| 172 | do_exit(err); | 172 | do_exit(err); |
| 173 | 173 | ||
| 174 | return 0; | 174 | return 0; |
| 175 | } | 175 | } |
| 176 | 176 | ||
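die() layers a re-entrancy guard over its spinlock: the lock is taken only on first entry per CPU, so an oops raised while already printing an oops on the same CPU proceeds instead of self-deadlocking, and nested output is capped at depth 3. A userspace sketch of the same shape (pthreads; names are illustrative, and the paired depth decrement differs slightly from die(), which resets the depth on each fresh acquisition):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t report_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t report_owner;
static int report_depth;	/* 0 while no report is in flight */

static void report_failure(const char *msg)
{
	/* unlocked peek, as with die.lock_owner: only the owner
	 * can ever match itself here */
	if (report_depth == 0 || !pthread_equal(report_owner, pthread_self())) {
		pthread_mutex_lock(&report_lock);
		report_owner = pthread_self();
	}
	if (++report_depth < 3)
		fprintf(stderr, "failure: %s (depth %d)\n", msg, report_depth);
	else
		fprintf(stderr, "recursive failure, output suppressed\n");

	if (--report_depth == 0)
		pthread_mutex_unlock(&report_lock);
}

int main(void)
{
	report_failure("top-level oops");
	return 0;
}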
| 177 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | 177 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) |
| 178 | { | 178 | { |
| 179 | siginfo_t info; | 179 | siginfo_t info; |
| 180 | const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ | 180 | const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ |
| 181 | "at %08lx nip %08lx lr %08lx code %x\n"; | 181 | "at %08lx nip %08lx lr %08lx code %x\n"; |
| 182 | const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ | 182 | const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ |
| 183 | "at %016lx nip %016lx lr %016lx code %x\n"; | 183 | "at %016lx nip %016lx lr %016lx code %x\n"; |
| 184 | 184 | ||
| 185 | if (!user_mode(regs)) { | 185 | if (!user_mode(regs)) { |
| 186 | if (die("Exception in kernel mode", regs, signr)) | 186 | if (die("Exception in kernel mode", regs, signr)) |
| 187 | return; | 187 | return; |
| 188 | } else if (show_unhandled_signals && | 188 | } else if (show_unhandled_signals && |
| 189 | unhandled_signal(current, signr) && | 189 | unhandled_signal(current, signr) && |
| 190 | printk_ratelimit()) { | 190 | printk_ratelimit()) { |
| 191 | printk(regs->msr & MSR_SF ? fmt64 : fmt32, | 191 | printk(regs->msr & MSR_SF ? fmt64 : fmt32, |
| 192 | current->comm, current->pid, signr, | 192 | current->comm, current->pid, signr, |
| 193 | addr, regs->nip, regs->link, code); | 193 | addr, regs->nip, regs->link, code); |
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | memset(&info, 0, sizeof(info)); | 196 | memset(&info, 0, sizeof(info)); |
| 197 | info.si_signo = signr; | 197 | info.si_signo = signr; |
| 198 | info.si_code = code; | 198 | info.si_code = code; |
| 199 | info.si_addr = (void __user *) addr; | 199 | info.si_addr = (void __user *) addr; |
| 200 | force_sig_info(signr, &info, current); | 200 | force_sig_info(signr, &info, current); |
| 201 | 201 | ||
| 202 | /* | 202 | /* |
| 203 | * Init gets no signals that it doesn't have a handler for. | 203 | * Init gets no signals that it doesn't have a handler for. |
| 204 | * That's all very well, but if it has caused a synchronous | 204 | * That's all very well, but if it has caused a synchronous |
| 205 | * exception and we ignore the resulting signal, it will just | 205 | * exception and we ignore the resulting signal, it will just |
| 206 | * generate the same exception over and over again and we get | 206 | * generate the same exception over and over again and we get |
| 207 | * nowhere. Better to kill it and let the kernel panic. | 207 | * nowhere. Better to kill it and let the kernel panic. |
| 208 | */ | 208 | */ |
| 209 | if (is_global_init(current)) { | 209 | if (is_global_init(current)) { |
| 210 | __sighandler_t handler; | 210 | __sighandler_t handler; |
| 211 | 211 | ||
| 212 | spin_lock_irq(¤t->sighand->siglock); | 212 | spin_lock_irq(¤t->sighand->siglock); |
| 213 | handler = current->sighand->action[signr-1].sa.sa_handler; | 213 | handler = current->sighand->action[signr-1].sa.sa_handler; |
| 214 | spin_unlock_irq(¤t->sighand->siglock); | 214 | spin_unlock_irq(¤t->sighand->siglock); |
| 215 | if (handler == SIG_DFL) { | 215 | if (handler == SIG_DFL) { |
| 216 | /* init has generated a synchronous exception | 216 | /* init has generated a synchronous exception |
| 217 | and it doesn't have a handler for the signal */ | 217 | and it doesn't have a handler for the signal */ |
| 218 | printk(KERN_CRIT "init has generated signal %d " | 218 | printk(KERN_CRIT "init has generated signal %d " |
| 219 | "but has no handler for it\n", signr); | 219 | "but has no handler for it\n", signr); |
| 220 | do_exit(signr); | 220 | do_exit(signr); |
| 221 | } | 221 | } |
| 222 | } | 222 | } |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | #ifdef CONFIG_PPC64 | 225 | #ifdef CONFIG_PPC64 |
| 226 | void system_reset_exception(struct pt_regs *regs) | 226 | void system_reset_exception(struct pt_regs *regs) |
| 227 | { | 227 | { |
| 228 | /* See if a machine-dependent handler wants it first */ | 228 | /* See if a machine-dependent handler wants it first */ |
| 229 | if (ppc_md.system_reset_exception) { | 229 | if (ppc_md.system_reset_exception) { |
| 230 | if (ppc_md.system_reset_exception(regs)) | 230 | if (ppc_md.system_reset_exception(regs)) |
| 231 | return; | 231 | return; |
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | #ifdef CONFIG_KEXEC | 234 | #ifdef CONFIG_KEXEC |
| 235 | cpu_set(smp_processor_id(), cpus_in_sr); | 235 | cpu_set(smp_processor_id(), cpus_in_sr); |
| 236 | #endif | 236 | #endif |
| 237 | 237 | ||
| 238 | die("System Reset", regs, SIGABRT); | 238 | die("System Reset", regs, SIGABRT); |
| 239 | 239 | ||
| 240 | /* | 240 | /* |
| 241 | * Some CPUs when released from the debugger will execute this path. | 241 | * Some CPUs when released from the debugger will execute this path. |
| 242 | * These CPUs entered the debugger via a soft-reset. If the CPU was | 242 | * These CPUs entered the debugger via a soft-reset. If the CPU was |
| 243 | * hung before entering the debugger it will return to the hung | 243 | * hung before entering the debugger it will return to the hung |
| 244 | * state when exiting this function. This causes a problem in | 244 | * state when exiting this function. This causes a problem in |
| 245 | * kdump since the hung CPU(s) will not respond to the IPI sent | 245 | * kdump since the hung CPU(s) will not respond to the IPI sent |
| 246 | * from kdump. To prevent the problem we call crash_kexec_secondary() | 246 | * from kdump. To prevent the problem we call crash_kexec_secondary() |
| 247 | * here. If a kdump had not been initiated or we exit the debugger | 247 | * here. If a kdump had not been initiated or we exit the debugger |
| 248 | * with the "exit and recover" command (x) crash_kexec_secondary() | 248 | * with the "exit and recover" command (x) crash_kexec_secondary() |
| 249 | * will return after 5ms and the CPU returns to its previous state. | 249 | * will return after 5ms and the CPU returns to its previous state. |
| 250 | */ | 250 | */ |
| 251 | crash_kexec_secondary(regs); | 251 | crash_kexec_secondary(regs); |
| 252 | 252 | ||
| 253 | /* Must die if the interrupt is not recoverable */ | 253 | /* Must die if the interrupt is not recoverable */ |
| 254 | if (!(regs->msr & MSR_RI)) | 254 | if (!(regs->msr & MSR_RI)) |
| 255 | panic("Unrecoverable System Reset"); | 255 | panic("Unrecoverable System Reset"); |
| 256 | 256 | ||
| 257 | /* What should we do here? We could issue a shutdown or hard reset. */ | 257 | /* What should we do here? We could issue a shutdown or hard reset. */ |
| 258 | } | 258 | } |
| 259 | #endif | 259 | #endif |
| 260 | 260 | ||
| 261 | /* | 261 | /* |
| 262 | * I/O accesses can cause machine checks on powermacs. | 262 | * I/O accesses can cause machine checks on powermacs. |
| 263 | * Check if the NIP corresponds to the address of a sync | 263 | * Check if the NIP corresponds to the address of a sync |
| 264 | * instruction for which there is an entry in the exception | 264 | * instruction for which there is an entry in the exception |
| 265 | * table. | 265 | * table. |
| 266 | * Note that the 601 only takes a machine check on TEA | 266 | * Note that the 601 only takes a machine check on TEA |
| 267 | * (transfer error ack) signal assertion, and does not | 267 | * (transfer error ack) signal assertion, and does not |
| 268 | * set any of the top 16 bits of SRR1. | 268 | * set any of the top 16 bits of SRR1. |
| 269 | * -- paulus. | 269 | * -- paulus. |
| 270 | */ | 270 | */ |
| 271 | static inline int check_io_access(struct pt_regs *regs) | 271 | static inline int check_io_access(struct pt_regs *regs) |
| 272 | { | 272 | { |
| 273 | #ifdef CONFIG_PPC32 | 273 | #ifdef CONFIG_PPC32 |
| 274 | unsigned long msr = regs->msr; | 274 | unsigned long msr = regs->msr; |
| 275 | const struct exception_table_entry *entry; | 275 | const struct exception_table_entry *entry; |
| 276 | unsigned int *nip = (unsigned int *)regs->nip; | 276 | unsigned int *nip = (unsigned int *)regs->nip; |
| 277 | 277 | ||
| 278 | if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) | 278 | if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) |
| 279 | && (entry = search_exception_tables(regs->nip)) != NULL) { | 279 | && (entry = search_exception_tables(regs->nip)) != NULL) { |
| 280 | /* | 280 | /* |
| 281 | * Check that it's a sync instruction, or somewhere | 281 | * Check that it's a sync instruction, or somewhere |
| 282 | * in the twi; isync; nop sequence that inb/inw/inl uses. | 282 | * in the twi; isync; nop sequence that inb/inw/inl uses. |
| 283 | * As the address is in the exception table | 283 | * As the address is in the exception table |
| 284 | * we should be able to read the instr there. | 284 | * we should be able to read the instr there. |
| 285 | * For the debug message, we look at the preceding | 285 | * For the debug message, we look at the preceding |
| 286 | * load or store. | 286 | * load or store. |
| 287 | */ | 287 | */ |
| 288 | if (*nip == 0x60000000) /* nop */ | 288 | if (*nip == 0x60000000) /* nop */ |
| 289 | nip -= 2; | 289 | nip -= 2; |
| 290 | else if (*nip == 0x4c00012c) /* isync */ | 290 | else if (*nip == 0x4c00012c) /* isync */ |
| 291 | --nip; | 291 | --nip; |
| 292 | if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { | 292 | if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { |
| 293 | /* sync or twi */ | 293 | /* sync or twi */ |
| 294 | unsigned int rb; | 294 | unsigned int rb; |
| 295 | 295 | ||
| 296 | --nip; | 296 | --nip; |
| 297 | rb = (*nip >> 11) & 0x1f; | 297 | rb = (*nip >> 11) & 0x1f; |
| 298 | printk(KERN_DEBUG "%s bad port %lx at %p\n", | 298 | printk(KERN_DEBUG "%s bad port %lx at %p\n", |
| 299 | (*nip & 0x100)? "OUT to": "IN from", | 299 | (*nip & 0x100)? "OUT to": "IN from", |
| 300 | regs->gpr[rb] - _IO_BASE, nip); | 300 | regs->gpr[rb] - _IO_BASE, nip); |
| 301 | regs->msr |= MSR_RI; | 301 | regs->msr |= MSR_RI; |
| 302 | regs->nip = entry->fixup; | 302 | regs->nip = entry->fixup; |
| 303 | return 1; | 303 | return 1; |
| 304 | } | 304 | } |
| 305 | } | 305 | } |
| 306 | #endif /* CONFIG_PPC32 */ | 306 | #endif /* CONFIG_PPC32 */ |
| 307 | return 0; | 307 | return 0; |
| 308 | } | 308 | } |
| 309 | 309 | ||
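The magic numbers are PowerPC instruction fields: the primary opcode sits in the top six bits (insn >> 26, so 3 means twi and 0x7c0004ac decodes as sync), and (insn >> 11) & 0x1f extracts the RB register of an X-form indexed load or store. A small check of the layout on a hypothetical encoding:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical lwzx r3,0,r11: opcode 31, RT=3, RA=0, RB=11, XO=23 */
	uint32_t insn = (31u << 26) | (3u << 21) | (0u << 16) |
			(11u << 11) | (23u << 1);

	/* sync: primary opcode 31, extended opcode 598 (598 << 1 == 0x4ac) */
	assert((0x7c0004acu >> 26) == 31);
	assert(((insn >> 11) & 0x1f) == 11);	/* the rb extracted above */
	return 0;
}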
| 310 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 310 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) |
| 311 | /* On 4xx, the reason for the machine check or program exception | 311 | /* On 4xx, the reason for the machine check or program exception |
| 312 | is in the ESR. */ | 312 | is in the ESR. */ |
| 313 | #define get_reason(regs) ((regs)->dsisr) | 313 | #define get_reason(regs) ((regs)->dsisr) |
| 314 | #ifndef CONFIG_FSL_BOOKE | 314 | #ifndef CONFIG_FSL_BOOKE |
| 315 | #define get_mc_reason(regs) ((regs)->dsisr) | 315 | #define get_mc_reason(regs) ((regs)->dsisr) |
| 316 | #else | 316 | #else |
| 317 | #define get_mc_reason(regs) (mfspr(SPRN_MCSR) & MCSR_MASK) | 317 | #define get_mc_reason(regs) (mfspr(SPRN_MCSR) & MCSR_MASK) |
| 318 | #endif | 318 | #endif |
| 319 | #define REASON_FP ESR_FP | 319 | #define REASON_FP ESR_FP |
| 320 | #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) | 320 | #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) |
| 321 | #define REASON_PRIVILEGED ESR_PPR | 321 | #define REASON_PRIVILEGED ESR_PPR |
| 322 | #define REASON_TRAP ESR_PTR | 322 | #define REASON_TRAP ESR_PTR |
| 323 | 323 | ||
| 324 | /* single-step stuff */ | 324 | /* single-step stuff */ |
| 325 | #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) | 325 | #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) |
| 326 | #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) | 326 | #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) |
| 327 | 327 | ||
| 328 | #else | 328 | #else |
| 329 | /* On non-4xx, the reason for the machine check or program | 329 | /* On non-4xx, the reason for the machine check or program |
| 330 | exception is in the MSR. */ | 330 | exception is in the MSR. */ |
| 331 | #define get_reason(regs) ((regs)->msr) | 331 | #define get_reason(regs) ((regs)->msr) |
| 332 | #define get_mc_reason(regs) ((regs)->msr) | 332 | #define get_mc_reason(regs) ((regs)->msr) |
| 333 | #define REASON_FP 0x100000 | 333 | #define REASON_FP 0x100000 |
| 334 | #define REASON_ILLEGAL 0x80000 | 334 | #define REASON_ILLEGAL 0x80000 |
| 335 | #define REASON_PRIVILEGED 0x40000 | 335 | #define REASON_PRIVILEGED 0x40000 |
| 336 | #define REASON_TRAP 0x20000 | 336 | #define REASON_TRAP 0x20000 |
| 337 | 337 | ||
| 338 | #define single_stepping(regs) ((regs)->msr & MSR_SE) | 338 | #define single_stepping(regs) ((regs)->msr & MSR_SE) |
| 339 | #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) | 339 | #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) |
| 340 | #endif | 340 | #endif |
| 341 | 341 | ||
| 342 | #if defined(CONFIG_4xx) | 342 | #if defined(CONFIG_4xx) |
| 343 | int machine_check_4xx(struct pt_regs *regs) | 343 | int machine_check_4xx(struct pt_regs *regs) |
| 344 | { | 344 | { |
| 345 | unsigned long reason = get_mc_reason(regs); | 345 | unsigned long reason = get_mc_reason(regs); |
| 346 | 346 | ||
| 347 | if (reason & ESR_IMCP) { | 347 | if (reason & ESR_IMCP) { |
| 348 | printk("Instruction"); | 348 | printk("Instruction"); |
| 349 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | 349 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); |
| 350 | } else | 350 | } else |
| 351 | printk("Data"); | 351 | printk("Data"); |
| 352 | printk(" machine check in kernel mode.\n"); | 352 | printk(" machine check in kernel mode.\n"); |
| 353 | 353 | ||
| 354 | return 0; | 354 | return 0; |
| 355 | } | 355 | } |
| 356 | 356 | ||
| 357 | int machine_check_440A(struct pt_regs *regs) | 357 | int machine_check_440A(struct pt_regs *regs) |
| 358 | { | 358 | { |
| 359 | unsigned long reason = get_mc_reason(regs); | 359 | unsigned long reason = get_mc_reason(regs); |
| 360 | 360 | ||
| 361 | printk("Machine check in kernel mode.\n"); | 361 | printk("Machine check in kernel mode.\n"); |
| 362 | if (reason & ESR_IMCP) { | 362 | if (reason & ESR_IMCP) { |
| 363 | printk("Instruction Synchronous Machine Check exception\n"); | 363 | printk("Instruction Synchronous Machine Check exception\n"); |
| 364 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | 364 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); |
| 365 | } | 365 | } |
| 366 | else { | 366 | else { |
| 367 | u32 mcsr = mfspr(SPRN_MCSR); | 367 | u32 mcsr = mfspr(SPRN_MCSR); |
| 368 | if (mcsr & MCSR_IB) | 368 | if (mcsr & MCSR_IB) |
| 369 | printk("Instruction Read PLB Error\n"); | 369 | printk("Instruction Read PLB Error\n"); |
| 370 | if (mcsr & MCSR_DRB) | 370 | if (mcsr & MCSR_DRB) |
| 371 | printk("Data Read PLB Error\n"); | 371 | printk("Data Read PLB Error\n"); |
| 372 | if (mcsr & MCSR_DWB) | 372 | if (mcsr & MCSR_DWB) |
| 373 | printk("Data Write PLB Error\n"); | 373 | printk("Data Write PLB Error\n"); |
| 374 | if (mcsr & MCSR_TLBP) | 374 | if (mcsr & MCSR_TLBP) |
| 375 | printk("TLB Parity Error\n"); | 375 | printk("TLB Parity Error\n"); |
| 376 | if (mcsr & MCSR_ICP) { | 376 | if (mcsr & MCSR_ICP) { |
| 377 | flush_instruction_cache(); | 377 | flush_instruction_cache(); |
| 378 | printk("I-Cache Parity Error\n"); | 378 | printk("I-Cache Parity Error\n"); |
| 379 | } | 379 | } |
| 380 | if (mcsr & MCSR_DCSP) | 380 | if (mcsr & MCSR_DCSP) |
| 381 | printk("D-Cache Search Parity Error\n"); | 381 | printk("D-Cache Search Parity Error\n"); |
| 382 | if (mcsr & MCSR_DCFP) | 382 | if (mcsr & MCSR_DCFP) |
| 383 | printk("D-Cache Flush Parity Error\n"); | 383 | printk("D-Cache Flush Parity Error\n"); |
| 384 | if (mcsr & MCSR_IMPE) | 384 | if (mcsr & MCSR_IMPE) |
| 385 | printk("Machine Check exception is imprecise\n"); | 385 | printk("Machine Check exception is imprecise\n"); |
| 386 | 386 | ||
| 387 | /* Clear MCSR */ | 387 | /* Clear MCSR */ |
| 388 | mtspr(SPRN_MCSR, mcsr); | 388 | mtspr(SPRN_MCSR, mcsr); |
| 389 | } | 389 | } |
| 390 | return 0; | 390 | return 0; |
| 391 | } | 391 | } |
| 392 | #elif defined(CONFIG_E500) | 392 | #elif defined(CONFIG_E500) |
| 393 | int machine_check_e500(struct pt_regs *regs) | 393 | int machine_check_e500(struct pt_regs *regs) |
| 394 | { | 394 | { |
| 395 | unsigned long reason = get_mc_reason(regs); | 395 | unsigned long reason = get_mc_reason(regs); |
| 396 | 396 | ||
| 397 | printk("Machine check in kernel mode.\n"); | 397 | printk("Machine check in kernel mode.\n"); |
| 398 | printk("Caused by (from MCSR=%lx): ", reason); | 398 | printk("Caused by (from MCSR=%lx): ", reason); |
| 399 | 399 | ||
| 400 | if (reason & MCSR_MCP) | 400 | if (reason & MCSR_MCP) |
| 401 | printk("Machine Check Signal\n"); | 401 | printk("Machine Check Signal\n"); |
| 402 | if (reason & MCSR_ICPERR) | 402 | if (reason & MCSR_ICPERR) |
| 403 | printk("Instruction Cache Parity Error\n"); | 403 | printk("Instruction Cache Parity Error\n"); |
| 404 | if (reason & MCSR_DCP_PERR) | 404 | if (reason & MCSR_DCP_PERR) |
| 405 | printk("Data Cache Push Parity Error\n"); | 405 | printk("Data Cache Push Parity Error\n"); |
| 406 | if (reason & MCSR_DCPERR) | 406 | if (reason & MCSR_DCPERR) |
| 407 | printk("Data Cache Parity Error\n"); | 407 | printk("Data Cache Parity Error\n"); |
| 408 | if (reason & MCSR_BUS_IAERR) | 408 | if (reason & MCSR_BUS_IAERR) |
| 409 | printk("Bus - Instruction Address Error\n"); | 409 | printk("Bus - Instruction Address Error\n"); |
| 410 | if (reason & MCSR_BUS_RAERR) | 410 | if (reason & MCSR_BUS_RAERR) |
| 411 | printk("Bus - Read Address Error\n"); | 411 | printk("Bus - Read Address Error\n"); |
| 412 | if (reason & MCSR_BUS_WAERR) | 412 | if (reason & MCSR_BUS_WAERR) |
| 413 | printk("Bus - Write Address Error\n"); | 413 | printk("Bus - Write Address Error\n"); |
| 414 | if (reason & MCSR_BUS_IBERR) | 414 | if (reason & MCSR_BUS_IBERR) |
| 415 | printk("Bus - Instruction Data Error\n"); | 415 | printk("Bus - Instruction Data Error\n"); |
| 416 | if (reason & MCSR_BUS_RBERR) | 416 | if (reason & MCSR_BUS_RBERR) |
| 417 | printk("Bus - Read Data Bus Error\n"); | 417 | printk("Bus - Read Data Bus Error\n"); |
| 418 | if (reason & MCSR_BUS_WBERR) | 418 | if (reason & MCSR_BUS_WBERR) |
| 419 | printk("Bus - Read Data Bus Error\n"); | 419 | printk("Bus - Read Data Bus Error\n"); |
| 420 | if (reason & MCSR_BUS_IPERR) | 420 | if (reason & MCSR_BUS_IPERR) |
| 421 | printk("Bus - Instruction Parity Error\n"); | 421 | printk("Bus - Instruction Parity Error\n"); |
| 422 | if (reason & MCSR_BUS_RPERR) | 422 | if (reason & MCSR_BUS_RPERR) |
| 423 | printk("Bus - Read Parity Error\n"); | 423 | printk("Bus - Read Parity Error\n"); |
| 424 | 424 | ||
| 425 | return 0; | 425 | return 0; |
| 426 | } | 426 | } |
| 427 | #elif defined(CONFIG_E200) | 427 | #elif defined(CONFIG_E200) |
| 428 | int machine_check_e200(struct pt_regs *regs) | 428 | int machine_check_e200(struct pt_regs *regs) |
| 429 | { | 429 | { |
| 430 | unsigned long reason = get_mc_reason(regs); | 430 | unsigned long reason = get_mc_reason(regs); |
| 431 | 431 | ||
| 432 | printk("Machine check in kernel mode.\n"); | 432 | printk("Machine check in kernel mode.\n"); |
| 433 | printk("Caused by (from MCSR=%lx): ", reason); | 433 | printk("Caused by (from MCSR=%lx): ", reason); |
| 434 | 434 | ||
| 435 | if (reason & MCSR_MCP) | 435 | if (reason & MCSR_MCP) |
| 436 | printk("Machine Check Signal\n"); | 436 | printk("Machine Check Signal\n"); |
| 437 | if (reason & MCSR_CP_PERR) | 437 | if (reason & MCSR_CP_PERR) |
| 438 | printk("Cache Push Parity Error\n"); | 438 | printk("Cache Push Parity Error\n"); |
| 439 | if (reason & MCSR_CPERR) | 439 | if (reason & MCSR_CPERR) |
| 440 | printk("Cache Parity Error\n"); | 440 | printk("Cache Parity Error\n"); |
| 441 | if (reason & MCSR_EXCP_ERR) | 441 | if (reason & MCSR_EXCP_ERR) |
| 442 | printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); | 442 | printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); |
| 443 | if (reason & MCSR_BUS_IRERR) | 443 | if (reason & MCSR_BUS_IRERR) |
| 444 | printk("Bus - Read Bus Error on instruction fetch\n"); | 444 | printk("Bus - Read Bus Error on instruction fetch\n"); |
| 445 | if (reason & MCSR_BUS_DRERR) | 445 | if (reason & MCSR_BUS_DRERR) |
| 446 | printk("Bus - Read Bus Error on data load\n"); | 446 | printk("Bus - Read Bus Error on data load\n"); |
| 447 | if (reason & MCSR_BUS_WRERR) | 447 | if (reason & MCSR_BUS_WRERR) |
| 448 | printk("Bus - Write Bus Error on buffered store or cache line push\n"); | 448 | printk("Bus - Write Bus Error on buffered store or cache line push\n"); |
| 449 | 449 | ||
| 450 | return 0; | 450 | return 0; |
| 451 | } | 451 | } |
| 452 | #else | 452 | #else |
| 453 | int machine_check_generic(struct pt_regs *regs) | 453 | int machine_check_generic(struct pt_regs *regs) |
| 454 | { | 454 | { |
| 455 | unsigned long reason = get_mc_reason(regs); | 455 | unsigned long reason = get_mc_reason(regs); |
| 456 | 456 | ||
| 457 | printk("Machine check in kernel mode.\n"); | 457 | printk("Machine check in kernel mode.\n"); |
| 458 | printk("Caused by (from SRR1=%lx): ", reason); | 458 | printk("Caused by (from SRR1=%lx): ", reason); |
| 459 | switch (reason & 0x601F0000) { | 459 | switch (reason & 0x601F0000) { |
| 460 | case 0x80000: | 460 | case 0x80000: |
| 461 | printk("Machine check signal\n"); | 461 | printk("Machine check signal\n"); |
| 462 | break; | 462 | break; |
| 463 | case 0: /* for 601 */ | 463 | case 0: /* for 601 */ |
| 464 | case 0x40000: | 464 | case 0x40000: |
| 465 | case 0x140000: /* 7450 MSS error and TEA */ | 465 | case 0x140000: /* 7450 MSS error and TEA */ |
| 466 | printk("Transfer error ack signal\n"); | 466 | printk("Transfer error ack signal\n"); |
| 467 | break; | 467 | break; |
| 468 | case 0x20000: | 468 | case 0x20000: |
| 469 | printk("Data parity error signal\n"); | 469 | printk("Data parity error signal\n"); |
| 470 | break; | 470 | break; |
| 471 | case 0x10000: | 471 | case 0x10000: |
| 472 | printk("Address parity error signal\n"); | 472 | printk("Address parity error signal\n"); |
| 473 | break; | 473 | break; |
| 474 | case 0x20000000: | 474 | case 0x20000000: |
| 475 | printk("L1 Data Cache error\n"); | 475 | printk("L1 Data Cache error\n"); |
| 476 | break; | 476 | break; |
| 477 | case 0x40000000: | 477 | case 0x40000000: |
| 478 | printk("L1 Instruction Cache error\n"); | 478 | printk("L1 Instruction Cache error\n"); |
| 479 | break; | 479 | break; |
| 480 | case 0x00100000: | 480 | case 0x00100000: |
| 481 | printk("L2 data cache parity error\n"); | 481 | printk("L2 data cache parity error\n"); |
| 482 | break; | 482 | break; |
| 483 | default: | 483 | default: |
| 484 | printk("Unknown values in msr\n"); | 484 | printk("Unknown values in msr\n"); |
| 485 | } | 485 | } |
| 486 | return 0; | 486 | return 0; |
| 487 | } | 487 | } |
| 488 | #endif /* everything else */ | 488 | #endif /* everything else */ |
| 489 | 489 | ||
| 490 | void machine_check_exception(struct pt_regs *regs) | 490 | void machine_check_exception(struct pt_regs *regs) |
| 491 | { | 491 | { |
| 492 | int recover = 0; | 492 | int recover = 0; |
| 493 | 493 | ||
| 494 | /* See if we have any machine dependent calls. In theory, we would want | 494 | /* See if we have any machine dependent calls. In theory, we would want |
| 495 | * to call the CPU first, and call the ppc_md. one if the CPU | 495 | * to call the CPU first, and call the ppc_md. one if the CPU |
| 496 | * one returns a positive number. However there is existing code | 496 | * one returns a positive number. However there is existing code |
| 497 | * that assumes the board gets a first chance, so let's keep it | 497 | * that assumes the board gets a first chance, so let's keep it |
| 498 | * that way for now and fix things later. --BenH. | 498 | * that way for now and fix things later. --BenH. |
| 499 | */ | 499 | */ |
| 500 | if (ppc_md.machine_check_exception) | 500 | if (ppc_md.machine_check_exception) |
| 501 | recover = ppc_md.machine_check_exception(regs); | 501 | recover = ppc_md.machine_check_exception(regs); |
| 502 | else if (cur_cpu_spec->machine_check) | 502 | else if (cur_cpu_spec->machine_check) |
| 503 | recover = cur_cpu_spec->machine_check(regs); | 503 | recover = cur_cpu_spec->machine_check(regs); |
| 504 | 504 | ||
| 505 | if (recover > 0) | 505 | if (recover > 0) |
| 506 | return; | 506 | return; |
| 507 | 507 | ||
| 508 | if (user_mode(regs)) { | 508 | if (user_mode(regs)) { |
| 509 | regs->msr |= MSR_RI; | 509 | regs->msr |= MSR_RI; |
| 510 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | 510 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); |
| 511 | return; | 511 | return; |
| 512 | } | 512 | } |
| 513 | 513 | ||
| 514 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | 514 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) |
| 515 | /* the qspan pci read routines can cause machine checks -- Cort | 515 | /* the qspan pci read routines can cause machine checks -- Cort |
| 516 | * | 516 | * |
| 517 | * yuck !!! that totally needs to go away ! There are better ways | 517 | * yuck !!! that totally needs to go away ! There are better ways |
| 518 | * to deal with that than having a wart in the mcheck handler. | 518 | * to deal with that than having a wart in the mcheck handler. |
| 519 | * -- BenH | 519 | * -- BenH |
| 520 | */ | 520 | */ |
| 521 | bad_page_fault(regs, regs->dar, SIGBUS); | 521 | bad_page_fault(regs, regs->dar, SIGBUS); |
| 522 | return; | 522 | return; |
| 523 | #endif | 523 | #endif |
| 524 | 524 | ||
| 525 | if (debugger_fault_handler(regs)) { | 525 | if (debugger_fault_handler(regs)) { |
| 526 | regs->msr |= MSR_RI; | 526 | regs->msr |= MSR_RI; |
| 527 | return; | 527 | return; |
| 528 | } | 528 | } |
| 529 | 529 | ||
| 530 | if (check_io_access(regs)) | 530 | if (check_io_access(regs)) |
| 531 | return; | 531 | return; |
| 532 | 532 | ||
| 533 | if (debugger_fault_handler(regs)) | 533 | if (debugger_fault_handler(regs)) |
| 534 | return; | 534 | return; |
| 535 | die("Machine check", regs, SIGBUS); | 535 | die("Machine check", regs, SIGBUS); |
| 536 | 536 | ||
| 537 | /* Must die if the interrupt is not recoverable */ | 537 | /* Must die if the interrupt is not recoverable */ |
| 538 | if (!(regs->msr & MSR_RI)) | 538 | if (!(regs->msr & MSR_RI)) |
| 539 | panic("Unrecoverable Machine check"); | 539 | panic("Unrecoverable Machine check"); |
| 540 | } | 540 | } |
| 541 | 541 | ||
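/*
 * The recovery contract above, sketched for illustration: a platform
 * claims a machine check by returning a positive value from its ppc_md
 * hook. Hypothetical example — myboard_pci_fixup() is invented:
 *
 *	static int myboard_machine_check(struct pt_regs *regs)
 *	{
 *		if (myboard_pci_fixup(regs))	/* hypothetical helper */
 *			return 1;		/* recovered, resume */
 *		return 0;			/* fall back to generic code */
 *	}
 *
 *	ppc_md.machine_check_exception = myboard_machine_check;
 */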
| 542 | void SMIException(struct pt_regs *regs) | 542 | void SMIException(struct pt_regs *regs) |
| 543 | { | 543 | { |
| 544 | die("System Management Interrupt", regs, SIGABRT); | 544 | die("System Management Interrupt", regs, SIGABRT); |
| 545 | } | 545 | } |
| 546 | 546 | ||
| 547 | void unknown_exception(struct pt_regs *regs) | 547 | void unknown_exception(struct pt_regs *regs) |
| 548 | { | 548 | { |
| 549 | printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", | 549 | printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", |
| 550 | regs->nip, regs->msr, regs->trap); | 550 | regs->nip, regs->msr, regs->trap); |
| 551 | 551 | ||
| 552 | _exception(SIGTRAP, regs, 0, 0); | 552 | _exception(SIGTRAP, regs, 0, 0); |
| 553 | } | 553 | } |
| 554 | 554 | ||
| 555 | void instruction_breakpoint_exception(struct pt_regs *regs) | 555 | void instruction_breakpoint_exception(struct pt_regs *regs) |
| 556 | { | 556 | { |
| 557 | if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, | 557 | if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, |
| 558 | 5, SIGTRAP) == NOTIFY_STOP) | 558 | 5, SIGTRAP) == NOTIFY_STOP) |
| 559 | return; | 559 | return; |
| 560 | if (debugger_iabr_match(regs)) | 560 | if (debugger_iabr_match(regs)) |
| 561 | return; | 561 | return; |
| 562 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); | 562 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); |
| 563 | } | 563 | } |
| 564 | 564 | ||
| 565 | void RunModeException(struct pt_regs *regs) | 565 | void RunModeException(struct pt_regs *regs) |
| 566 | { | 566 | { |
| 567 | _exception(SIGTRAP, regs, 0, 0); | 567 | _exception(SIGTRAP, regs, 0, 0); |
| 568 | } | 568 | } |
| 569 | 569 | ||
| 570 | void __kprobes single_step_exception(struct pt_regs *regs) | 570 | void __kprobes single_step_exception(struct pt_regs *regs) |
| 571 | { | 571 | { |
| 572 | regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ | 572 | regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ |
| 573 | 573 | ||
| 574 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, | 574 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, |
| 575 | 5, SIGTRAP) == NOTIFY_STOP) | 575 | 5, SIGTRAP) == NOTIFY_STOP) |
| 576 | return; | 576 | return; |
| 577 | if (debugger_sstep(regs)) | 577 | if (debugger_sstep(regs)) |
| 578 | return; | 578 | return; |
| 579 | 579 | ||
| 580 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | 580 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); |
| 581 | } | 581 | } |
| 582 | 582 | ||
| 583 | /* | 583 | /* |
| 584 | * After we have successfully emulated an instruction, we have to | 584 | * After we have successfully emulated an instruction, we have to |
| 585 | * check if the instruction was being single-stepped, and if so, | 585 | * check if the instruction was being single-stepped, and if so, |
| 586 | * pretend we got a single-step exception. This was pointed out | 586 | * pretend we got a single-step exception. This was pointed out |
| 587 | * by Kumar Gala. -- paulus | 587 | * by Kumar Gala. -- paulus |
| 588 | */ | 588 | */ |
| 589 | static void emulate_single_step(struct pt_regs *regs) | 589 | static void emulate_single_step(struct pt_regs *regs) |
| 590 | { | 590 | { |
| 591 | if (single_stepping(regs)) { | 591 | if (single_stepping(regs)) { |
| 592 | clear_single_step(regs); | 592 | clear_single_step(regs); |
| 593 | _exception(SIGTRAP, regs, TRAP_TRACE, 0); | 593 | _exception(SIGTRAP, regs, TRAP_TRACE, 0); |
| 594 | } | 594 | } |
| 595 | } | 595 | } |
| 596 | 596 | ||
| 597 | static inline int __parse_fpscr(unsigned long fpscr) | 597 | static inline int __parse_fpscr(unsigned long fpscr) |
| 598 | { | 598 | { |
| 599 | int ret = 0; | 599 | int ret = 0; |
| 600 | 600 | ||
| 601 | /* Invalid operation */ | 601 | /* Invalid operation */ |
| 602 | if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) | 602 | if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) |
| 603 | ret = FPE_FLTINV; | 603 | ret = FPE_FLTINV; |
| 604 | 604 | ||
| 605 | /* Overflow */ | 605 | /* Overflow */ |
| 606 | else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) | 606 | else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) |
| 607 | ret = FPE_FLTOVF; | 607 | ret = FPE_FLTOVF; |
| 608 | 608 | ||
| 609 | /* Underflow */ | 609 | /* Underflow */ |
| 610 | else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) | 610 | else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) |
| 611 | ret = FPE_FLTUND; | 611 | ret = FPE_FLTUND; |
| 612 | 612 | ||
| 613 | /* Divide by zero */ | 613 | /* Divide by zero */ |
| 614 | else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) | 614 | else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) |
| 615 | ret = FPE_FLTDIV; | 615 | ret = FPE_FLTDIV; |
| 616 | 616 | ||
| 617 | /* Inexact result */ | 617 | /* Inexact result */ |
| 618 | else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) | 618 | else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) |
| 619 | ret = FPE_FLTRES; | 619 | ret = FPE_FLTRES; |
| 620 | 620 | ||
| 621 | return ret; | 621 | return ret; |
| 622 | } | 622 | } |
| 623 | 623 | ||
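/*
 * Illustration of the enable/status pairing that __parse_fpscr() tests:
 * a SIGFPE is only classified when both bits of a pair are set. A
 * minimal userspace sketch, assuming glibc's feenableexcept() extension:
 *
 *	#define _GNU_SOURCE
 *	#include <fenv.h>
 *
 *	int main(void)
 *	{
 *		feenableexcept(FE_DIVBYZERO);	/* sets FPSCR_ZE */
 *		volatile double x = 1.0, y = 0.0;
 *		x /= y;				/* sets FPSCR_ZX: SIGFPE,
 *						   si_code == FPE_FLTDIV */
 *		return 0;
 *	}
 */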
| 624 | static void parse_fpe(struct pt_regs *regs) | 624 | static void parse_fpe(struct pt_regs *regs) |
| 625 | { | 625 | { |
| 626 | int code = 0; | 626 | int code = 0; |
| 627 | 627 | ||
| 628 | flush_fp_to_thread(current); | 628 | flush_fp_to_thread(current); |
| 629 | 629 | ||
| 630 | code = __parse_fpscr(current->thread.fpscr.val); | 630 | code = __parse_fpscr(current->thread.fpscr.val); |
| 631 | 631 | ||
| 632 | _exception(SIGFPE, regs, code, regs->nip); | 632 | _exception(SIGFPE, regs, code, regs->nip); |
| 633 | } | 633 | } |
| 634 | 634 | ||
| 635 | /* | 635 | /* |
| 636 | * Illegal instruction emulation support. Originally written to | 636 | * Illegal instruction emulation support. Originally written to |
| 637 | * provide the PVR to user applications using the mfspr rd, PVR. | 637 | * provide the PVR to user applications using the mfspr rd, PVR. |
| 638 | * Return non-zero if we can't emulate, or -EFAULT if the associated | 638 | * Return non-zero if we can't emulate, or -EFAULT if the associated |
| 639 | * memory access caused an access fault. Return zero on success. | 639 | * memory access caused an access fault. Return zero on success. |
| 640 | * | 640 | * |
| 641 | * There are a couple of ways to do this, either "decode" the instruction | 641 | * There are a couple of ways to do this, either "decode" the instruction |
| 642 | * or directly match lots of bits. In this case, matching lots of | 642 | * or directly match lots of bits. In this case, matching lots of |
| 643 | * bits is faster and easier. | 643 | * bits is faster and easier. |
| 644 | * | 644 | * |
| 645 | */ | 645 | */ |
| 646 | static int emulate_string_inst(struct pt_regs *regs, u32 instword) | 646 | static int emulate_string_inst(struct pt_regs *regs, u32 instword) |
| 647 | { | 647 | { |
| 648 | u8 rT = (instword >> 21) & 0x1f; | 648 | u8 rT = (instword >> 21) & 0x1f; |
| 649 | u8 rA = (instword >> 16) & 0x1f; | 649 | u8 rA = (instword >> 16) & 0x1f; |
| 650 | u8 NB_RB = (instword >> 11) & 0x1f; | 650 | u8 NB_RB = (instword >> 11) & 0x1f; |
| 651 | u32 num_bytes; | 651 | u32 num_bytes; |
| 652 | unsigned long EA; | 652 | unsigned long EA; |
| 653 | int pos = 0; | 653 | int pos = 0; |
| 654 | 654 | ||
| 655 | /* Early out if we are an invalid form of lswx */ | 655 | /* Early out if we are an invalid form of lswx */ |
| 656 | if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) | 656 | if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) |
| 657 | if ((rT == rA) || (rT == NB_RB)) | 657 | if ((rT == rA) || (rT == NB_RB)) |
| 658 | return -EINVAL; | 658 | return -EINVAL; |
| 659 | 659 | ||
| 660 | EA = (rA == 0) ? 0 : regs->gpr[rA]; | 660 | EA = (rA == 0) ? 0 : regs->gpr[rA]; |
| 661 | 661 | ||
| 662 | switch (instword & PPC_INST_STRING_MASK) { | 662 | switch (instword & PPC_INST_STRING_MASK) { |
| 663 | case PPC_INST_LSWX: | 663 | case PPC_INST_LSWX: |
| 664 | case PPC_INST_STSWX: | 664 | case PPC_INST_STSWX: |
| 665 | EA += NB_RB; | 665 | EA += NB_RB; |
| 666 | num_bytes = regs->xer & 0x7f; | 666 | num_bytes = regs->xer & 0x7f; |
| 667 | break; | 667 | break; |
| 668 | case PPC_INST_LSWI: | 668 | case PPC_INST_LSWI: |
| 669 | case PPC_INST_STSWI: | 669 | case PPC_INST_STSWI: |
| 670 | num_bytes = (NB_RB == 0) ? 32 : NB_RB; | 670 | num_bytes = (NB_RB == 0) ? 32 : NB_RB; |
| 671 | break; | 671 | break; |
| 672 | default: | 672 | default: |
| 673 | return -EINVAL; | 673 | return -EINVAL; |
| 674 | } | 674 | } |
| 675 | 675 | ||
| 676 | while (num_bytes != 0) | 676 | while (num_bytes != 0) |
| 677 | { | 677 | { |
| 678 | u8 val; | 678 | u8 val; |
| 679 | u32 shift = 8 * (3 - (pos & 0x3)); | 679 | u32 shift = 8 * (3 - (pos & 0x3)); |
| 680 | 680 | ||
| 681 | switch ((instword & PPC_INST_STRING_MASK)) { | 681 | switch ((instword & PPC_INST_STRING_MASK)) { |
| 682 | case PPC_INST_LSWX: | 682 | case PPC_INST_LSWX: |
| 683 | case PPC_INST_LSWI: | 683 | case PPC_INST_LSWI: |
| 684 | if (get_user(val, (u8 __user *)EA)) | 684 | if (get_user(val, (u8 __user *)EA)) |
| 685 | return -EFAULT; | 685 | return -EFAULT; |
| 686 | /* first time updating this reg, | 686 | /* first time updating this reg, |
| 687 | * zero it out */ | 687 | * zero it out */ |
| 688 | if (pos == 0) | 688 | if (pos == 0) |
| 689 | regs->gpr[rT] = 0; | 689 | regs->gpr[rT] = 0; |
| 690 | regs->gpr[rT] |= val << shift; | 690 | regs->gpr[rT] |= val << shift; |
| 691 | break; | 691 | break; |
| 692 | case PPC_INST_STSWI: | 692 | case PPC_INST_STSWI: |
| 693 | case PPC_INST_STSWX: | 693 | case PPC_INST_STSWX: |
| 694 | val = regs->gpr[rT] >> shift; | 694 | val = regs->gpr[rT] >> shift; |
| 695 | if (put_user(val, (u8 __user *)EA)) | 695 | if (put_user(val, (u8 __user *)EA)) |
| 696 | return -EFAULT; | 696 | return -EFAULT; |
| 697 | break; | 697 | break; |
| 698 | } | 698 | } |
| 699 | /* move EA to next address */ | 699 | /* move EA to next address */ |
| 700 | EA += 1; | 700 | EA += 1; |
| 701 | num_bytes--; | 701 | num_bytes--; |
| 702 | 702 | ||
| 703 | /* manage our position within the register */ | 703 | /* manage our position within the register */ |
| 704 | if (++pos == 4) { | 704 | if (++pos == 4) { |
| 705 | pos = 0; | 705 | pos = 0; |
| 706 | if (++rT == 32) | 706 | if (++rT == 32) |
| 707 | rT = 0; | 707 | rT = 0; |
| 708 | } | 708 | } |
| 709 | } | 709 | } |
| 710 | 710 | ||
| 711 | return 0; | 711 | return 0; |
| 712 | } | 712 | } |
| 713 | 713 | ||
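/*
 * A worked example of the string semantics emulated above: loads fill
 * successive registers big-endian from the top byte down, zeroing each
 * register the first time it is touched (the pos == 0 case), with rT
 * wrapping from r31 to r0. Given memory at (r4) = 01 02 03 04 05 06:
 *
 *	lswi	r5,r4,6
 *
 * leaves r5 = 0x01020304 and r6 = 0x05060000, exactly what the
 * 8 * (3 - (pos & 0x3)) shift in the loop produces.
 */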
| 714 | static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) | 714 | static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) |
| 715 | { | 715 | { |
| 716 | u32 ra, rs; | 716 | u32 ra, rs; |
| 717 | unsigned long tmp; | 717 | unsigned long tmp; |
| 718 | 718 | ||
| 719 | ra = (instword >> 16) & 0x1f; | 719 | ra = (instword >> 16) & 0x1f; |
| 720 | rs = (instword >> 21) & 0x1f; | 720 | rs = (instword >> 21) & 0x1f; |
| 721 | 721 | ||
| 722 | tmp = regs->gpr[rs]; | 722 | tmp = regs->gpr[rs]; |
| 723 | tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); | 723 | tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); |
| 724 | tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); | 724 | tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); |
| 725 | tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; | 725 | tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; |
| 726 | regs->gpr[ra] = tmp; | 726 | regs->gpr[ra] = tmp; |
| 727 | 727 | ||
| 728 | return 0; | 728 | return 0; |
| 729 | } | 729 | } |
| 730 | 730 | ||
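/*
 * The three masked steps above are the classic SWAR population count,
 * kept bytewise to match popcntb semantics: each byte of ra receives
 * the number of 1-bits in the corresponding byte of rs. Worked on a
 * single byte, tmp = 0xb3 (10110011, five set bits):
 *
 *	t = tmp - ((tmp >> 1) & 0x55);		/* 0xb3 -> 0x62: pair sums   */
 *	t = (t & 0x33) + ((t >> 2) & 0x33);	/* 0x62 -> 0x32: nibble sums */
 *	t = (t + (t >> 4)) & 0x0f;		/* 0x32 -> 0x05: five bits   */
 *
 * The 0x0f0f... mask in the final step stops counts spilling across
 * byte boundaries.
 */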
| 731 | static int emulate_isel(struct pt_regs *regs, u32 instword) | 731 | static int emulate_isel(struct pt_regs *regs, u32 instword) |
| 732 | { | 732 | { |
| 733 | u8 rT = (instword >> 21) & 0x1f; | 733 | u8 rT = (instword >> 21) & 0x1f; |
| 734 | u8 rA = (instword >> 16) & 0x1f; | 734 | u8 rA = (instword >> 16) & 0x1f; |
| 735 | u8 rB = (instword >> 11) & 0x1f; | 735 | u8 rB = (instword >> 11) & 0x1f; |
| 736 | u8 BC = (instword >> 6) & 0x1f; | 736 | u8 BC = (instword >> 6) & 0x1f; |
| 737 | u8 bit; | 737 | u8 bit; |
| 738 | unsigned long tmp; | 738 | unsigned long tmp; |
| 739 | 739 | ||
| 740 | tmp = (rA == 0) ? 0 : regs->gpr[rA]; | 740 | tmp = (rA == 0) ? 0 : regs->gpr[rA]; |
| 741 | bit = (regs->ccr >> (31 - BC)) & 0x1; | 741 | bit = (regs->ccr >> (31 - BC)) & 0x1; |
| 742 | 742 | ||
| 743 | regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; | 743 | regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; |
| 744 | 744 | ||
| 745 | return 0; | 745 | return 0; |
| 746 | } | 746 | } |
| 747 | 747 | ||
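/*
 * The emulation above implements the isel semantics, roughly:
 *
 *	GPR[rT] = CR[BC] ? ((rA == 0) ? 0 : GPR[rA]) : GPR[rB];
 *
 * e.g. "isel r3,r4,r5,0" copies r4 into r3 when cr0's LT bit is set and
 * r5 otherwise; BC counts from the most significant CR bit, hence the
 * (31 - BC) shift above.
 */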
| 748 | static int emulate_instruction(struct pt_regs *regs) | 748 | static int emulate_instruction(struct pt_regs *regs) |
| 749 | { | 749 | { |
| 750 | u32 instword; | 750 | u32 instword; |
| 751 | u32 rd; | 751 | u32 rd; |
| 752 | 752 | ||
| 753 | if (!user_mode(regs) || (regs->msr & MSR_LE)) | 753 | if (!user_mode(regs) || (regs->msr & MSR_LE)) |
| 754 | return -EINVAL; | 754 | return -EINVAL; |
| 755 | CHECK_FULL_REGS(regs); | 755 | CHECK_FULL_REGS(regs); |
| 756 | 756 | ||
| 757 | if (get_user(instword, (u32 __user *)(regs->nip))) | 757 | if (get_user(instword, (u32 __user *)(regs->nip))) |
| 758 | return -EFAULT; | 758 | return -EFAULT; |
| 759 | 759 | ||
| 760 | /* Emulate the mfspr rD, PVR. */ | 760 | /* Emulate the mfspr rD, PVR. */ |
| 761 | if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { | 761 | if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { |
| 762 | PPC_WARN_EMULATED(mfpvr); | 762 | PPC_WARN_EMULATED(mfpvr, regs); |
| 763 | rd = (instword >> 21) & 0x1f; | 763 | rd = (instword >> 21) & 0x1f; |
| 764 | regs->gpr[rd] = mfspr(SPRN_PVR); | 764 | regs->gpr[rd] = mfspr(SPRN_PVR); |
| 765 | return 0; | 765 | return 0; |
| 766 | } | 766 | } |
| 767 | 767 | ||
| 768 | /* Emulating the dcba insn is just a no-op. */ | 768 | /* Emulating the dcba insn is just a no-op. */ |
| 769 | if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { | 769 | if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { |
| 770 | PPC_WARN_EMULATED(dcba); | 770 | PPC_WARN_EMULATED(dcba, regs); |
| 771 | return 0; | 771 | return 0; |
| 772 | } | 772 | } |
| 773 | 773 | ||
| 774 | /* Emulate the mcrxr insn. */ | 774 | /* Emulate the mcrxr insn. */ |
| 775 | if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { | 775 | if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { |
| 776 | int shift = (instword >> 21) & 0x1c; | 776 | int shift = (instword >> 21) & 0x1c; |
| 777 | unsigned long msk = 0xf0000000UL >> shift; | 777 | unsigned long msk = 0xf0000000UL >> shift; |
| 778 | 778 | ||
| 779 | PPC_WARN_EMULATED(mcrxr); | 779 | PPC_WARN_EMULATED(mcrxr, regs); |
| 780 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); | 780 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); |
| 781 | regs->xer &= ~0xf0000000UL; | 781 | regs->xer &= ~0xf0000000UL; |
| 782 | return 0; | 782 | return 0; |
| 783 | } | 783 | } |
| 784 | 784 | ||
| 785 | /* Emulate load/store string insn. */ | 785 | /* Emulate load/store string insn. */ |
| 786 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { | 786 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { |
| 787 | PPC_WARN_EMULATED(string); | 787 | PPC_WARN_EMULATED(string, regs); |
| 788 | return emulate_string_inst(regs, instword); | 788 | return emulate_string_inst(regs, instword); |
| 789 | } | 789 | } |
| 790 | 790 | ||
| 791 | /* Emulate the popcntb (Population Count Bytes) instruction. */ | 791 | /* Emulate the popcntb (Population Count Bytes) instruction. */ |
| 792 | if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { | 792 | if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { |
| 793 | PPC_WARN_EMULATED(popcntb); | 793 | PPC_WARN_EMULATED(popcntb, regs); |
| 794 | return emulate_popcntb_inst(regs, instword); | 794 | return emulate_popcntb_inst(regs, instword); |
| 795 | } | 795 | } |
| 796 | 796 | ||
| 797 | /* Emulate isel (Integer Select) instruction */ | 797 | /* Emulate isel (Integer Select) instruction */ |
| 798 | if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { | 798 | if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { |
| 799 | PPC_WARN_EMULATED(isel); | 799 | PPC_WARN_EMULATED(isel, regs); |
| 800 | return emulate_isel(regs, instword); | 800 | return emulate_isel(regs, instword); |
| 801 | } | 801 | } |
| 802 | 802 | ||
| 803 | return -EINVAL; | 803 | return -EINVAL; |
| 804 | } | 804 | } |
| 805 | 805 | ||
| 806 | int is_valid_bugaddr(unsigned long addr) | 806 | int is_valid_bugaddr(unsigned long addr) |
| 807 | { | 807 | { |
| 808 | return is_kernel_addr(addr); | 808 | return is_kernel_addr(addr); |
| 809 | } | 809 | } |
| 810 | 810 | ||
| 811 | void __kprobes program_check_exception(struct pt_regs *regs) | 811 | void __kprobes program_check_exception(struct pt_regs *regs) |
| 812 | { | 812 | { |
| 813 | unsigned int reason = get_reason(regs); | 813 | unsigned int reason = get_reason(regs); |
| 814 | extern int do_mathemu(struct pt_regs *regs); | 814 | extern int do_mathemu(struct pt_regs *regs); |
| 815 | 815 | ||
| 816 | /* We can now get here via an FP Unavailable exception if the core | 816 | /* We can now get here via an FP Unavailable exception if the core |
| 817 | * has no FPU; in that case the reason flags will be 0 */ | 817 | * has no FPU; in that case the reason flags will be 0 */ |
| 818 | 818 | ||
| 819 | if (reason & REASON_FP) { | 819 | if (reason & REASON_FP) { |
| 820 | /* IEEE FP exception */ | 820 | /* IEEE FP exception */ |
| 821 | parse_fpe(regs); | 821 | parse_fpe(regs); |
| 822 | return; | 822 | return; |
| 823 | } | 823 | } |
| 824 | if (reason & REASON_TRAP) { | 824 | if (reason & REASON_TRAP) { |
| 825 | /* trap exception */ | 825 | /* trap exception */ |
| 826 | if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) | 826 | if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) |
| 827 | == NOTIFY_STOP) | 827 | == NOTIFY_STOP) |
| 828 | return; | 828 | return; |
| 829 | if (debugger_bpt(regs)) | 829 | if (debugger_bpt(regs)) |
| 830 | return; | 830 | return; |
| 831 | 831 | ||
| 832 | if (!(regs->msr & MSR_PR) && /* not user-mode */ | 832 | if (!(regs->msr & MSR_PR) && /* not user-mode */ |
| 833 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { | 833 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { |
| 834 | regs->nip += 4; | 834 | regs->nip += 4; |
| 835 | return; | 835 | return; |
| 836 | } | 836 | } |
| 837 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); | 837 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); |
| 838 | return; | 838 | return; |
| 839 | } | 839 | } |
| 840 | 840 | ||
| 841 | local_irq_enable(); | 841 | local_irq_enable(); |
| 842 | 842 | ||
| 843 | #ifdef CONFIG_MATH_EMULATION | 843 | #ifdef CONFIG_MATH_EMULATION |
| 844 | /* (reason & REASON_ILLEGAL) would be the obvious thing here, | 844 | /* (reason & REASON_ILLEGAL) would be the obvious thing here, |
| 845 | * but there seems to be a hardware bug on the 405GP (RevD) | 845 | * but there seems to be a hardware bug on the 405GP (RevD) |
| 846 | * that means ESR is sometimes set incorrectly - either to | 846 | * that means ESR is sometimes set incorrectly - either to |
| 847 | * ESR_DST (!?) or 0. In the process of chasing this with the | 847 | * ESR_DST (!?) or 0. In the process of chasing this with the |
| 848 | * hardware people - not sure if it can happen on any illegal | 848 | * hardware people - not sure if it can happen on any illegal |
| 849 | * instruction or only on FP instructions, whether there is a | 849 | * instruction or only on FP instructions, whether there is a |
| 850 | * pattern to occurrences etc. -dgibson 31/Mar/2003 */ | 850 | * pattern to occurrences etc. -dgibson 31/Mar/2003 */ |
| 851 | switch (do_mathemu(regs)) { | 851 | switch (do_mathemu(regs)) { |
| 852 | case 0: | 852 | case 0: |
| 853 | emulate_single_step(regs); | 853 | emulate_single_step(regs); |
| 854 | return; | 854 | return; |
| 855 | case 1: { | 855 | case 1: { |
| 856 | int code = 0; | 856 | int code = 0; |
| 857 | code = __parse_fpscr(current->thread.fpscr.val); | 857 | code = __parse_fpscr(current->thread.fpscr.val); |
| 858 | _exception(SIGFPE, regs, code, regs->nip); | 858 | _exception(SIGFPE, regs, code, regs->nip); |
| 859 | return; | 859 | return; |
| 860 | } | 860 | } |
| 861 | case -EFAULT: | 861 | case -EFAULT: |
| 862 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | 862 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); |
| 863 | return; | 863 | return; |
| 864 | } | 864 | } |
| 865 | /* fall through on any other errors */ | 865 | /* fall through on any other errors */ |
| 866 | #endif /* CONFIG_MATH_EMULATION */ | 866 | #endif /* CONFIG_MATH_EMULATION */ |
| 867 | 867 | ||
| 868 | /* Try to emulate it if we should. */ | 868 | /* Try to emulate it if we should. */ |
| 869 | if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { | 869 | if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { |
| 870 | switch (emulate_instruction(regs)) { | 870 | switch (emulate_instruction(regs)) { |
| 871 | case 0: | 871 | case 0: |
| 872 | regs->nip += 4; | 872 | regs->nip += 4; |
| 873 | emulate_single_step(regs); | 873 | emulate_single_step(regs); |
| 874 | return; | 874 | return; |
| 875 | case -EFAULT: | 875 | case -EFAULT: |
| 876 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | 876 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); |
| 877 | return; | 877 | return; |
| 878 | } | 878 | } |
| 879 | } | 879 | } |
| 880 | 880 | ||
| 881 | if (reason & REASON_PRIVILEGED) | 881 | if (reason & REASON_PRIVILEGED) |
| 882 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); | 882 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); |
| 883 | else | 883 | else |
| 884 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 884 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 885 | } | 885 | } |
| 886 | 886 | ||
| 887 | void alignment_exception(struct pt_regs *regs) | 887 | void alignment_exception(struct pt_regs *regs) |
| 888 | { | 888 | { |
| 889 | int sig, code, fixed = 0; | 889 | int sig, code, fixed = 0; |
| 890 | 890 | ||
| 891 | /* we don't implement logging of alignment exceptions */ | 891 | /* we don't implement logging of alignment exceptions */ |
| 892 | if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) | 892 | if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) |
| 893 | fixed = fix_alignment(regs); | 893 | fixed = fix_alignment(regs); |
| 894 | 894 | ||
| 895 | if (fixed == 1) { | 895 | if (fixed == 1) { |
| 896 | regs->nip += 4; /* skip over emulated instruction */ | 896 | regs->nip += 4; /* skip over emulated instruction */ |
| 897 | emulate_single_step(regs); | 897 | emulate_single_step(regs); |
| 898 | return; | 898 | return; |
| 899 | } | 899 | } |
| 900 | 900 | ||
| 901 | /* Operand address was bad */ | 901 | /* Operand address was bad */ |
| 902 | if (fixed == -EFAULT) { | 902 | if (fixed == -EFAULT) { |
| 903 | sig = SIGSEGV; | 903 | sig = SIGSEGV; |
| 904 | code = SEGV_ACCERR; | 904 | code = SEGV_ACCERR; |
| 905 | } else { | 905 | } else { |
| 906 | sig = SIGBUS; | 906 | sig = SIGBUS; |
| 907 | code = BUS_ADRALN; | 907 | code = BUS_ADRALN; |
| 908 | } | 908 | } |
| 909 | if (user_mode(regs)) | 909 | if (user_mode(regs)) |
| 910 | _exception(sig, regs, code, regs->dar); | 910 | _exception(sig, regs, code, regs->dar); |
| 911 | else | 911 | else |
| 912 | bad_page_fault(regs, regs->dar, sig); | 912 | bad_page_fault(regs, regs->dar, sig); |
| 913 | } | 913 | } |
| 914 | 914 | ||
| 915 | void StackOverflow(struct pt_regs *regs) | 915 | void StackOverflow(struct pt_regs *regs) |
| 916 | { | 916 | { |
| 917 | printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", | 917 | printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", |
| 918 | current, regs->gpr[1]); | 918 | current, regs->gpr[1]); |
| 919 | debugger(regs); | 919 | debugger(regs); |
| 920 | show_regs(regs); | 920 | show_regs(regs); |
| 921 | panic("kernel stack overflow"); | 921 | panic("kernel stack overflow"); |
| 922 | } | 922 | } |
| 923 | 923 | ||
| 924 | void nonrecoverable_exception(struct pt_regs *regs) | 924 | void nonrecoverable_exception(struct pt_regs *regs) |
| 925 | { | 925 | { |
| 926 | printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", | 926 | printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", |
| 927 | regs->nip, regs->msr); | 927 | regs->nip, regs->msr); |
| 928 | debugger(regs); | 928 | debugger(regs); |
| 929 | die("nonrecoverable exception", regs, SIGKILL); | 929 | die("nonrecoverable exception", regs, SIGKILL); |
| 930 | } | 930 | } |
| 931 | 931 | ||
| 932 | void trace_syscall(struct pt_regs *regs) | 932 | void trace_syscall(struct pt_regs *regs) |
| 933 | { | 933 | { |
| 934 | printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", | 934 | printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", |
| 935 | current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], | 935 | current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], |
| 936 | regs->ccr & 0x10000000 ? "Error=" : "", regs->gpr[3], print_tainted()); | 936 | regs->ccr & 0x10000000 ? "Error=" : "", regs->gpr[3], print_tainted()); |
| 937 | } | 937 | } |
| 938 | 938 | ||
| 939 | void kernel_fp_unavailable_exception(struct pt_regs *regs) | 939 | void kernel_fp_unavailable_exception(struct pt_regs *regs) |
| 940 | { | 940 | { |
| 941 | printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " | 941 | printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " |
| 942 | "%lx at %lx\n", regs->trap, regs->nip); | 942 | "%lx at %lx\n", regs->trap, regs->nip); |
| 943 | die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); | 943 | die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); |
| 944 | } | 944 | } |
| 945 | 945 | ||
| 946 | void altivec_unavailable_exception(struct pt_regs *regs) | 946 | void altivec_unavailable_exception(struct pt_regs *regs) |
| 947 | { | 947 | { |
| 948 | if (user_mode(regs)) { | 948 | if (user_mode(regs)) { |
| 949 | /* A user program has executed an altivec instruction, | 949 | /* A user program has executed an altivec instruction, |
| 950 | but this kernel doesn't support altivec. */ | 950 | but this kernel doesn't support altivec. */ |
| 951 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 951 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 952 | return; | 952 | return; |
| 953 | } | 953 | } |
| 954 | 954 | ||
| 955 | printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " | 955 | printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " |
| 956 | "%lx at %lx\n", regs->trap, regs->nip); | 956 | "%lx at %lx\n", regs->trap, regs->nip); |
| 957 | die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); | 957 | die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); |
| 958 | } | 958 | } |
| 959 | 959 | ||
| 960 | void vsx_unavailable_exception(struct pt_regs *regs) | 960 | void vsx_unavailable_exception(struct pt_regs *regs) |
| 961 | { | 961 | { |
| 962 | if (user_mode(regs)) { | 962 | if (user_mode(regs)) { |
| 963 | /* A user program has executed a vsx instruction, | 963 | /* A user program has executed a vsx instruction, |
| 964 | but this kernel doesn't support vsx. */ | 964 | but this kernel doesn't support vsx. */ |
| 965 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 965 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 966 | return; | 966 | return; |
| 967 | } | 967 | } |
| 968 | 968 | ||
| 969 | printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " | 969 | printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " |
| 970 | "%lx at %lx\n", regs->trap, regs->nip); | 970 | "%lx at %lx\n", regs->trap, regs->nip); |
| 971 | die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); | 971 | die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); |
| 972 | } | 972 | } |
| 973 | 973 | ||
| 974 | void performance_monitor_exception(struct pt_regs *regs) | 974 | void performance_monitor_exception(struct pt_regs *regs) |
| 975 | { | 975 | { |
| 976 | perf_irq(regs); | 976 | perf_irq(regs); |
| 977 | } | 977 | } |
| 978 | 978 | ||
| 979 | #ifdef CONFIG_8xx | 979 | #ifdef CONFIG_8xx |
| 980 | void SoftwareEmulation(struct pt_regs *regs) | 980 | void SoftwareEmulation(struct pt_regs *regs) |
| 981 | { | 981 | { |
| 982 | extern int do_mathemu(struct pt_regs *); | 982 | extern int do_mathemu(struct pt_regs *); |
| 983 | extern int Soft_emulate_8xx(struct pt_regs *); | 983 | extern int Soft_emulate_8xx(struct pt_regs *); |
| 984 | #if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU) | 984 | #if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU) |
| 985 | int errcode; | 985 | int errcode; |
| 986 | #endif | 986 | #endif |
| 987 | 987 | ||
| 988 | CHECK_FULL_REGS(regs); | 988 | CHECK_FULL_REGS(regs); |
| 989 | 989 | ||
| 990 | if (!user_mode(regs)) { | 990 | if (!user_mode(regs)) { |
| 991 | debugger(regs); | 991 | debugger(regs); |
| 992 | die("Kernel Mode Software FPU Emulation", regs, SIGFPE); | 992 | die("Kernel Mode Software FPU Emulation", regs, SIGFPE); |
| 993 | } | 993 | } |
| 994 | 994 | ||
| 995 | #ifdef CONFIG_MATH_EMULATION | 995 | #ifdef CONFIG_MATH_EMULATION |
| 996 | errcode = do_mathemu(regs); | 996 | errcode = do_mathemu(regs); |
| 997 | if (errcode >= 0) | 997 | if (errcode >= 0) |
| 998 | PPC_WARN_EMULATED(math); | 998 | PPC_WARN_EMULATED(math, regs); |
| 999 | 999 | ||
| 1000 | switch (errcode) { | 1000 | switch (errcode) { |
| 1001 | case 0: | 1001 | case 0: |
| 1002 | emulate_single_step(regs); | 1002 | emulate_single_step(regs); |
| 1003 | return; | 1003 | return; |
| 1004 | case 1: { | 1004 | case 1: { |
| 1005 | int code = 0; | 1005 | int code = 0; |
| 1006 | code = __parse_fpscr(current->thread.fpscr.val); | 1006 | code = __parse_fpscr(current->thread.fpscr.val); |
| 1007 | _exception(SIGFPE, regs, code, regs->nip); | 1007 | _exception(SIGFPE, regs, code, regs->nip); |
| 1008 | return; | 1008 | return; |
| 1009 | } | 1009 | } |
| 1010 | case -EFAULT: | 1010 | case -EFAULT: |
| 1011 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | 1011 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); |
| 1012 | return; | 1012 | return; |
| 1013 | default: | 1013 | default: |
| 1014 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1014 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 1015 | return; | 1015 | return; |
| 1016 | } | 1016 | } |
| 1017 | 1017 | ||
| 1018 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) | 1018 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) |
| 1019 | errcode = Soft_emulate_8xx(regs); | 1019 | errcode = Soft_emulate_8xx(regs); |
| 1020 | if (errcode >= 0) | 1020 | if (errcode >= 0) |
| 1021 | PPC_WARN_EMULATED(8xx); | 1021 | PPC_WARN_EMULATED(8xx, regs); |
| 1022 | 1022 | ||
| 1023 | switch (errcode) { | 1023 | switch (errcode) { |
| 1024 | case 0: | 1024 | case 0: |
| 1025 | emulate_single_step(regs); | 1025 | emulate_single_step(regs); |
| 1026 | return; | 1026 | return; |
| 1027 | case 1: | 1027 | case 1: |
| 1028 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1028 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 1029 | return; | 1029 | return; |
| 1030 | case -EFAULT: | 1030 | case -EFAULT: |
| 1031 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | 1031 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); |
| 1032 | return; | 1032 | return; |
| 1033 | } | 1033 | } |
| 1034 | #else | 1034 | #else |
| 1035 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1035 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 1036 | #endif | 1036 | #endif |
| 1037 | } | 1037 | } |
| 1038 | #endif /* CONFIG_8xx */ | 1038 | #endif /* CONFIG_8xx */ |
| 1039 | 1039 | ||
| 1040 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 1040 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) |
| 1041 | 1041 | ||
| 1042 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | 1042 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) |
| 1043 | { | 1043 | { |
| 1044 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while | 1044 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while |
| 1045 | * on server, it stops on the target of the branch. In order to simulate | 1045 | * on server, it stops on the target of the branch. In order to simulate |
| 1046 | * the server behaviour, we thus restart right away with a single step | 1046 | * the server behaviour, we thus restart right away with a single step |
| 1047 | * instead of stopping here when hitting a BT | 1047 | * instead of stopping here when hitting a BT |
| 1048 | */ | 1048 | */ |
| 1049 | if (debug_status & DBSR_BT) { | 1049 | if (debug_status & DBSR_BT) { |
| 1050 | regs->msr &= ~MSR_DE; | 1050 | regs->msr &= ~MSR_DE; |
| 1051 | 1051 | ||
| 1052 | /* Disable BT */ | 1052 | /* Disable BT */ |
| 1053 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); | 1053 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); |
| 1054 | /* Clear the BT event */ | 1054 | /* Clear the BT event */ |
| 1055 | mtspr(SPRN_DBSR, DBSR_BT); | 1055 | mtspr(SPRN_DBSR, DBSR_BT); |
| 1056 | 1056 | ||
| 1057 | /* Do the single step trick only when coming from userspace */ | 1057 | /* Do the single step trick only when coming from userspace */ |
| 1058 | if (user_mode(regs)) { | 1058 | if (user_mode(regs)) { |
| 1059 | current->thread.dbcr0 &= ~DBCR0_BT; | 1059 | current->thread.dbcr0 &= ~DBCR0_BT; |
| 1060 | current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; | 1060 | current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; |
| 1061 | regs->msr |= MSR_DE; | 1061 | regs->msr |= MSR_DE; |
| 1062 | return; | 1062 | return; |
| 1063 | } | 1063 | } |
| 1064 | 1064 | ||
| 1065 | if (notify_die(DIE_SSTEP, "block_step", regs, 5, | 1065 | if (notify_die(DIE_SSTEP, "block_step", regs, 5, |
| 1066 | 5, SIGTRAP) == NOTIFY_STOP) { | 1066 | 5, SIGTRAP) == NOTIFY_STOP) { |
| 1067 | return; | 1067 | return; |
| 1068 | } | 1068 | } |
| 1069 | if (debugger_sstep(regs)) | 1069 | if (debugger_sstep(regs)) |
| 1070 | return; | 1070 | return; |
| 1071 | } else if (debug_status & DBSR_IC) { /* Instruction complete */ | 1071 | } else if (debug_status & DBSR_IC) { /* Instruction complete */ |
| 1072 | regs->msr &= ~MSR_DE; | 1072 | regs->msr &= ~MSR_DE; |
| 1073 | 1073 | ||
| 1074 | /* Disable instruction completion */ | 1074 | /* Disable instruction completion */ |
| 1075 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); | 1075 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); |
| 1076 | /* Clear the instruction completion event */ | 1076 | /* Clear the instruction completion event */ |
| 1077 | mtspr(SPRN_DBSR, DBSR_IC); | 1077 | mtspr(SPRN_DBSR, DBSR_IC); |
| 1078 | 1078 | ||
| 1079 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, | 1079 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, |
| 1080 | 5, SIGTRAP) == NOTIFY_STOP) { | 1080 | 5, SIGTRAP) == NOTIFY_STOP) { |
| 1081 | return; | 1081 | return; |
| 1082 | } | 1082 | } |
| 1083 | 1083 | ||
| 1084 | if (debugger_sstep(regs)) | 1084 | if (debugger_sstep(regs)) |
| 1085 | return; | 1085 | return; |
| 1086 | 1086 | ||
| 1087 | if (user_mode(regs)) | 1087 | if (user_mode(regs)) |
| 1088 | current->thread.dbcr0 &= ~(DBCR0_IC); | 1088 | current->thread.dbcr0 &= ~(DBCR0_IC); |
| 1089 | 1089 | ||
| 1090 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | 1090 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); |
| 1091 | } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { | 1091 | } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { |
| 1092 | regs->msr &= ~MSR_DE; | 1092 | regs->msr &= ~MSR_DE; |
| 1093 | 1093 | ||
| 1094 | if (user_mode(regs)) { | 1094 | if (user_mode(regs)) { |
| 1095 | current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | | 1095 | current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | |
| 1096 | DBCR0_IDM); | 1096 | DBCR0_IDM); |
| 1097 | } else { | 1097 | } else { |
| 1098 | /* Disable DAC interrupts */ | 1098 | /* Disable DAC interrupts */ |
| 1099 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | | 1099 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | |
| 1100 | DBSR_DAC1W | DBCR0_IDM)); | 1100 | DBSR_DAC1W | DBCR0_IDM)); |
| 1101 | 1101 | ||
| 1102 | /* Clear the DAC event */ | 1102 | /* Clear the DAC event */ |
| 1103 | mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W)); | 1103 | mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W)); |
| 1104 | } | 1104 | } |
| 1105 | /* Setup and send the trap to the handler */ | 1105 | /* Setup and send the trap to the handler */ |
| 1106 | do_dabr(regs, mfspr(SPRN_DAC1), debug_status); | 1106 | do_dabr(regs, mfspr(SPRN_DAC1), debug_status); |
| 1107 | } | 1107 | } |
| 1108 | } | 1108 | } |
| 1109 | #endif /* CONFIG_40x || CONFIG_BOOKE */ | 1109 | #endif /* CONFIG_40x || CONFIG_BOOKE */ |
| 1110 | 1110 | ||
| 1111 | #if !defined(CONFIG_TAU_INT) | 1111 | #if !defined(CONFIG_TAU_INT) |
| 1112 | void TAUException(struct pt_regs *regs) | 1112 | void TAUException(struct pt_regs *regs) |
| 1113 | { | 1113 | { |
| 1114 | printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", | 1114 | printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", |
| 1115 | regs->nip, regs->msr, regs->trap, print_tainted()); | 1115 | regs->nip, regs->msr, regs->trap, print_tainted()); |
| 1116 | } | 1116 | } |
| 1117 | #endif /* CONFIG_TAU_INT */ | 1117 | #endif /* CONFIG_TAU_INT */ |
| 1118 | 1118 | ||
| 1119 | #ifdef CONFIG_ALTIVEC | 1119 | #ifdef CONFIG_ALTIVEC |
| 1120 | void altivec_assist_exception(struct pt_regs *regs) | 1120 | void altivec_assist_exception(struct pt_regs *regs) |
| 1121 | { | 1121 | { |
| 1122 | int err; | 1122 | int err; |
| 1123 | 1123 | ||
| 1124 | if (!user_mode(regs)) { | 1124 | if (!user_mode(regs)) { |
| 1125 | printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" | 1125 | printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" |
| 1126 | " at %lx\n", regs->nip); | 1126 | " at %lx\n", regs->nip); |
| 1127 | die("Kernel VMX/Altivec assist exception", regs, SIGILL); | 1127 | die("Kernel VMX/Altivec assist exception", regs, SIGILL); |
| 1128 | } | 1128 | } |
| 1129 | 1129 | ||
| 1130 | flush_altivec_to_thread(current); | 1130 | flush_altivec_to_thread(current); |
| 1131 | 1131 | ||
| 1132 | PPC_WARN_EMULATED(altivec); | 1132 | PPC_WARN_EMULATED(altivec, regs); |
| 1133 | err = emulate_altivec(regs); | 1133 | err = emulate_altivec(regs); |
| 1134 | if (err == 0) { | 1134 | if (err == 0) { |
| 1135 | regs->nip += 4; /* skip emulated instruction */ | 1135 | regs->nip += 4; /* skip emulated instruction */ |
| 1136 | emulate_single_step(regs); | 1136 | emulate_single_step(regs); |
| 1137 | return; | 1137 | return; |
| 1138 | } | 1138 | } |
| 1139 | 1139 | ||
| 1140 | if (err == -EFAULT) { | 1140 | if (err == -EFAULT) { |
| 1141 | /* got an error reading the instruction */ | 1141 | /* got an error reading the instruction */ |
| 1142 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); | 1142 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); |
| 1143 | } else { | 1143 | } else { |
| 1144 | /* didn't recognize the instruction */ | 1144 | /* didn't recognize the instruction */ |
| 1145 | /* XXX quick hack for now: set the non-Java bit in the VSCR */ | 1145 | /* XXX quick hack for now: set the non-Java bit in the VSCR */ |
| 1146 | if (printk_ratelimit()) | 1146 | if (printk_ratelimit()) |
| 1147 | printk(KERN_ERR "Unrecognized altivec instruction " | 1147 | printk(KERN_ERR "Unrecognized altivec instruction " |
| 1148 | "in %s at %lx\n", current->comm, regs->nip); | 1148 | "in %s at %lx\n", current->comm, regs->nip); |
| 1149 | current->thread.vscr.u[3] |= 0x10000; | 1149 | current->thread.vscr.u[3] |= 0x10000; |
| 1150 | } | 1150 | } |
| 1151 | } | 1151 | } |
| 1152 | #endif /* CONFIG_ALTIVEC */ | 1152 | #endif /* CONFIG_ALTIVEC */ |
| 1153 | 1153 | ||
| 1154 | #ifdef CONFIG_VSX | 1154 | #ifdef CONFIG_VSX |
| 1155 | void vsx_assist_exception(struct pt_regs *regs) | 1155 | void vsx_assist_exception(struct pt_regs *regs) |
| 1156 | { | 1156 | { |
| 1157 | if (!user_mode(regs)) { | 1157 | if (!user_mode(regs)) { |
| 1158 | printk(KERN_EMERG "VSX assist exception in kernel mode" | 1158 | printk(KERN_EMERG "VSX assist exception in kernel mode" |
| 1159 | " at %lx\n", regs->nip); | 1159 | " at %lx\n", regs->nip); |
| 1160 | die("Kernel VSX assist exception", regs, SIGILL); | 1160 | die("Kernel VSX assist exception", regs, SIGILL); |
| 1161 | } | 1161 | } |
| 1162 | 1162 | ||
| 1163 | flush_vsx_to_thread(current); | 1163 | flush_vsx_to_thread(current); |
| 1164 | printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip); | 1164 | printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip); |
| 1165 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1165 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
| 1166 | } | 1166 | } |
| 1167 | #endif /* CONFIG_VSX */ | 1167 | #endif /* CONFIG_VSX */ |
| 1168 | 1168 | ||
| 1169 | #ifdef CONFIG_FSL_BOOKE | 1169 | #ifdef CONFIG_FSL_BOOKE |
| 1170 | 1170 | ||
| 1171 | void doorbell_exception(struct pt_regs *regs) | 1171 | void doorbell_exception(struct pt_regs *regs) |
| 1172 | { | 1172 | { |
| 1173 | #ifdef CONFIG_SMP | 1173 | #ifdef CONFIG_SMP |
| 1174 | int cpu = smp_processor_id(); | 1174 | int cpu = smp_processor_id(); |
| 1175 | int msg; | 1175 | int msg; |
| 1176 | 1176 | ||
| 1177 | if (num_online_cpus() < 2) | 1177 | if (num_online_cpus() < 2) |
| 1178 | return; | 1178 | return; |
| 1179 | 1179 | ||
| 1180 | for (msg = 0; msg < 4; msg++) | 1180 | for (msg = 0; msg < 4; msg++) |
| 1181 | if (test_and_clear_bit(msg, &dbell_smp_message[cpu])) | 1181 | if (test_and_clear_bit(msg, &dbell_smp_message[cpu])) |
| 1182 | smp_message_recv(msg); | 1182 | smp_message_recv(msg); |
| 1183 | #else | 1183 | #else |
| 1184 | printk(KERN_WARNING "Received doorbell on non-smp system\n"); | 1184 | printk(KERN_WARNING "Received doorbell on non-smp system\n"); |
| 1185 | #endif | 1185 | #endif |
| 1186 | } | 1186 | } |
| 1187 | 1187 | ||
| 1188 | void CacheLockingException(struct pt_regs *regs, unsigned long address, | 1188 | void CacheLockingException(struct pt_regs *regs, unsigned long address, |
| 1189 | unsigned long error_code) | 1189 | unsigned long error_code) |
| 1190 | { | 1190 | { |
| 1191 | /* We treat cache locking instructions from the user | 1191 | /* We treat cache locking instructions from the user |
| 1192 | * as priv ops; in the future we could try to do | 1192 | * as priv ops; in the future we could try to do |
| 1193 | * something smarter | 1193 | * something smarter |
| 1194 | */ | 1194 | */ |
| 1195 | if (error_code & (ESR_DLK|ESR_ILK)) | 1195 | if (error_code & (ESR_DLK|ESR_ILK)) |
| 1196 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); | 1196 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); |
| 1197 | return; | 1197 | return; |
| 1198 | } | 1198 | } |
| 1199 | #endif /* CONFIG_FSL_BOOKE */ | 1199 | #endif /* CONFIG_FSL_BOOKE */ |
| 1200 | 1200 | ||
| 1201 | #ifdef CONFIG_SPE | 1201 | #ifdef CONFIG_SPE |
| 1202 | void SPEFloatingPointException(struct pt_regs *regs) | 1202 | void SPEFloatingPointException(struct pt_regs *regs) |
| 1203 | { | 1203 | { |
| 1204 | extern int do_spe_mathemu(struct pt_regs *regs); | 1204 | extern int do_spe_mathemu(struct pt_regs *regs); |
| 1205 | unsigned long spefscr; | 1205 | unsigned long spefscr; |
| 1206 | int fpexc_mode; | 1206 | int fpexc_mode; |
| 1207 | int code = 0; | 1207 | int code = 0; |
| 1208 | int err; | 1208 | int err; |
| 1209 | 1209 | ||
| 1210 | preempt_disable(); | 1210 | preempt_disable(); |
| 1211 | if (regs->msr & MSR_SPE) | 1211 | if (regs->msr & MSR_SPE) |
| 1212 | giveup_spe(current); | 1212 | giveup_spe(current); |
| 1213 | preempt_enable(); | 1213 | preempt_enable(); |
| 1214 | 1214 | ||
| 1215 | spefscr = current->thread.spefscr; | 1215 | spefscr = current->thread.spefscr; |
| 1216 | fpexc_mode = current->thread.fpexc_mode; | 1216 | fpexc_mode = current->thread.fpexc_mode; |
| 1217 | 1217 | ||
| 1218 | if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { | 1218 | if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { |
| 1219 | code = FPE_FLTOVF; | 1219 | code = FPE_FLTOVF; |
| 1220 | } | 1220 | } |
| 1221 | else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { | 1221 | else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { |
| 1222 | code = FPE_FLTUND; | 1222 | code = FPE_FLTUND; |
| 1223 | } | 1223 | } |
| 1224 | else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) | 1224 | else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) |
| 1225 | code = FPE_FLTDIV; | 1225 | code = FPE_FLTDIV; |
| 1226 | else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { | 1226 | else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { |
| 1227 | code = FPE_FLTINV; | 1227 | code = FPE_FLTINV; |
| 1228 | } | 1228 | } |
| 1229 | else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) | 1229 | else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) |
| 1230 | code = FPE_FLTRES; | 1230 | code = FPE_FLTRES; |
| 1231 | 1231 | ||
| 1232 | err = do_spe_mathemu(regs); | 1232 | err = do_spe_mathemu(regs); |
| 1233 | if (err == 0) { | 1233 | if (err == 0) { |
| 1234 | regs->nip += 4; /* skip emulated instruction */ | 1234 | regs->nip += 4; /* skip emulated instruction */ |
| 1235 | emulate_single_step(regs); | 1235 | emulate_single_step(regs); |
| 1236 | return; | 1236 | return; |
| 1237 | } | 1237 | } |
| 1238 | 1238 | ||
| 1239 | if (err == -EFAULT) { | 1239 | if (err == -EFAULT) { |
| 1240 | /* got an error reading the instruction */ | 1240 | /* got an error reading the instruction */ |
| 1241 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); | 1241 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); |
| 1242 | } else if (err == -EINVAL) { | 1242 | } else if (err == -EINVAL) { |
| 1243 | /* didn't recognize the instruction */ | 1243 | /* didn't recognize the instruction */ |
| 1244 | printk(KERN_ERR "unrecognized spe instruction " | 1244 | printk(KERN_ERR "unrecognized spe instruction " |
| 1245 | "in %s at %lx\n", current->comm, regs->nip); | 1245 | "in %s at %lx\n", current->comm, regs->nip); |
| 1246 | } else { | 1246 | } else { |
| 1247 | _exception(SIGFPE, regs, code, regs->nip); | 1247 | _exception(SIGFPE, regs, code, regs->nip); |
| 1248 | } | 1248 | } |
| 1249 | 1249 | ||
| 1250 | return; | 1250 | return; |
| 1251 | } | 1251 | } |
| 1252 | 1252 | ||
| 1253 | void SPEFloatingPointRoundException(struct pt_regs *regs) | 1253 | void SPEFloatingPointRoundException(struct pt_regs *regs) |
| 1254 | { | 1254 | { |
| 1255 | extern int speround_handler(struct pt_regs *regs); | 1255 | extern int speround_handler(struct pt_regs *regs); |
| 1256 | int err; | 1256 | int err; |
| 1257 | 1257 | ||
| 1258 | preempt_disable(); | 1258 | preempt_disable(); |
| 1259 | if (regs->msr & MSR_SPE) | 1259 | if (regs->msr & MSR_SPE) |
| 1260 | giveup_spe(current); | 1260 | giveup_spe(current); |
| 1261 | preempt_enable(); | 1261 | preempt_enable(); |
| 1262 | 1262 | ||
| 1263 | regs->nip -= 4; | 1263 | regs->nip -= 4; |
| 1264 | err = speround_handler(regs); | 1264 | err = speround_handler(regs); |
| 1265 | if (err == 0) { | 1265 | if (err == 0) { |
| 1266 | regs->nip += 4; /* skip emulated instruction */ | 1266 | regs->nip += 4; /* skip emulated instruction */ |
| 1267 | emulate_single_step(regs); | 1267 | emulate_single_step(regs); |
| 1268 | return; | 1268 | return; |
| 1269 | } | 1269 | } |
| 1270 | 1270 | ||
| 1271 | if (err == -EFAULT) { | 1271 | if (err == -EFAULT) { |
| 1272 | /* got an error reading the instruction */ | 1272 | /* got an error reading the instruction */ |
| 1273 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); | 1273 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); |
| 1274 | } else if (err == -EINVAL) { | 1274 | } else if (err == -EINVAL) { |
| 1275 | /* didn't recognize the instruction */ | 1275 | /* didn't recognize the instruction */ |
| 1276 | printk(KERN_ERR "unrecognized spe instruction " | 1276 | printk(KERN_ERR "unrecognized spe instruction " |
| 1277 | "in %s at %lx\n", current->comm, regs->nip); | 1277 | "in %s at %lx\n", current->comm, regs->nip); |
| 1278 | } else { | 1278 | } else { |
| 1279 | _exception(SIGFPE, regs, 0, regs->nip); | 1279 | _exception(SIGFPE, regs, 0, regs->nip); |
| 1280 | return; | 1280 | return; |
| 1281 | } | 1281 | } |
| 1282 | } | 1282 | } |
| 1283 | #endif | 1283 | #endif |
| 1284 | 1284 | ||
| 1285 | /* | 1285 | /* |
| 1286 | * We enter here if we get an unrecoverable exception, that is, one | 1286 | * We enter here if we get an unrecoverable exception, that is, one |
| 1287 | * that happened at a point where the RI (recoverable interrupt) bit | 1287 | * that happened at a point where the RI (recoverable interrupt) bit |
| 1288 | * in the MSR is 0. This indicates that SRR0/1 are live, and that | 1288 | * in the MSR is 0. This indicates that SRR0/1 are live, and that |
| 1289 | * we therefore lost state by taking this exception. | 1289 | * we therefore lost state by taking this exception. |
| 1290 | */ | 1290 | */ |
| 1291 | void unrecoverable_exception(struct pt_regs *regs) | 1291 | void unrecoverable_exception(struct pt_regs *regs) |
| 1292 | { | 1292 | { |
| 1293 | printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", | 1293 | printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", |
| 1294 | regs->trap, regs->nip); | 1294 | regs->trap, regs->nip); |
| 1295 | die("Unrecoverable exception", regs, SIGABRT); | 1295 | die("Unrecoverable exception", regs, SIGABRT); |
| 1296 | } | 1296 | } |
| 1297 | 1297 | ||
| 1298 | #ifdef CONFIG_BOOKE_WDT | 1298 | #ifdef CONFIG_BOOKE_WDT |
| 1299 | /* | 1299 | /* |
| 1300 | * Default handler for a Watchdog exception, | 1300 | * Default handler for a Watchdog exception, |
| 1301 | * spins until a reboot occurs | 1301 | * spins until a reboot occurs |
| 1302 | */ | 1302 | */ |
| 1303 | void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) | 1303 | void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) |
| 1304 | { | 1304 | { |
| 1305 | /* Generic WatchdogHandler, implement your own */ | 1305 | /* Generic WatchdogHandler, implement your own */ |
| 1306 | mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); | 1306 | mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); |
| 1307 | return; | 1307 | return; |
| 1308 | } | 1308 | } |
| 1309 | 1309 | ||
| 1310 | void WatchdogException(struct pt_regs *regs) | 1310 | void WatchdogException(struct pt_regs *regs) |
| 1311 | { | 1311 | { |
| 1312 | printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); | 1312 | printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); |
| 1313 | WatchdogHandler(regs); | 1313 | WatchdogHandler(regs); |
| 1314 | } | 1314 | } |
| 1315 | #endif | 1315 | #endif |
| 1316 | 1316 | ||
| 1317 | /* | 1317 | /* |
| 1318 | * We enter here if we discover during exception entry that we are | 1318 | * We enter here if we discover during exception entry that we are |
| 1319 | * running in supervisor mode with a userspace value in the stack pointer. | 1319 | * running in supervisor mode with a userspace value in the stack pointer. |
| 1320 | */ | 1320 | */ |
| 1321 | void kernel_bad_stack(struct pt_regs *regs) | 1321 | void kernel_bad_stack(struct pt_regs *regs) |
| 1322 | { | 1322 | { |
| 1323 | printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", | 1323 | printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", |
| 1324 | regs->gpr[1], regs->nip); | 1324 | regs->gpr[1], regs->nip); |
| 1325 | die("Bad kernel stack pointer", regs, SIGABRT); | 1325 | die("Bad kernel stack pointer", regs, SIGABRT); |
| 1326 | } | 1326 | } |
| 1327 | 1327 | ||
| 1328 | void __init trap_init(void) | 1328 | void __init trap_init(void) |
| 1329 | { | 1329 | { |
| 1330 | } | 1330 | } |
| 1331 | 1331 | ||
| 1332 | 1332 | ||
| 1333 | #ifdef CONFIG_PPC_EMULATED_STATS | 1333 | #ifdef CONFIG_PPC_EMULATED_STATS |
| 1334 | 1334 | ||
| 1335 | #define WARN_EMULATED_SETUP(type) .type = { .name = #type } | 1335 | #define WARN_EMULATED_SETUP(type) .type = { .name = #type } |
| 1336 | 1336 | ||
| 1337 | struct ppc_emulated ppc_emulated = { | 1337 | struct ppc_emulated ppc_emulated = { |
| 1338 | #ifdef CONFIG_ALTIVEC | 1338 | #ifdef CONFIG_ALTIVEC |
| 1339 | WARN_EMULATED_SETUP(altivec), | 1339 | WARN_EMULATED_SETUP(altivec), |
| 1340 | #endif | 1340 | #endif |
| 1341 | WARN_EMULATED_SETUP(dcba), | 1341 | WARN_EMULATED_SETUP(dcba), |
| 1342 | WARN_EMULATED_SETUP(dcbz), | 1342 | WARN_EMULATED_SETUP(dcbz), |
| 1343 | WARN_EMULATED_SETUP(fp_pair), | 1343 | WARN_EMULATED_SETUP(fp_pair), |
| 1344 | WARN_EMULATED_SETUP(isel), | 1344 | WARN_EMULATED_SETUP(isel), |
| 1345 | WARN_EMULATED_SETUP(mcrxr), | 1345 | WARN_EMULATED_SETUP(mcrxr), |
| 1346 | WARN_EMULATED_SETUP(mfpvr), | 1346 | WARN_EMULATED_SETUP(mfpvr), |
| 1347 | WARN_EMULATED_SETUP(multiple), | 1347 | WARN_EMULATED_SETUP(multiple), |
| 1348 | WARN_EMULATED_SETUP(popcntb), | 1348 | WARN_EMULATED_SETUP(popcntb), |
| 1349 | WARN_EMULATED_SETUP(spe), | 1349 | WARN_EMULATED_SETUP(spe), |
| 1350 | WARN_EMULATED_SETUP(string), | 1350 | WARN_EMULATED_SETUP(string), |
| 1351 | WARN_EMULATED_SETUP(unaligned), | 1351 | WARN_EMULATED_SETUP(unaligned), |
| 1352 | #ifdef CONFIG_MATH_EMULATION | 1352 | #ifdef CONFIG_MATH_EMULATION |
| 1353 | WARN_EMULATED_SETUP(math), | 1353 | WARN_EMULATED_SETUP(math), |
| 1354 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) | 1354 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) |
| 1355 | WARN_EMULATED_SETUP(8xx), | 1355 | WARN_EMULATED_SETUP(8xx), |
| 1356 | #endif | 1356 | #endif |
| 1357 | #ifdef CONFIG_VSX | 1357 | #ifdef CONFIG_VSX |
| 1358 | WARN_EMULATED_SETUP(vsx), | 1358 | WARN_EMULATED_SETUP(vsx), |
| 1359 | #endif | 1359 | #endif |
| 1360 | }; | 1360 | }; |
| 1361 | 1361 | ||
| 1362 | u32 ppc_warn_emulated; | 1362 | u32 ppc_warn_emulated; |
| 1363 | 1363 | ||
| 1364 | void ppc_warn_emulated_print(const char *type) | 1364 | void ppc_warn_emulated_print(const char *type) |
| 1365 | { | 1365 | { |
| 1366 | if (printk_ratelimit()) | 1366 | if (printk_ratelimit()) |
| 1367 | pr_warning("%s used emulated %s instruction\n", current->comm, | 1367 | pr_warning("%s used emulated %s instruction\n", current->comm, |
| 1368 | type); | 1368 | type); |
| 1369 | } | 1369 | } |
| 1370 | 1370 | ||
| 1371 | static int __init ppc_warn_emulated_init(void) | 1371 | static int __init ppc_warn_emulated_init(void) |
| 1372 | { | 1372 | { |
| 1373 | struct dentry *dir, *d; | 1373 | struct dentry *dir, *d; |
| 1374 | unsigned int i; | 1374 | unsigned int i; |
| 1375 | struct ppc_emulated_entry *entries = (void *)&ppc_emulated; | 1375 | struct ppc_emulated_entry *entries = (void *)&ppc_emulated; |
| 1376 | 1376 | ||
| 1377 | if (!powerpc_debugfs_root) | 1377 | if (!powerpc_debugfs_root) |
| 1378 | return -ENODEV; | 1378 | return -ENODEV; |
| 1379 | 1379 | ||
| 1380 | dir = debugfs_create_dir("emulated_instructions", | 1380 | dir = debugfs_create_dir("emulated_instructions", |
| 1381 | powerpc_debugfs_root); | 1381 | powerpc_debugfs_root); |
| 1382 | if (!dir) | 1382 | if (!dir) |
| 1383 | return -ENOMEM; | 1383 | return -ENOMEM; |
| 1384 | 1384 | ||
| 1385 | d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, | 1385 | d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, |
| 1386 | &ppc_warn_emulated); | 1386 | &ppc_warn_emulated); |
| 1387 | if (!d) | 1387 | if (!d) |
| 1388 | goto fail; | 1388 | goto fail; |
| 1389 | 1389 | ||
| 1390 | for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { | 1390 | for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { |
| 1391 | d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, | 1391 | d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, |
| 1392 | (u32 *)&entries[i].val.counter); | 1392 | (u32 *)&entries[i].val.counter); |
| 1393 | if (!d) | 1393 | if (!d) |
| 1394 | goto fail; | 1394 | goto fail; |
| 1395 | } | 1395 | } |
| 1396 | 1396 | ||
| 1397 | return 0; | 1397 | return 0; |
| 1398 | 1398 | ||
| 1399 | fail: | 1399 | fail: |
| 1400 | debugfs_remove_recursive(dir); | 1400 | debugfs_remove_recursive(dir); |
| 1401 | return -ENOMEM; | 1401 | return -ENOMEM; |
| 1402 | } | 1402 | } |
| 1403 | 1403 | ||
| 1404 | device_initcall(ppc_warn_emulated_init); | 1404 | device_initcall(ppc_warn_emulated_init); |
| 1405 | 1405 | ||
| 1406 | #endif /* CONFIG_PPC_EMULATED_STATS */ | 1406 | #endif /* CONFIG_PPC_EMULATED_STATS */ |
| 1407 | 1407 |
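The PPC_WARN_EMULATED(type, regs) calls above (note the new regs argument added by this commit) and the ppc_emulated table with its per-type name/counter pairs are tied together in arch/powerpc/include/asm/emulated_ops.h, which is in the changed-file list but not shown in this excerpt. A minimal sketch of the likely shape of that hook, assuming an atomic_t counter per entry and assuming the regs argument feeds a PERF_COUNT_SW_EMULATION_FAULTS software event (both inferred, not quoted from the commit):

/* Sketch only -- not the actual contents of asm/emulated_ops.h. */
#define PPC_WARN_EMULATED(type, regs)					\
	do {								\
		/* assumed: report an emulation fault to perf */	\
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,		\
			      1, 0, regs, 0);				\
		atomic_inc(&ppc_emulated.type.val);			\
		if (ppc_warn_emulated)					\
			ppc_warn_emulated_print(ppc_emulated.type.name);\
	} while (0)

This keeps the fast path to a single atomic increment, with the rate-limited warning and the debugfs counters layered on top.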
arch/powerpc/lib/copypage_64.S
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2008 Mark Nelson, IBM Corp. | 2 | * Copyright (C) 2008 Mark Nelson, IBM Corp. |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
| 5 | * modify it under the terms of the GNU General Public License | 5 | * modify it under the terms of the GNU General Public License |
| 6 | * as published by the Free Software Foundation; either version | 6 | * as published by the Free Software Foundation; either version |
| 7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
| 8 | */ | 8 | */ |
| 9 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
| 10 | #include <asm/ppc_asm.h> | 10 | #include <asm/ppc_asm.h> |
| 11 | #include <asm/asm-offsets.h> | 11 | #include <asm/asm-offsets.h> |
| 12 | 12 | ||
| 13 | .section ".toc","aw" | 13 | .section ".toc","aw" |
| 14 | PPC64_CACHES: | 14 | PPC64_CACHES: |
| 15 | .tc ppc64_caches[TC],ppc64_caches | 15 | .tc ppc64_caches[TC],ppc64_caches |
| 16 | .section ".text" | 16 | .section ".text" |
| 17 | 17 | ||
| 18 | 18 | ||
| 19 | _GLOBAL(copy_4K_page) | 19 | _GLOBAL(copy_4K_page) |
| 20 | li r5,4096 /* 4K page size */ | 20 | li r5,4096 /* 4K page size */ |
| 21 | BEGIN_FTR_SECTION | 21 | BEGIN_FTR_SECTION |
| 22 | ld r10,PPC64_CACHES@toc(r2) | 22 | ld r10,PPC64_CACHES@toc(r2) |
| 23 | lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */ | 23 | lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */ |
| 24 | lwz r12,DCACHEL1LINESIZE(r10) /* get cache line size */ | 24 | lwz r12,DCACHEL1LINESIZE(r10) /* get cache line size */ |
| 25 | li r9,0 | 25 | li r9,0 |
| 26 | srd r8,r5,r11 | 26 | srd r8,r5,r11 |
| 27 | 27 | ||
| 28 | mtctr r8 | 28 | mtctr r8 |
| 29 | setup: | 29 | .Lsetup: |
| 30 | dcbt r9,r4 | 30 | dcbt r9,r4 |
| 31 | dcbz r9,r3 | 31 | dcbz r9,r3 |
| 32 | add r9,r9,r12 | 32 | add r9,r9,r12 |
| 33 | bdnz setup | 33 | bdnz .Lsetup |
| 34 | END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ) | 34 | END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ) |
| 35 | addi r3,r3,-8 | 35 | addi r3,r3,-8 |
| 36 | srdi r8,r5,7 /* page is copied in 128 byte strides */ | 36 | srdi r8,r5,7 /* page is copied in 128 byte strides */ |
| 37 | addi r8,r8,-1 /* one stride copied outside loop */ | 37 | addi r8,r8,-1 /* one stride copied outside loop */ |
| 38 | 38 | ||
| 39 | mtctr r8 | 39 | mtctr r8 |
| 40 | 40 | ||
| 41 | ld r5,0(r4) | 41 | ld r5,0(r4) |
| 42 | ld r6,8(r4) | 42 | ld r6,8(r4) |
| 43 | ld r7,16(r4) | 43 | ld r7,16(r4) |
| 44 | ldu r8,24(r4) | 44 | ldu r8,24(r4) |
| 45 | 1: std r5,8(r3) | 45 | 1: std r5,8(r3) |
| 46 | ld r9,8(r4) | 46 | ld r9,8(r4) |
| 47 | std r6,16(r3) | 47 | std r6,16(r3) |
| 48 | ld r10,16(r4) | 48 | ld r10,16(r4) |
| 49 | std r7,24(r3) | 49 | std r7,24(r3) |
| 50 | ld r11,24(r4) | 50 | ld r11,24(r4) |
| 51 | std r8,32(r3) | 51 | std r8,32(r3) |
| 52 | ld r12,32(r4) | 52 | ld r12,32(r4) |
| 53 | std r9,40(r3) | 53 | std r9,40(r3) |
| 54 | ld r5,40(r4) | 54 | ld r5,40(r4) |
| 55 | std r10,48(r3) | 55 | std r10,48(r3) |
| 56 | ld r6,48(r4) | 56 | ld r6,48(r4) |
| 57 | std r11,56(r3) | 57 | std r11,56(r3) |
| 58 | ld r7,56(r4) | 58 | ld r7,56(r4) |
| 59 | std r12,64(r3) | 59 | std r12,64(r3) |
| 60 | ld r8,64(r4) | 60 | ld r8,64(r4) |
| 61 | std r5,72(r3) | 61 | std r5,72(r3) |
| 62 | ld r9,72(r4) | 62 | ld r9,72(r4) |
| 63 | std r6,80(r3) | 63 | std r6,80(r3) |
| 64 | ld r10,80(r4) | 64 | ld r10,80(r4) |
| 65 | std r7,88(r3) | 65 | std r7,88(r3) |
| 66 | ld r11,88(r4) | 66 | ld r11,88(r4) |
| 67 | std r8,96(r3) | 67 | std r8,96(r3) |
| 68 | ld r12,96(r4) | 68 | ld r12,96(r4) |
| 69 | std r9,104(r3) | 69 | std r9,104(r3) |
| 70 | ld r5,104(r4) | 70 | ld r5,104(r4) |
| 71 | std r10,112(r3) | 71 | std r10,112(r3) |
| 72 | ld r6,112(r4) | 72 | ld r6,112(r4) |
| 73 | std r11,120(r3) | 73 | std r11,120(r3) |
| 74 | ld r7,120(r4) | 74 | ld r7,120(r4) |
| 75 | stdu r12,128(r3) | 75 | stdu r12,128(r3) |
| 76 | ldu r8,128(r4) | 76 | ldu r8,128(r4) |
| 77 | bdnz 1b | 77 | bdnz 1b |
| 78 | 78 | ||
| 79 | std r5,8(r3) | 79 | std r5,8(r3) |
| 80 | ld r9,8(r4) | 80 | ld r9,8(r4) |
| 81 | std r6,16(r3) | 81 | std r6,16(r3) |
| 82 | ld r10,16(r4) | 82 | ld r10,16(r4) |
| 83 | std r7,24(r3) | 83 | std r7,24(r3) |
| 84 | ld r11,24(r4) | 84 | ld r11,24(r4) |
| 85 | std r8,32(r3) | 85 | std r8,32(r3) |
| 86 | ld r12,32(r4) | 86 | ld r12,32(r4) |
| 87 | std r9,40(r3) | 87 | std r9,40(r3) |
| 88 | ld r5,40(r4) | 88 | ld r5,40(r4) |
| 89 | std r10,48(r3) | 89 | std r10,48(r3) |
| 90 | ld r6,48(r4) | 90 | ld r6,48(r4) |
| 91 | std r11,56(r3) | 91 | std r11,56(r3) |
| 92 | ld r7,56(r4) | 92 | ld r7,56(r4) |
| 93 | std r12,64(r3) | 93 | std r12,64(r3) |
| 94 | ld r8,64(r4) | 94 | ld r8,64(r4) |
| 95 | std r5,72(r3) | 95 | std r5,72(r3) |
| 96 | ld r9,72(r4) | 96 | ld r9,72(r4) |
| 97 | std r6,80(r3) | 97 | std r6,80(r3) |
| 98 | ld r10,80(r4) | 98 | ld r10,80(r4) |
| 99 | std r7,88(r3) | 99 | std r7,88(r3) |
| 100 | ld r11,88(r4) | 100 | ld r11,88(r4) |
| 101 | std r8,96(r3) | 101 | std r8,96(r3) |
| 102 | ld r12,96(r4) | 102 | ld r12,96(r4) |
| 103 | std r9,104(r3) | 103 | std r9,104(r3) |
| 104 | std r10,112(r3) | 104 | std r10,112(r3) |
| 105 | std r11,120(r3) | 105 | std r11,120(r3) |
| 106 | std r12,128(r3) | 106 | std r12,128(r3) |
| 107 | blr | 107 | blr |
| 108 | 108 |
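The only change to copy_4K_page above is renaming the setup label to the local .Lsetup, which keeps it out of the symbol table (and hence out of perf and backtrace output). For readers not fluent in ppc64 assembly, the routine's control flow is roughly the following C; this is an illustrative sketch, with DCACHE_LINE and prefetch_line() standing in for the ppc64_caches lookup and the dcbt/dcbz cache ops:

#include <string.h>

#define DCACHE_LINE 128			/* placeholder; the asm reads ppc64_caches */

static inline void prefetch_line(const void *p)
{
	__builtin_prefetch(p);		/* stands in for dcbt */
}

/* Rough C equivalent of copy_4K_page (sketch, not the kernel code). */
void copy_4K_page_sketch(void *to, const void *from)
{
	unsigned long i;

	/* .Lsetup loop, only on CPU_FTR_CP_USE_DCBTZ parts: prefetch the
	 * source and pre-zero each destination line (memset stands in
	 * for dcbz). */
	for (i = 0; i < 4096 / DCACHE_LINE; i++) {
		prefetch_line((const char *)from + i * DCACHE_LINE);
		memset((char *)to + i * DCACHE_LINE, 0, DCACHE_LINE);
	}

	/* Main loop: copy the page in 128-byte strides, 16 doublewords
	 * per stride, software-pipelined in the asm. */
	for (i = 0; i < 4096 / 128; i++)
		memcpy((char *)to + i * 128,
		       (const char *)from + i * 128, 128);
}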
arch/powerpc/platforms/pseries/hvCall.S
| 1 | /* | 1 | /* |
| 2 | * This file contains the generic code to perform a call to the | 2 | * This file contains the generic code to perform a call to the |
| 3 | * pSeries LPAR hypervisor. | 3 | * pSeries LPAR hypervisor. |
| 4 | * | 4 | * |
| 5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
| 6 | * modify it under the terms of the GNU General Public License | 6 | * modify it under the terms of the GNU General Public License |
| 7 | * as published by the Free Software Foundation; either version | 7 | * as published by the Free Software Foundation; either version |
| 8 | * 2 of the License, or (at your option) any later version. | 8 | * 2 of the License, or (at your option) any later version. |
| 9 | */ | 9 | */ |
| 10 | #include <asm/hvcall.h> | 10 | #include <asm/hvcall.h> |
| 11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
| 12 | #include <asm/ppc_asm.h> | 12 | #include <asm/ppc_asm.h> |
| 13 | #include <asm/asm-offsets.h> | 13 | #include <asm/asm-offsets.h> |
| 14 | 14 | ||
| 15 | #define STK_PARM(i) (48 + ((i)-3)*8) | 15 | #define STK_PARM(i) (48 + ((i)-3)*8) |
| 16 | 16 | ||
| 17 | #ifdef CONFIG_HCALL_STATS | 17 | #ifdef CONFIG_TRACEPOINTS |
| 18 | |||
| 19 | .section ".toc","aw" | ||
| 20 | |||
| 21 | .globl hcall_tracepoint_refcount | ||
| 22 | hcall_tracepoint_refcount: | ||
| 23 | .llong 0 | ||
| 24 | |||
| 25 | .section ".text" | ||
| 26 | |||
| 18 | /* | 27 | /* |
| 19 | * precall must preserve all registers. use unused STK_PARM() | 28 | * precall must preserve all registers. use unused STK_PARM() |
| 20 | * areas to save snapshots and opcode. | 29 | * areas to save the argument registers. We branch around this |
| 30 | * in early init (eg when populating the MMU hashtable) by using an | ||
| 31 | * unconditional cpu feature. | ||
| 21 | */ | 32 | */ |
| 22 | #define HCALL_INST_PRECALL \ | 33 | #define HCALL_INST_PRECALL(FIRST_REG) \ |
| 23 | std r3,STK_PARM(r3)(r1); /* save opcode */ \ | ||
| 24 | mftb r0; /* get timebase and */ \ | ||
| 25 | std r0,STK_PARM(r5)(r1); /* save for later */ \ | ||
| 26 | BEGIN_FTR_SECTION; \ | 34 | BEGIN_FTR_SECTION; \ |
| 27 | mfspr r0,SPRN_PURR; /* get PURR and */ \ | 35 | b 1f; \ |
| 28 | std r0,STK_PARM(r6)(r1); /* save for later */ \ | 36 | END_FTR_SECTION(0, 1); \ |
| 29 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); | 37 | ld r12,hcall_tracepoint_refcount@toc(r2); \ |
| 30 | 38 | cmpdi r12,0; \ | |
| 39 | beq+ 1f; \ | ||
| 40 | mflr r0; \ | ||
| 41 | std r3,STK_PARM(r3)(r1); \ | ||
| 42 | std r4,STK_PARM(r4)(r1); \ | ||
| 43 | std r5,STK_PARM(r5)(r1); \ | ||
| 44 | std r6,STK_PARM(r6)(r1); \ | ||
| 45 | std r7,STK_PARM(r7)(r1); \ | ||
| 46 | std r8,STK_PARM(r8)(r1); \ | ||
| 47 | std r9,STK_PARM(r9)(r1); \ | ||
| 48 | std r10,STK_PARM(r10)(r1); \ | ||
| 49 | std r0,16(r1); \ | ||
| 50 | addi r4,r1,STK_PARM(FIRST_REG); \ | ||
| 51 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ | ||
| 52 | bl .__trace_hcall_entry; \ | ||
| 53 | addi r1,r1,STACK_FRAME_OVERHEAD; \ | ||
| 54 | ld r0,16(r1); \ | ||
| 55 | ld r3,STK_PARM(r3)(r1); \ | ||
| 56 | ld r4,STK_PARM(r4)(r1); \ | ||
| 57 | ld r5,STK_PARM(r5)(r1); \ | ||
| 58 | ld r6,STK_PARM(r6)(r1); \ | ||
| 59 | ld r7,STK_PARM(r7)(r1); \ | ||
| 60 | ld r8,STK_PARM(r8)(r1); \ | ||
| 61 | ld r9,STK_PARM(r9)(r1); \ | ||
| 62 | ld r10,STK_PARM(r10)(r1); \ | ||
| 63 | mtlr r0; \ | ||
| 64 | 1: | ||
| 65 | |||
| 31 | /* | 66 | /* |
| 32 | * postcall is performed immediately before function return which | 67 | * postcall is performed immediately before function return which |
| 33 | * allows liberal use of volatile registers. We branch around this | 68 | * allows liberal use of volatile registers. We branch around this |
| 34 | * in early init (eg when populating the MMU hashtable) by using an | 69 | * in early init (eg when populating the MMU hashtable) by using an |
| 35 | * unconditional cpu feature. | 70 | * unconditional cpu feature. |
| 36 | */ | 71 | */ |
| 37 | #define HCALL_INST_POSTCALL \ | 72 | #define __HCALL_INST_POSTCALL \ |
| 38 | BEGIN_FTR_SECTION; \ | 73 | BEGIN_FTR_SECTION; \ |
| 39 | b 1f; \ | 74 | b 1f; \ |
| 40 | END_FTR_SECTION(0, 1); \ | 75 | END_FTR_SECTION(0, 1); \ |
| 41 | ld r4,STK_PARM(r3)(r1); /* validate opcode */ \ | 76 | ld r12,hcall_tracepoint_refcount@toc(r2); \ |
| 42 | cmpldi cr7,r4,MAX_HCALL_OPCODE; \ | 77 | cmpdi r12,0; \ |
| 43 | bgt- cr7,1f; \ | 78 | beq+ 1f; \ |
| 44 | \ | 79 | mflr r0; \ |
| 45 | /* get time and PURR snapshots after hcall */ \ | 80 | ld r6,STK_PARM(r3)(r1); \ |
| 46 | mftb r7; /* timebase after */ \ | 81 | std r3,STK_PARM(r3)(r1); \ |
| 47 | BEGIN_FTR_SECTION; \ | 82 | mr r4,r3; \ |
| 48 | mfspr r8,SPRN_PURR; /* PURR after */ \ | 83 | mr r3,r6; \ |
| 49 | ld r6,STK_PARM(r6)(r1); /* PURR before */ \ | 84 | std r0,16(r1); \ |
| 50 | subf r6,r6,r8; /* delta */ \ | 85 | stdu r1,-STACK_FRAME_OVERHEAD(r1); \ |
| 51 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ | 86 | bl .__trace_hcall_exit; \ |
| 52 | ld r5,STK_PARM(r5)(r1); /* timebase before */ \ | 87 | addi r1,r1,STACK_FRAME_OVERHEAD; \ |
| 53 | subf r5,r5,r7; /* time delta */ \ | 88 | ld r0,16(r1); \ |
| 54 | \ | 89 | ld r3,STK_PARM(r3)(r1); \ |
| 55 | /* calculate address of stat structure r4 = opcode */ \ | 90 | mtlr r0; \ |
| 56 | srdi r4,r4,2; /* index into array */ \ | ||
| 57 | mulli r4,r4,HCALL_STAT_SIZE; \ | ||
| 58 | LOAD_REG_ADDR(r7, per_cpu__hcall_stats); \ | ||
| 59 | add r4,r4,r7; \ | ||
| 60 | ld r7,PACA_DATA_OFFSET(r13); /* per cpu offset */ \ | ||
| 61 | add r4,r4,r7; \ | ||
| 62 | \ | ||
| 63 | /* update stats */ \ | ||
| 64 | ld r7,HCALL_STAT_CALLS(r4); /* count */ \ | ||
| 65 | addi r7,r7,1; \ | ||
| 66 | std r7,HCALL_STAT_CALLS(r4); \ | ||
| 67 | ld r7,HCALL_STAT_TB(r4); /* timebase */ \ | ||
| 68 | add r7,r7,r5; \ | ||
| 69 | std r7,HCALL_STAT_TB(r4); \ | ||
| 70 | BEGIN_FTR_SECTION; \ | ||
| 71 | ld r7,HCALL_STAT_PURR(r4); /* PURR */ \ | ||
| 72 | add r7,r7,r6; \ | ||
| 73 | std r7,HCALL_STAT_PURR(r4); \ | ||
| 74 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ | ||
| 75 | 1: | 91 | 1: |
| 92 | |||
| 93 | #define HCALL_INST_POSTCALL_NORETS \ | ||
| 94 | li r5,0; \ | ||
| 95 | __HCALL_INST_POSTCALL | ||
| 96 | |||
| 97 | #define HCALL_INST_POSTCALL(BUFREG) \ | ||
| 98 | mr r5,BUFREG; \ | ||
| 99 | __HCALL_INST_POSTCALL | ||
| 100 | |||
| 76 | #else | 101 | #else |
| 77 | #define HCALL_INST_PRECALL | 102 | #define HCALL_INST_PRECALL(FIRST_REG) |
| 78 | #define HCALL_INST_POSTCALL | 103 | #define HCALL_INST_POSTCALL_NORETS |
| 104 | #define HCALL_INST_POSTCALL(BUFREG) | ||
| 79 | #endif | 105 | #endif |
| 80 | 106 | ||
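The new PRECALL/POSTCALL stubs above branch to C helpers whenever hcall_tracepoint_refcount is non-zero. From the register setup (r3 = opcode and r4 = pointer to the saved argument area on entry; r3 = opcode, r4 = return value, r5 = return buffer or 0 on exit), the expected C-side signatures look like the following. These are inferred from the macro bodies, so treat them as assumptions rather than quoted declarations:

/* Inferred from the register usage in the macros above (assumption). */
void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf);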
| 81 | .text | 107 | .text |
| 82 | 108 | ||
| 83 | _GLOBAL(plpar_hcall_norets) | 109 | _GLOBAL(plpar_hcall_norets) |
| 84 | HMT_MEDIUM | 110 | HMT_MEDIUM |
| 85 | 111 | ||
| 86 | mfcr r0 | 112 | mfcr r0 |
| 87 | stw r0,8(r1) | 113 | stw r0,8(r1) |
| 88 | 114 | ||
| 89 | HCALL_INST_PRECALL | 115 | HCALL_INST_PRECALL(r4) |
| 90 | 116 | ||
| 91 | HVSC /* invoke the hypervisor */ | 117 | HVSC /* invoke the hypervisor */ |
| 92 | 118 | ||
| 93 | HCALL_INST_POSTCALL | 119 | HCALL_INST_POSTCALL_NORETS |
| 94 | 120 | ||
| 95 | lwz r0,8(r1) | 121 | lwz r0,8(r1) |
| 96 | mtcrf 0xff,r0 | 122 | mtcrf 0xff,r0 |
| 97 | blr /* return r3 = status */ | 123 | blr /* return r3 = status */ |
| 98 | 124 | ||
| 99 | _GLOBAL(plpar_hcall) | 125 | _GLOBAL(plpar_hcall) |
| 100 | HMT_MEDIUM | 126 | HMT_MEDIUM |
| 101 | 127 | ||
| 102 | mfcr r0 | 128 | mfcr r0 |
| 103 | stw r0,8(r1) | 129 | stw r0,8(r1) |
| 104 | 130 | ||
| 105 | HCALL_INST_PRECALL | 131 | HCALL_INST_PRECALL(r5) |
| 106 | 132 | ||
| 107 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 133 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ |
| 108 | 134 | ||
| 109 | mr r4,r5 | 135 | mr r4,r5 |
| 110 | mr r5,r6 | 136 | mr r5,r6 |
| 111 | mr r6,r7 | 137 | mr r6,r7 |
| 112 | mr r7,r8 | 138 | mr r7,r8 |
| 113 | mr r8,r9 | 139 | mr r8,r9 |
| 114 | mr r9,r10 | 140 | mr r9,r10 |
| 115 | 141 | ||
| 116 | HVSC /* invoke the hypervisor */ | 142 | HVSC /* invoke the hypervisor */ |
| 117 | 143 | ||
| 118 | ld r12,STK_PARM(r4)(r1) | 144 | ld r12,STK_PARM(r4)(r1) |
| 119 | std r4, 0(r12) | 145 | std r4, 0(r12) |
| 120 | std r5, 8(r12) | 146 | std r5, 8(r12) |
| 121 | std r6, 16(r12) | 147 | std r6, 16(r12) |
| 122 | std r7, 24(r12) | 148 | std r7, 24(r12) |
| 123 | 149 | ||
| 124 | HCALL_INST_POSTCALL | 150 | HCALL_INST_POSTCALL(r12) |
| 125 | 151 | ||
| 126 | lwz r0,8(r1) | 152 | lwz r0,8(r1) |
| 127 | mtcrf 0xff,r0 | 153 | mtcrf 0xff,r0 |
| 128 | 154 | ||
| 129 | blr /* return r3 = status */ | 155 | blr /* return r3 = status */ |
| 130 | 156 | ||
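For context, plpar_hcall() above is what the C wrappers in plpar_wrappers.h sit on: the caller passes the opcode, up to six arguments, and a four-doubleword buffer that receives r4-r7. A hedged usage sketch follows; plpar_get_term_char() in plpar_wrappers.h follows essentially this pattern, and PLPAR_HCALL_BUFSIZE is the standard 4-entry return buffer:

/* Usage sketch, modeled on the plpar_wrappers.h helpers. */
static long get_term_char_sketch(unsigned long termno, unsigned long *len,
				 char *buf16)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);

	*len = retbuf[0];		/* length, returned in r4 */
	memcpy(buf16, &retbuf[1], 16);	/* up to 16 chars from r5/r6 */
	return rc;			/* H_SUCCESS on success */
}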
| 131 | /* | 157 | /* |
| 132 | * plpar_hcall_raw can be called in real mode. kexec/kdump need some | 158 | * plpar_hcall_raw can be called in real mode. kexec/kdump need some |
| 133 | * hypervisor calls to be executed in real mode. So plpar_hcall_raw | 159 | * hypervisor calls to be executed in real mode. So plpar_hcall_raw |
| 134 | * does not access the per cpu hypervisor call statistics variables, | 160 | * does not access the per cpu hypervisor call statistics variables, |
| 135 | * since these variables may not be present in the RMO region. | 161 | * since these variables may not be present in the RMO region. |
| 136 | */ | 162 | */ |
| 137 | _GLOBAL(plpar_hcall_raw) | 163 | _GLOBAL(plpar_hcall_raw) |
| 138 | HMT_MEDIUM | 164 | HMT_MEDIUM |
| 139 | 165 | ||
| 140 | mfcr r0 | 166 | mfcr r0 |
| 141 | stw r0,8(r1) | 167 | stw r0,8(r1) |
| 142 | 168 | ||
| 143 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 169 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ |
| 144 | 170 | ||
| 145 | mr r4,r5 | 171 | mr r4,r5 |
| 146 | mr r5,r6 | 172 | mr r5,r6 |
| 147 | mr r6,r7 | 173 | mr r6,r7 |
| 148 | mr r7,r8 | 174 | mr r7,r8 |
| 149 | mr r8,r9 | 175 | mr r8,r9 |
| 150 | mr r9,r10 | 176 | mr r9,r10 |
| 151 | 177 | ||
| 152 | HVSC /* invoke the hypervisor */ | 178 | HVSC /* invoke the hypervisor */ |
| 153 | 179 | ||
| 154 | ld r12,STK_PARM(r4)(r1) | 180 | ld r12,STK_PARM(r4)(r1) |
| 155 | std r4, 0(r12) | 181 | std r4, 0(r12) |
| 156 | std r5, 8(r12) | 182 | std r5, 8(r12) |
| 157 | std r6, 16(r12) | 183 | std r6, 16(r12) |
| 158 | std r7, 24(r12) | 184 | std r7, 24(r12) |
| 159 | 185 | ||
| 160 | lwz r0,8(r1) | 186 | lwz r0,8(r1) |
| 161 | mtcrf 0xff,r0 | 187 | mtcrf 0xff,r0 |
| 162 | 188 | ||
| 163 | blr /* return r3 = status */ | 189 | blr /* return r3 = status */ |
| 164 | 190 | ||
| 165 | _GLOBAL(plpar_hcall9) | 191 | _GLOBAL(plpar_hcall9) |
| 166 | HMT_MEDIUM | 192 | HMT_MEDIUM |
| 167 | 193 | ||
| 168 | mfcr r0 | 194 | mfcr r0 |
| 169 | stw r0,8(r1) | 195 | stw r0,8(r1) |
| 170 | 196 | ||
| 171 | HCALL_INST_PRECALL | 197 | HCALL_INST_PRECALL(r5) |
| 172 | 198 | ||
| 173 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | 199 | std r4,STK_PARM(r4)(r1) /* Save ret buffer */ |
| 174 | 200 | ||
| 175 | mr r4,r5 | 201 | mr r4,r5 |
| 176 | mr r5,r6 | 202 | mr r5,r6 |
| 177 | mr r6,r7 | 203 | mr r6,r7 |
| 178 | mr r7,r8 | 204 | mr r7,r8 |
| 179 | mr r8,r9 | 205 | mr r8,r9 |
| 180 | mr r9,r10 | 206 | mr r9,r10 |
| 181 | ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ | 207 | ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ |
| 182 | ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ | 208 | ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ |
| 183 | ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ | 209 | ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ |
arch/powerpc/platforms/pseries/hvCall_inst.c
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2006 Mike Kravetz IBM Corporation | 2 | * Copyright (C) 2006 Mike Kravetz IBM Corporation |
| 3 | * | 3 | * |
| 4 | * Hypervisor Call Instrumentation | 4 | * Hypervisor Call Instrumentation |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
| 9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
| 10 | * | 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
| 15 | * | 15 | * |
| 16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
| 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 19 | */ | 19 | */ |
| 20 | 20 | ||
| 21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| 22 | #include <linux/percpu.h> | 22 | #include <linux/percpu.h> |
| 23 | #include <linux/debugfs.h> | 23 | #include <linux/debugfs.h> |
| 24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
| 25 | #include <linux/cpumask.h> | 25 | #include <linux/cpumask.h> |
| 26 | #include <asm/hvcall.h> | 26 | #include <asm/hvcall.h> |
| 27 | #include <asm/firmware.h> | 27 | #include <asm/firmware.h> |
| 28 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
| 29 | #include <asm/trace.h> | ||
| 29 | 30 | ||
| 30 | DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); | 31 | DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); |
| 31 | 32 | ||
| 32 | /* | 33 | /* |
| 33 | * Routines for displaying the statistics in debugfs | 34 | * Routines for displaying the statistics in debugfs |
| 34 | */ | 35 | */ |
| 35 | static void *hc_start(struct seq_file *m, loff_t *pos) | 36 | static void *hc_start(struct seq_file *m, loff_t *pos) |
| 36 | { | 37 | { |
| 37 | if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1)) | 38 | if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1)) |
| 38 | return (void *)(unsigned long)(*pos + 1); | 39 | return (void *)(unsigned long)(*pos + 1); |
| 39 | 40 | ||
| 40 | return NULL; | 41 | return NULL; |
| 41 | } | 42 | } |
| 42 | 43 | ||
| 43 | static void *hc_next(struct seq_file *m, void *p, loff_t * pos) | 44 | static void *hc_next(struct seq_file *m, void *p, loff_t * pos) |
| 44 | { | 45 | { |
| 45 | ++*pos; | 46 | ++*pos; |
| 46 | 47 | ||
| 47 | return hc_start(m, pos); | 48 | return hc_start(m, pos); |
| 48 | } | 49 | } |
| 49 | 50 | ||
| 50 | static void hc_stop(struct seq_file *m, void *p) | 51 | static void hc_stop(struct seq_file *m, void *p) |
| 51 | { | 52 | { |
| 52 | } | 53 | } |
| 53 | 54 | ||
| 54 | static int hc_show(struct seq_file *m, void *p) | 55 | static int hc_show(struct seq_file *m, void *p) |
| 55 | { | 56 | { |
| 56 | unsigned long h_num = (unsigned long)p; | 57 | unsigned long h_num = (unsigned long)p; |
| 57 | struct hcall_stats *hs = (struct hcall_stats *)m->private; | 58 | struct hcall_stats *hs = (struct hcall_stats *)m->private; |
| 58 | 59 | ||
| 59 | if (hs[h_num].num_calls) { | 60 | if (hs[h_num].num_calls) { |
| 60 | if (cpu_has_feature(CPU_FTR_PURR)) | 61 | if (cpu_has_feature(CPU_FTR_PURR)) |
| 61 | seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2, | 62 | seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2, |
| 62 | hs[h_num].num_calls, | 63 | hs[h_num].num_calls, |
| 63 | hs[h_num].tb_total, | 64 | hs[h_num].tb_total, |
| 64 | hs[h_num].purr_total); | 65 | hs[h_num].purr_total); |
| 65 | else | 66 | else |
| 66 | seq_printf(m, "%lu %lu %lu\n", h_num<<2, | 67 | seq_printf(m, "%lu %lu %lu\n", h_num<<2, |
| 67 | hs[h_num].num_calls, | 68 | hs[h_num].num_calls, |
| 68 | hs[h_num].tb_total); | 69 | hs[h_num].tb_total); |
| 69 | } | 70 | } |
| 70 | 71 | ||
| 71 | return 0; | 72 | return 0; |
| 72 | } | 73 | } |
| 73 | 74 | ||
| 74 | static const struct seq_operations hcall_inst_seq_ops = { | 75 | static const struct seq_operations hcall_inst_seq_ops = { |
| 75 | .start = hc_start, | 76 | .start = hc_start, |
| 76 | .next = hc_next, | 77 | .next = hc_next, |
| 77 | .stop = hc_stop, | 78 | .stop = hc_stop, |
| 78 | .show = hc_show | 79 | .show = hc_show |
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| 81 | static int hcall_inst_seq_open(struct inode *inode, struct file *file) | 82 | static int hcall_inst_seq_open(struct inode *inode, struct file *file) |
| 82 | { | 83 | { |
| 83 | int rc; | 84 | int rc; |
| 84 | struct seq_file *seq; | 85 | struct seq_file *seq; |
| 85 | 86 | ||
| 86 | rc = seq_open(file, &hcall_inst_seq_ops); | 87 | rc = seq_open(file, &hcall_inst_seq_ops); |
| 87 | seq = file->private_data; | 88 | seq = file->private_data; |
| 88 | seq->private = file->f_path.dentry->d_inode->i_private; | 89 | seq->private = file->f_path.dentry->d_inode->i_private; |
| 89 | 90 | ||
| 90 | return rc; | 91 | return rc; |
| 91 | } | 92 | } |
| 92 | 93 | ||
| 93 | static const struct file_operations hcall_inst_seq_fops = { | 94 | static const struct file_operations hcall_inst_seq_fops = { |
| 94 | .open = hcall_inst_seq_open, | 95 | .open = hcall_inst_seq_open, |
| 95 | .read = seq_read, | 96 | .read = seq_read, |
| 96 | .llseek = seq_lseek, | 97 | .llseek = seq_lseek, |
| 97 | .release = seq_release, | 98 | .release = seq_release, |
| 98 | }; | 99 | }; |
| 99 | 100 | ||
| 100 | #define HCALL_ROOT_DIR "hcall_inst" | 101 | #define HCALL_ROOT_DIR "hcall_inst" |
| 101 | #define CPU_NAME_BUF_SIZE 32 | 102 | #define CPU_NAME_BUF_SIZE 32 |
| 102 | 103 | ||
| 104 | |||
| 105 | static void probe_hcall_entry(unsigned long opcode, unsigned long *args) | ||
| 106 | { | ||
| 107 | struct hcall_stats *h; | ||
| 108 | |||
| 109 | if (opcode > MAX_HCALL_OPCODE) | ||
| 110 | return; | ||
| 111 | |||
| 112 | h = &get_cpu_var(hcall_stats)[opcode / 4]; | ||
| 113 | h->tb_start = mftb(); | ||
| 114 | h->purr_start = mfspr(SPRN_PURR); | ||
| 115 | } | ||
| 116 | |||
| 117 | static void probe_hcall_exit(unsigned long opcode, unsigned long retval, | ||
| 118 | unsigned long *retbuf) | ||
| 119 | { | ||
| 120 | struct hcall_stats *h; | ||
| 121 | |||
| 122 | if (opcode > MAX_HCALL_OPCODE) | ||
| 123 | return; | ||
| 124 | |||
| 125 | h = &__get_cpu_var(hcall_stats)[opcode / 4]; | ||
| 126 | h->num_calls++; | ||
| 127 | h->tb_total += mftb() - h->tb_start; /* accumulate, as the old asm did */ | ||
| 128 | h->purr_total += mfspr(SPRN_PURR) - h->purr_start; | ||
| 129 | |||
| 130 | put_cpu_var(hcall_stats); | ||
| 131 | } | ||
| 132 | |||
| 103 | static int __init hcall_inst_init(void) | 133 | static int __init hcall_inst_init(void) |
| 104 | { | 134 | { |
| 105 | struct dentry *hcall_root; | 135 | struct dentry *hcall_root; |
| 106 | struct dentry *hcall_file; | 136 | struct dentry *hcall_file; |
| 107 | char cpu_name_buf[CPU_NAME_BUF_SIZE]; | 137 | char cpu_name_buf[CPU_NAME_BUF_SIZE]; |
| 108 | int cpu; | 138 | int cpu; |
| 109 | 139 | ||
| 110 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | 140 | if (!firmware_has_feature(FW_FEATURE_LPAR)) |
| 111 | return 0; | 141 | return 0; |
| 142 | |||
| 143 | if (register_trace_hcall_entry(probe_hcall_entry)) | ||
| 144 | return -EINVAL; | ||
| 145 | |||
| 146 | if (register_trace_hcall_exit(probe_hcall_exit)) { | ||
| 147 | unregister_trace_hcall_entry(probe_hcall_entry); | ||
| 148 | return -EINVAL; | ||
| 149 | } | ||
| 112 | 150 | ||
| 113 | hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL); | 151 | hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL); |
| 114 | if (!hcall_root) | 152 | if (!hcall_root) |
| 115 | return -ENOMEM; | 153 | return -ENOMEM; |
| 116 | 154 | ||
| 117 | for_each_possible_cpu(cpu) { | 155 | for_each_possible_cpu(cpu) { |
| 118 | snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu); | 156 | snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu); |
| 119 | hcall_file = debugfs_create_file(cpu_name_buf, S_IRUGO, | 157 | hcall_file = debugfs_create_file(cpu_name_buf, S_IRUGO, |
| 120 | hcall_root, | 158 | hcall_root, |
| 121 | per_cpu(hcall_stats, cpu), | 159 | per_cpu(hcall_stats, cpu), |
| 122 | &hcall_inst_seq_fops); | 160 | &hcall_inst_seq_fops); |
| 123 | if (!hcall_file) | 161 | if (!hcall_file) |
| 124 | return -ENOMEM; | 162 | return -ENOMEM; |
| 125 | } | 163 | } |
| 126 | 164 | ||
| 127 | return 0; | 165 | return 0; |
| 128 | } | 166 | } |
| 129 | __initcall(hcall_inst_init); | 167 | __initcall(hcall_inst_init); |
| 130 | 168 |
arch/powerpc/platforms/pseries/lpar.c
| 1 | /* | 1 | /* |
| 2 | * pSeries_lpar.c | 2 | * pSeries_lpar.c |
| 3 | * Copyright (C) 2001 Todd Inglett, IBM Corporation | 3 | * Copyright (C) 2001 Todd Inglett, IBM Corporation |
| 4 | * | 4 | * |
| 5 | * pSeries LPAR support. | 5 | * pSeries LPAR support. |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
| 9 | * the Free Software Foundation; either version 2 of the License, or | 9 | * the Free Software Foundation; either version 2 of the License, or |
| 10 | * (at your option) any later version. | 10 | * (at your option) any later version. |
| 11 | * | 11 | * |
| 12 | * This program is distributed in the hope that it will be useful, | 12 | * This program is distributed in the hope that it will be useful, |
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
| 16 | * | 16 | * |
| 17 | * You should have received a copy of the GNU General Public License | 17 | * You should have received a copy of the GNU General Public License |
| 18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 20 | */ | 20 | */ |
| 21 | 21 | ||
| 22 | /* Enables debugging of low-level hash table routines - careful! */ | 22 | /* Enables debugging of low-level hash table routines - careful! */ |
| 23 | #undef DEBUG | 23 | #undef DEBUG |
| 24 | 24 | ||
| 25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
| 26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
| 27 | #include <linux/console.h> | 27 | #include <linux/console.h> |
| 28 | #include <asm/processor.h> | 28 | #include <asm/processor.h> |
| 29 | #include <asm/mmu.h> | 29 | #include <asm/mmu.h> |
| 30 | #include <asm/page.h> | 30 | #include <asm/page.h> |
| 31 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
| 32 | #include <asm/machdep.h> | 32 | #include <asm/machdep.h> |
| 33 | #include <asm/abs_addr.h> | 33 | #include <asm/abs_addr.h> |
| 34 | #include <asm/mmu_context.h> | 34 | #include <asm/mmu_context.h> |
| 35 | #include <asm/iommu.h> | 35 | #include <asm/iommu.h> |
| 36 | #include <asm/tlbflush.h> | 36 | #include <asm/tlbflush.h> |
| 37 | #include <asm/tlb.h> | 37 | #include <asm/tlb.h> |
| 38 | #include <asm/prom.h> | 38 | #include <asm/prom.h> |
| 39 | #include <asm/cputable.h> | 39 | #include <asm/cputable.h> |
| 40 | #include <asm/udbg.h> | 40 | #include <asm/udbg.h> |
| 41 | #include <asm/smp.h> | 41 | #include <asm/smp.h> |
| 42 | #include <asm/trace.h> | ||
| 42 | 43 | ||
| 43 | #include "plpar_wrappers.h" | 44 | #include "plpar_wrappers.h" |
| 44 | #include "pseries.h" | 45 | #include "pseries.h" |
| 45 | 46 | ||
| 46 | 47 | ||
| 47 | /* in hvCall.S */ | 48 | /* in hvCall.S */ |
| 48 | EXPORT_SYMBOL(plpar_hcall); | 49 | EXPORT_SYMBOL(plpar_hcall); |
| 49 | EXPORT_SYMBOL(plpar_hcall9); | 50 | EXPORT_SYMBOL(plpar_hcall9); |
| 50 | EXPORT_SYMBOL(plpar_hcall_norets); | 51 | EXPORT_SYMBOL(plpar_hcall_norets); |
| 51 | 52 | ||
| 52 | extern void pSeries_find_serial_port(void); | 53 | extern void pSeries_find_serial_port(void); |
| 53 | 54 | ||
| 54 | 55 | ||
| 55 | static int vtermno; /* virtual terminal# for udbg */ | 56 | static int vtermno; /* virtual terminal# for udbg */ |
| 56 | 57 | ||
| 57 | #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) | 58 | #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) |
| 58 | static void udbg_hvsi_putc(char c) | 59 | static void udbg_hvsi_putc(char c) |
| 59 | { | 60 | { |
| 60 | /* packet's seqno isn't used anyway */ | 61 | /* packet's seqno isn't used anyway */ |
| 61 | uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c }; | 62 | uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c }; |
| 62 | int rc; | 63 | int rc; |
| 63 | 64 | ||
| 64 | if (c == '\n') | 65 | if (c == '\n') |
| 65 | udbg_hvsi_putc('\r'); | 66 | udbg_hvsi_putc('\r'); |
| 66 | 67 | ||
| 67 | do { | 68 | do { |
| 68 | rc = plpar_put_term_char(vtermno, sizeof(packet), packet); | 69 | rc = plpar_put_term_char(vtermno, sizeof(packet), packet); |
| 69 | } while (rc == H_BUSY); | 70 | } while (rc == H_BUSY); |
| 70 | } | 71 | } |
| 71 | 72 | ||
| 72 | static long hvsi_udbg_buf_len; | 73 | static long hvsi_udbg_buf_len; |
| 73 | static uint8_t hvsi_udbg_buf[256]; | 74 | static uint8_t hvsi_udbg_buf[256]; |
| 74 | 75 | ||
| 75 | static int udbg_hvsi_getc_poll(void) | 76 | static int udbg_hvsi_getc_poll(void) |
| 76 | { | 77 | { |
| 77 | unsigned char ch; | 78 | unsigned char ch; |
| 78 | int rc, i; | 79 | int rc, i; |
| 79 | 80 | ||
| 80 | if (hvsi_udbg_buf_len == 0) { | 81 | if (hvsi_udbg_buf_len == 0) { |
| 81 | rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf); | 82 | rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf); |
| 82 | if (rc != H_SUCCESS || hvsi_udbg_buf[0] != 0xff) { | 83 | if (rc != H_SUCCESS || hvsi_udbg_buf[0] != 0xff) { |
| 83 | /* bad read or non-data packet */ | 84 | /* bad read or non-data packet */ |
| 84 | hvsi_udbg_buf_len = 0; | 85 | hvsi_udbg_buf_len = 0; |
| 85 | } else { | 86 | } else { |
| 86 | /* remove the packet header */ | 87 | /* remove the packet header */ |
| 87 | for (i = 4; i < hvsi_udbg_buf_len; i++) | 88 | for (i = 4; i < hvsi_udbg_buf_len; i++) |
| 88 | hvsi_udbg_buf[i-4] = hvsi_udbg_buf[i]; | 89 | hvsi_udbg_buf[i-4] = hvsi_udbg_buf[i]; |
| 89 | hvsi_udbg_buf_len -= 4; | 90 | hvsi_udbg_buf_len -= 4; |
| 90 | } | 91 | } |
| 91 | } | 92 | } |
| 92 | 93 | ||
| 93 | if (hvsi_udbg_buf_len <= 0 || hvsi_udbg_buf_len > 256) { | 94 | if (hvsi_udbg_buf_len <= 0 || hvsi_udbg_buf_len > 256) { |
| 94 | /* no data ready */ | 95 | /* no data ready */ |
| 95 | hvsi_udbg_buf_len = 0; | 96 | hvsi_udbg_buf_len = 0; |
| 96 | return -1; | 97 | return -1; |
| 97 | } | 98 | } |
| 98 | 99 | ||
| 99 | ch = hvsi_udbg_buf[0]; | 100 | ch = hvsi_udbg_buf[0]; |
| 100 | /* shift remaining data down */ | 101 | /* shift remaining data down */ |
| 101 | for (i = 1; i < hvsi_udbg_buf_len; i++) { | 102 | for (i = 1; i < hvsi_udbg_buf_len; i++) { |
| 102 | hvsi_udbg_buf[i-1] = hvsi_udbg_buf[i]; | 103 | hvsi_udbg_buf[i-1] = hvsi_udbg_buf[i]; |
| 103 | } | 104 | } |
| 104 | hvsi_udbg_buf_len--; | 105 | hvsi_udbg_buf_len--; |
| 105 | 106 | ||
| 106 | return ch; | 107 | return ch; |
| 107 | } | 108 | } |
| 108 | 109 | ||
| 109 | static int udbg_hvsi_getc(void) | 110 | static int udbg_hvsi_getc(void) |
| 110 | { | 111 | { |
| 111 | int ch; | 112 | int ch; |
| 112 | for (;;) { | 113 | for (;;) { |
| 113 | ch = udbg_hvsi_getc_poll(); | 114 | ch = udbg_hvsi_getc_poll(); |
| 114 | if (ch == -1) { | 115 | if (ch == -1) { |
| 115 | /* This shouldn't be needed...but... */ | 116 | /* This shouldn't be needed...but... */ |
| 116 | volatile unsigned long delay; | 117 | volatile unsigned long delay; |
| 117 | for (delay=0; delay < 2000000; delay++) | 118 | for (delay=0; delay < 2000000; delay++) |
| 118 | ; | 119 | ; |
| 119 | } else { | 120 | } else { |
| 120 | return ch; | 121 | return ch; |
| 121 | } | 122 | } |
| 122 | } | 123 | } |
| 123 | } | 124 | } |
| 124 | 125 | ||
| 125 | static void udbg_putcLP(char c) | 126 | static void udbg_putcLP(char c) |
| 126 | { | 127 | { |
| 127 | char buf[16]; | 128 | char buf[16]; |
| 128 | unsigned long rc; | 129 | unsigned long rc; |
| 129 | 130 | ||
| 130 | if (c == '\n') | 131 | if (c == '\n') |
| 131 | udbg_putcLP('\r'); | 132 | udbg_putcLP('\r'); |
| 132 | 133 | ||
| 133 | buf[0] = c; | 134 | buf[0] = c; |
| 134 | do { | 135 | do { |
| 135 | rc = plpar_put_term_char(vtermno, 1, buf); | 136 | rc = plpar_put_term_char(vtermno, 1, buf); |
| 136 | } while(rc == H_BUSY); | 137 | } while(rc == H_BUSY); |
| 137 | } | 138 | } |
| 138 | 139 | ||
| 139 | /* Buffered chars getc */ | 140 | /* Buffered chars getc */ |
| 140 | static long inbuflen; | 141 | static long inbuflen; |
| 141 | static long inbuf[2]; /* must be 2 longs */ | 142 | static long inbuf[2]; /* must be 2 longs */ |
| 142 | 143 | ||
| 143 | static int udbg_getc_pollLP(void) | 144 | static int udbg_getc_pollLP(void) |
| 144 | { | 145 | { |
| 145 | /* The interface is tricky because it may return up to 16 chars. | 146 | /* The interface is tricky because it may return up to 16 chars. |
| 146 | * We save them statically for future calls to udbg_getc(). | 147 | * We save them statically for future calls to udbg_getc(). |
| 147 | */ | 148 | */ |
| 148 | char ch, *buf = (char *)inbuf; | 149 | char ch, *buf = (char *)inbuf; |
| 149 | int i; | 150 | int i; |
| 150 | long rc; | 151 | long rc; |
| 151 | if (inbuflen == 0) { | 152 | if (inbuflen == 0) { |
| 152 | /* get some more chars. */ | 153 | /* get some more chars. */ |
| 153 | inbuflen = 0; | 154 | inbuflen = 0; |
| 154 | rc = plpar_get_term_char(vtermno, &inbuflen, buf); | 155 | rc = plpar_get_term_char(vtermno, &inbuflen, buf); |
| 155 | if (rc != H_SUCCESS) | 156 | if (rc != H_SUCCESS) |
| 156 | inbuflen = 0; /* otherwise inbuflen is garbage */ | 157 | inbuflen = 0; /* otherwise inbuflen is garbage */ |
| 157 | } | 158 | } |
| 158 | if (inbuflen <= 0 || inbuflen > 16) { | 159 | if (inbuflen <= 0 || inbuflen > 16) { |
| 159 | /* Catch error case as well as other oddities (corruption) */ | 160 | /* Catch error case as well as other oddities (corruption) */ |
| 160 | inbuflen = 0; | 161 | inbuflen = 0; |
| 161 | return -1; | 162 | return -1; |
| 162 | } | 163 | } |
| 163 | ch = buf[0]; | 164 | ch = buf[0]; |
| 164 | for (i = 1; i < inbuflen; i++) /* shuffle them down. */ | 165 | for (i = 1; i < inbuflen; i++) /* shuffle them down. */ |
| 165 | buf[i-1] = buf[i]; | 166 | buf[i-1] = buf[i]; |
| 166 | inbuflen--; | 167 | inbuflen--; |
| 167 | return ch; | 168 | return ch; |
| 168 | } | 169 | } |
| 169 | 170 | ||
| 170 | static int udbg_getcLP(void) | 171 | static int udbg_getcLP(void) |
| 171 | { | 172 | { |
| 172 | int ch; | 173 | int ch; |
| 173 | for (;;) { | 174 | for (;;) { |
| 174 | ch = udbg_getc_pollLP(); | 175 | ch = udbg_getc_pollLP(); |
| 175 | if (ch == -1) { | 176 | if (ch == -1) { |
| 176 | /* This shouldn't be needed...but... */ | 177 | /* This shouldn't be needed...but... */ |
| 177 | volatile unsigned long delay; | 178 | volatile unsigned long delay; |
| 178 | for (delay=0; delay < 2000000; delay++) | 179 | for (delay=0; delay < 2000000; delay++) |
| 179 | ; | 180 | ; |
| 180 | } else { | 181 | } else { |
| 181 | return ch; | 182 | return ch; |
| 182 | } | 183 | } |
| 183 | } | 184 | } |
| 184 | } | 185 | } |
| 185 | 186 | ||
| 186 | /* call this from early_init() for a working debug console on | 187 | /* call this from early_init() for a working debug console on |
| 187 | * vterm capable LPAR machines | 188 | * vterm capable LPAR machines |
| 188 | */ | 189 | */ |
| 189 | void __init udbg_init_debug_lpar(void) | 190 | void __init udbg_init_debug_lpar(void) |
| 190 | { | 191 | { |
| 191 | vtermno = 0; | 192 | vtermno = 0; |
| 192 | udbg_putc = udbg_putcLP; | 193 | udbg_putc = udbg_putcLP; |
| 193 | udbg_getc = udbg_getcLP; | 194 | udbg_getc = udbg_getcLP; |
| 194 | udbg_getc_poll = udbg_getc_pollLP; | 195 | udbg_getc_poll = udbg_getc_pollLP; |
| 195 | 196 | ||
| 196 | register_early_udbg_console(); | 197 | register_early_udbg_console(); |
| 197 | } | 198 | } |
| 198 | 199 | ||
| 199 | /* returns 0 if couldn't find or use /chosen/stdout as console */ | 200 | /* returns 0 if couldn't find or use /chosen/stdout as console */ |
| 200 | void __init find_udbg_vterm(void) | 201 | void __init find_udbg_vterm(void) |
| 201 | { | 202 | { |
| 202 | struct device_node *stdout_node; | 203 | struct device_node *stdout_node; |
| 203 | const u32 *termno; | 204 | const u32 *termno; |
| 204 | const char *name; | 205 | const char *name; |
| 205 | 206 | ||
| 206 | /* find the boot console from /chosen/stdout */ | 207 | /* find the boot console from /chosen/stdout */ |
| 207 | if (!of_chosen) | 208 | if (!of_chosen) |
| 208 | return; | 209 | return; |
| 209 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); | 210 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); |
| 210 | if (name == NULL) | 211 | if (name == NULL) |
| 211 | return; | 212 | return; |
| 212 | stdout_node = of_find_node_by_path(name); | 213 | stdout_node = of_find_node_by_path(name); |
| 213 | if (!stdout_node) | 214 | if (!stdout_node) |
| 214 | return; | 215 | return; |
| 215 | name = of_get_property(stdout_node, "name", NULL); | 216 | name = of_get_property(stdout_node, "name", NULL); |
| 216 | if (!name) { | 217 | if (!name) { |
| 217 | printk(KERN_WARNING "stdout node missing 'name' property!\n"); | 218 | printk(KERN_WARNING "stdout node missing 'name' property!\n"); |
| 218 | goto out; | 219 | goto out; |
| 219 | } | 220 | } |
| 220 | 221 | ||
| 221 | /* Check if it's a virtual terminal */ | 222 | /* Check if it's a virtual terminal */ |
| 222 | if (strncmp(name, "vty", 3) != 0) | 223 | if (strncmp(name, "vty", 3) != 0) |
| 223 | goto out; | 224 | goto out; |
| 224 | termno = of_get_property(stdout_node, "reg", NULL); | 225 | termno = of_get_property(stdout_node, "reg", NULL); |
| 225 | if (termno == NULL) | 226 | if (termno == NULL) |
| 226 | goto out; | 227 | goto out; |
| 227 | vtermno = termno[0]; | 228 | vtermno = termno[0]; |
| 228 | 229 | ||
| 229 | if (of_device_is_compatible(stdout_node, "hvterm1")) { | 230 | if (of_device_is_compatible(stdout_node, "hvterm1")) { |
| 230 | udbg_putc = udbg_putcLP; | 231 | udbg_putc = udbg_putcLP; |
| 231 | udbg_getc = udbg_getcLP; | 232 | udbg_getc = udbg_getcLP; |
| 232 | udbg_getc_poll = udbg_getc_pollLP; | 233 | udbg_getc_poll = udbg_getc_pollLP; |
| 233 | add_preferred_console("hvc", termno[0] & 0xff, NULL); | 234 | add_preferred_console("hvc", termno[0] & 0xff, NULL); |
| 234 | } else if (of_device_is_compatible(stdout_node, "hvterm-protocol")) { | 235 | } else if (of_device_is_compatible(stdout_node, "hvterm-protocol")) { |
| 235 | vtermno = termno[0]; | 236 | vtermno = termno[0]; |
| 236 | udbg_putc = udbg_hvsi_putc; | 237 | udbg_putc = udbg_hvsi_putc; |
| 237 | udbg_getc = udbg_hvsi_getc; | 238 | udbg_getc = udbg_hvsi_getc; |
| 238 | udbg_getc_poll = udbg_hvsi_getc_poll; | 239 | udbg_getc_poll = udbg_hvsi_getc_poll; |
| 239 | add_preferred_console("hvsi", termno[0] & 0xff, NULL); | 240 | add_preferred_console("hvsi", termno[0] & 0xff, NULL); |
| 240 | } | 241 | } |
| 241 | out: | 242 | out: |
| 242 | of_node_put(stdout_node); | 243 | of_node_put(stdout_node); |
| 243 | } | 244 | } |
| 244 | 245 | ||
| 245 | void vpa_init(int cpu) | 246 | void vpa_init(int cpu) |
| 246 | { | 247 | { |
| 247 | int hwcpu = get_hard_smp_processor_id(cpu); | 248 | int hwcpu = get_hard_smp_processor_id(cpu); |
| 248 | unsigned long addr; | 249 | unsigned long addr; |
| 249 | long ret; | 250 | long ret; |
| 250 | 251 | ||
| 251 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | 252 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
| 252 | lppaca[cpu].vmxregs_in_use = 1; | 253 | lppaca[cpu].vmxregs_in_use = 1; |
| 253 | 254 | ||
| 254 | addr = __pa(&lppaca[cpu]); | 255 | addr = __pa(&lppaca[cpu]); |
| 255 | ret = register_vpa(hwcpu, addr); | 256 | ret = register_vpa(hwcpu, addr); |
| 256 | 257 | ||
| 257 | if (ret) { | 258 | if (ret) { |
| 258 | printk(KERN_ERR "WARNING: vpa_init: VPA registration for " | 259 | printk(KERN_ERR "WARNING: vpa_init: VPA registration for " |
| 259 | "cpu %d (hw %d) of area %lx returns %ld\n", | 260 | "cpu %d (hw %d) of area %lx returns %ld\n", |
| 260 | cpu, hwcpu, addr, ret); | 261 | cpu, hwcpu, addr, ret); |
| 261 | return; | 262 | return; |
| 262 | } | 263 | } |
| 263 | /* | 264 | /* |
| 264 | * PAPR says this feature is SLB-Buffer but firmware never | 265 | * PAPR says this feature is SLB-Buffer but firmware never |
| 265 | * reports that. All SPLPARs support the SLB shadow buffer. | 266 | * reports that. All SPLPARs support the SLB shadow buffer. |
| 266 | */ | 267 | */ |
| 267 | addr = __pa(&slb_shadow[cpu]); | 268 | addr = __pa(&slb_shadow[cpu]); |
| 268 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 269 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
| 269 | ret = register_slb_shadow(hwcpu, addr); | 270 | ret = register_slb_shadow(hwcpu, addr); |
| 270 | if (ret) | 271 | if (ret) |
| 271 | printk(KERN_ERR | 272 | printk(KERN_ERR |
| 272 | "WARNING: vpa_init: SLB shadow buffer " | 273 | "WARNING: vpa_init: SLB shadow buffer " |
| 273 | "registration for cpu %d (hw %d) of area %lx " | 274 | "registration for cpu %d (hw %d) of area %lx " |
| 274 | "returns %ld\n", cpu, hwcpu, addr, ret); | 275 | "returns %ld\n", cpu, hwcpu, addr, ret); |
| 275 | } | 276 | } |
| 276 | } | 277 | } |
| 277 | 278 | ||
| 278 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, | 279 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, |
| 279 | unsigned long va, unsigned long pa, | 280 | unsigned long va, unsigned long pa, |
| 280 | unsigned long rflags, unsigned long vflags, | 281 | unsigned long rflags, unsigned long vflags, |
| 281 | int psize, int ssize) | 282 | int psize, int ssize) |
| 282 | { | 283 | { |
| 283 | unsigned long lpar_rc; | 284 | unsigned long lpar_rc; |
| 284 | unsigned long flags; | 285 | unsigned long flags; |
| 285 | unsigned long slot; | 286 | unsigned long slot; |
| 286 | unsigned long hpte_v, hpte_r; | 287 | unsigned long hpte_v, hpte_r; |
| 287 | 288 | ||
| 288 | if (!(vflags & HPTE_V_BOLTED)) | 289 | if (!(vflags & HPTE_V_BOLTED)) |
| 289 | pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " | 290 | pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " |
| 290 | "rflags=%lx, vflags=%lx, psize=%d)\n", | 291 | "rflags=%lx, vflags=%lx, psize=%d)\n", |
| 291 | hpte_group, va, pa, rflags, vflags, psize); | 292 | hpte_group, va, pa, rflags, vflags, psize); |
| 292 | 293 | ||
| 293 | hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; | 294 | hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; |
| 294 | hpte_r = hpte_encode_r(pa, psize) | rflags; | 295 | hpte_r = hpte_encode_r(pa, psize) | rflags; |
| 295 | 296 | ||
| 296 | if (!(vflags & HPTE_V_BOLTED)) | 297 | if (!(vflags & HPTE_V_BOLTED)) |
| 297 | pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); | 298 | pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); |
| 298 | 299 | ||
| 299 | /* Now fill in the actual HPTE */ | 300 | /* Now fill in the actual HPTE */ |
| 300 | /* Set CEC cookie to 0 */ | 301 | /* Set CEC cookie to 0 */ |
| 301 | /* Zero page = 0 */ | 302 | /* Zero page = 0 */ |
| 302 | /* I-cache Invalidate = 0 */ | 303 | /* I-cache Invalidate = 0 */ |
| 303 | /* I-cache synchronize = 0 */ | 304 | /* I-cache synchronize = 0 */ |
| 304 | /* Exact = 0 */ | 305 | /* Exact = 0 */ |
| 305 | flags = 0; | 306 | flags = 0; |
| 306 | 307 | ||
| 307 | /* Make pHyp happy */ | 308 | /* Make pHyp happy */ |
| 308 | if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU)) | 309 | if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU)) |
| 309 | hpte_r &= ~_PAGE_COHERENT; | 310 | hpte_r &= ~_PAGE_COHERENT; |
| 310 | 311 | ||
| 311 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); | 312 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); |
| 312 | if (unlikely(lpar_rc == H_PTEG_FULL)) { | 313 | if (unlikely(lpar_rc == H_PTEG_FULL)) { |
| 313 | if (!(vflags & HPTE_V_BOLTED)) | 314 | if (!(vflags & HPTE_V_BOLTED)) |
| 314 | pr_devel(" full\n"); | 315 | pr_devel(" full\n"); |
| 315 | return -1; | 316 | return -1; |
| 316 | } | 317 | } |
| 317 | 318 | ||
| 318 | /* | 319 | /* |
| 319 | * Since we try to ioremap PHBs we don't own, the pte insert | 320 | * Since we try to ioremap PHBs we don't own, the pte insert |
| 320 | * will fail. However we must catch the failure in hash_page | 321 | * will fail. However we must catch the failure in hash_page |
| 321 | * or we will loop forever, so return -2 in this case. | 322 | * or we will loop forever, so return -2 in this case. |
| 322 | */ | 323 | */ |
| 323 | if (unlikely(lpar_rc != H_SUCCESS)) { | 324 | if (unlikely(lpar_rc != H_SUCCESS)) { |
| 324 | if (!(vflags & HPTE_V_BOLTED)) | 325 | if (!(vflags & HPTE_V_BOLTED)) |
| 325 | pr_devel(" lpar err %lu\n", lpar_rc); | 326 | pr_devel(" lpar err %lu\n", lpar_rc); |
| 326 | return -2; | 327 | return -2; |
| 327 | } | 328 | } |
| 328 | if (!(vflags & HPTE_V_BOLTED)) | 329 | if (!(vflags & HPTE_V_BOLTED)) |
| 329 | pr_devel(" -> slot: %lu\n", slot & 7); | 330 | pr_devel(" -> slot: %lu\n", slot & 7); |
| 330 | 331 | ||
| 331 | /* Because of iSeries, we have to pass down the secondary | 332 | /* Because of iSeries, we have to pass down the secondary |
| 332 | * bucket bit here as well | 333 | * bucket bit here as well |
| 333 | */ | 334 | */ |
| 334 | return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3); | 335 | return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3); |
| 335 | } | 336 | } |
| 336 | 337 | ||
| 337 | static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock); | 338 | static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock); |
| 338 | 339 | ||
| 339 | static long pSeries_lpar_hpte_remove(unsigned long hpte_group) | 340 | static long pSeries_lpar_hpte_remove(unsigned long hpte_group) |
| 340 | { | 341 | { |
| 341 | unsigned long slot_offset; | 342 | unsigned long slot_offset; |
| 342 | unsigned long lpar_rc; | 343 | unsigned long lpar_rc; |
| 343 | int i; | 344 | int i; |
| 344 | unsigned long dummy1, dummy2; | 345 | unsigned long dummy1, dummy2; |
| 345 | 346 | ||
| 346 | /* pick a random slot to start at */ | 347 | /* pick a random slot to start at */ |
| 347 | slot_offset = mftb() & 0x7; | 348 | slot_offset = mftb() & 0x7; |
| 348 | 349 | ||
| 349 | for (i = 0; i < HPTES_PER_GROUP; i++) { | 350 | for (i = 0; i < HPTES_PER_GROUP; i++) { |
| 350 | 351 | ||
| 351 | /* don't remove a bolted entry */ | 352 | /* don't remove a bolted entry */ |
| 352 | lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, | 353 | lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, |
| 353 | (0x1UL << 4), &dummy1, &dummy2); | 354 | (0x1UL << 4), &dummy1, &dummy2); |
| 354 | if (lpar_rc == H_SUCCESS) | 355 | if (lpar_rc == H_SUCCESS) |
| 355 | return i; | 356 | return i; |
| 356 | BUG_ON(lpar_rc != H_NOT_FOUND); | 357 | BUG_ON(lpar_rc != H_NOT_FOUND); |
| 357 | 358 | ||
| 358 | slot_offset++; | 359 | slot_offset++; |
| 359 | slot_offset &= 0x7; | 360 | slot_offset &= 0x7; |
| 360 | } | 361 | } |
| 361 | 362 | ||
| 362 | return -1; | 363 | return -1; |
| 363 | } | 364 | } |
| 364 | 365 | ||
| 365 | static void pSeries_lpar_hptab_clear(void) | 366 | static void pSeries_lpar_hptab_clear(void) |
| 366 | { | 367 | { |
| 367 | unsigned long size_bytes = 1UL << ppc64_pft_size; | 368 | unsigned long size_bytes = 1UL << ppc64_pft_size; |
| 368 | unsigned long hpte_count = size_bytes >> 4; | 369 | unsigned long hpte_count = size_bytes >> 4; |
| 369 | unsigned long dummy1, dummy2, dword0; | 370 | unsigned long dummy1, dummy2, dword0; |
| 370 | long lpar_rc; | 371 | long lpar_rc; |
| 371 | int i; | 372 | int i; |
| 372 | 373 | ||
| 373 | /* TODO: Use bulk call */ | 374 | /* TODO: Use bulk call */ |
| 374 | for (i = 0; i < hpte_count; i++) { | 375 | for (i = 0; i < hpte_count; i++) { |
| 375 | /* don't remove HPTEs with VRMA mappings */ | 376 | /* don't remove HPTEs with VRMA mappings */ |
| 376 | lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG, | 377 | lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG, |
| 377 | &dummy1, &dummy2); | 378 | &dummy1, &dummy2); |
| 378 | if (lpar_rc == H_NOT_FOUND) { | 379 | if (lpar_rc == H_NOT_FOUND) { |
| 379 | lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1); | 380 | lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1); |
| 380 | if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK) | 381 | if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK) |
| 381 | != HPTE_V_VRMA_MASK)) | 382 | != HPTE_V_VRMA_MASK)) |
| 382 | /* Can be hpte for 1TB Seg. So remove it */ | 383 | /* Can be hpte for 1TB Seg. So remove it */ |
| 383 | plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2); | 384 | plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2); |
| 384 | } | 385 | } |
| 385 | } | 386 | } |
| 386 | } | 387 | } |
| 387 | 388 | ||
| 388 | /* | 389 | /* |
| 389 | * This computes the AVPN and B fields of the first dword of a HPTE, | 390 | * This computes the AVPN and B fields of the first dword of a HPTE, |
| 390 | * for use when we want to match an existing PTE. The bottom 7 bits | 391 | * for use when we want to match an existing PTE. The bottom 7 bits |
| 391 | * of the returned value are zero. | 392 | * of the returned value are zero. |
| 392 | */ | 393 | */ |
| 393 | static inline unsigned long hpte_encode_avpn(unsigned long va, int psize, | 394 | static inline unsigned long hpte_encode_avpn(unsigned long va, int psize, |
| 394 | int ssize) | 395 | int ssize) |
| 395 | { | 396 | { |
| 396 | unsigned long v; | 397 | unsigned long v; |
| 397 | 398 | ||
| 398 | v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); | 399 | v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); |
| 399 | v <<= HPTE_V_AVPN_SHIFT; | 400 | v <<= HPTE_V_AVPN_SHIFT; |
| 400 | v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT; | 401 | v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT; |
| 401 | return v; | 402 | return v; |
| 402 | } | 403 | } |
| 403 | 404 | ||
| 404 | /* | 405 | /* |
| 405 | * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and | 406 | * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and |
| 406 | * the low 3 bits of flags happen to line up. So no transform is needed. | 407 | * the low 3 bits of flags happen to line up. So no transform is needed. |
| 407 | * We can probably optimize here and assume the high bits of newpp are | 408 | * We can probably optimize here and assume the high bits of newpp are |
| 408 | * already zero. For now I am paranoid. | 409 | * already zero. For now I am paranoid. |
| 409 | */ | 410 | */ |
| 410 | static long pSeries_lpar_hpte_updatepp(unsigned long slot, | 411 | static long pSeries_lpar_hpte_updatepp(unsigned long slot, |
| 411 | unsigned long newpp, | 412 | unsigned long newpp, |
| 412 | unsigned long va, | 413 | unsigned long va, |
| 413 | int psize, int ssize, int local) | 414 | int psize, int ssize, int local) |
| 414 | { | 415 | { |
| 415 | unsigned long lpar_rc; | 416 | unsigned long lpar_rc; |
| 416 | unsigned long flags = (newpp & 7) | H_AVPN; | 417 | unsigned long flags = (newpp & 7) | H_AVPN; |
| 417 | unsigned long want_v; | 418 | unsigned long want_v; |
| 418 | 419 | ||
| 419 | want_v = hpte_encode_avpn(va, psize, ssize); | 420 | want_v = hpte_encode_avpn(va, psize, ssize); |
| 420 | 421 | ||
| 421 | pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", | 422 | pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", |
| 422 | want_v, slot, flags, psize); | 423 | want_v, slot, flags, psize); |
| 423 | 424 | ||
| 424 | lpar_rc = plpar_pte_protect(flags, slot, want_v); | 425 | lpar_rc = plpar_pte_protect(flags, slot, want_v); |
| 425 | 426 | ||
| 426 | if (lpar_rc == H_NOT_FOUND) { | 427 | if (lpar_rc == H_NOT_FOUND) { |
| 427 | pr_devel("not found !\n"); | 428 | pr_devel("not found !\n"); |
| 428 | return -1; | 429 | return -1; |
| 429 | } | 430 | } |
| 430 | 431 | ||
| 431 | pr_devel("ok\n"); | 432 | pr_devel("ok\n"); |
| 432 | 433 | ||
| 433 | BUG_ON(lpar_rc != H_SUCCESS); | 434 | BUG_ON(lpar_rc != H_SUCCESS); |
| 434 | 435 | ||
| 435 | return 0; | 436 | return 0; |
| 436 | } | 437 | } |
| 437 | 438 | ||
| 438 | static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) | 439 | static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) |
| 439 | { | 440 | { |
| 440 | unsigned long dword0; | 441 | unsigned long dword0; |
| 441 | unsigned long lpar_rc; | 442 | unsigned long lpar_rc; |
| 442 | unsigned long dummy_word1; | 443 | unsigned long dummy_word1; |
| 443 | unsigned long flags; | 444 | unsigned long flags; |
| 444 | 445 | ||
| 445 | /* Read 1 pte at a time */ | 446 | /* Read 1 pte at a time */ |
| 446 | /* Do not need RPN to logical page translation */ | 447 | /* Do not need RPN to logical page translation */ |
| 447 | /* No cross CEC PFT access */ | 448 | /* No cross CEC PFT access */ |
| 448 | flags = 0; | 449 | flags = 0; |
| 449 | 450 | ||
| 450 | lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1); | 451 | lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1); |
| 451 | 452 | ||
| 452 | BUG_ON(lpar_rc != H_SUCCESS); | 453 | BUG_ON(lpar_rc != H_SUCCESS); |
| 453 | 454 | ||
| 454 | return dword0; | 455 | return dword0; |
| 455 | } | 456 | } |
| 456 | 457 | ||
| 457 | static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize) | 458 | static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize) |
| 458 | { | 459 | { |
| 459 | unsigned long hash; | 460 | unsigned long hash; |
| 460 | unsigned long i; | 461 | unsigned long i; |
| 461 | long slot; | 462 | long slot; |
| 462 | unsigned long want_v, hpte_v; | 463 | unsigned long want_v, hpte_v; |
| 463 | 464 | ||
| 464 | hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); | 465 | hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); |
| 465 | want_v = hpte_encode_avpn(va, psize, ssize); | 466 | want_v = hpte_encode_avpn(va, psize, ssize); |
| 466 | 467 | ||
| 467 | /* Bolted entries are always in the primary group */ | 468 | /* Bolted entries are always in the primary group */ |
| 468 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | 469 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; |
| 469 | for (i = 0; i < HPTES_PER_GROUP; i++) { | 470 | for (i = 0; i < HPTES_PER_GROUP; i++) { |
| 470 | hpte_v = pSeries_lpar_hpte_getword0(slot); | 471 | hpte_v = pSeries_lpar_hpte_getword0(slot); |
| 471 | 472 | ||
| 472 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) | 473 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) |
| 473 | /* HPTE matches */ | 474 | /* HPTE matches */ |
| 474 | return slot; | 475 | return slot; |
| 475 | ++slot; | 476 | ++slot; |
| 476 | } | 477 | } |
| 477 | 478 | ||
| 478 | return -1; | 479 | return -1; |
| 479 | } | 480 | } |
| 480 | 481 | ||
| 481 | static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, | 482 | static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, |
| 482 | unsigned long ea, | 483 | unsigned long ea, |
| 483 | int psize, int ssize) | 484 | int psize, int ssize) |
| 484 | { | 485 | { |
| 485 | unsigned long lpar_rc, slot, vsid, va, flags; | 486 | unsigned long lpar_rc, slot, vsid, va, flags; |
| 486 | 487 | ||
| 487 | vsid = get_kernel_vsid(ea, ssize); | 488 | vsid = get_kernel_vsid(ea, ssize); |
| 488 | va = hpt_va(ea, vsid, ssize); | 489 | va = hpt_va(ea, vsid, ssize); |
| 489 | 490 | ||
| 490 | slot = pSeries_lpar_hpte_find(va, psize, ssize); | 491 | slot = pSeries_lpar_hpte_find(va, psize, ssize); |
| 491 | BUG_ON(slot == -1); | 492 | BUG_ON(slot == -1); |
| 492 | 493 | ||
| 493 | flags = newpp & 7; | 494 | flags = newpp & 7; |
| 494 | lpar_rc = plpar_pte_protect(flags, slot, 0); | 495 | lpar_rc = plpar_pte_protect(flags, slot, 0); |
| 495 | 496 | ||
| 496 | BUG_ON(lpar_rc != H_SUCCESS); | 497 | BUG_ON(lpar_rc != H_SUCCESS); |
| 497 | } | 498 | } |
| 498 | 499 | ||
| 499 | static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, | 500 | static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, |
| 500 | int psize, int ssize, int local) | 501 | int psize, int ssize, int local) |
| 501 | { | 502 | { |
| 502 | unsigned long want_v; | 503 | unsigned long want_v; |
| 503 | unsigned long lpar_rc; | 504 | unsigned long lpar_rc; |
| 504 | unsigned long dummy1, dummy2; | 505 | unsigned long dummy1, dummy2; |
| 505 | 506 | ||
| 506 | pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", | 507 | pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", |
| 507 | slot, va, psize, local); | 508 | slot, va, psize, local); |
| 508 | 509 | ||
| 509 | want_v = hpte_encode_avpn(va, psize, ssize); | 510 | want_v = hpte_encode_avpn(va, psize, ssize); |
| 510 | lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); | 511 | lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); |
| 511 | if (lpar_rc == H_NOT_FOUND) | 512 | if (lpar_rc == H_NOT_FOUND) |
| 512 | return; | 513 | return; |
| 513 | 514 | ||
| 514 | BUG_ON(lpar_rc != H_SUCCESS); | 515 | BUG_ON(lpar_rc != H_SUCCESS); |
| 515 | } | 516 | } |
| 516 | 517 | ||
| 517 | static void pSeries_lpar_hpte_removebolted(unsigned long ea, | 518 | static void pSeries_lpar_hpte_removebolted(unsigned long ea, |
| 518 | int psize, int ssize) | 519 | int psize, int ssize) |
| 519 | { | 520 | { |
| 520 | unsigned long slot, vsid, va; | 521 | unsigned long slot, vsid, va; |
| 521 | 522 | ||
| 522 | vsid = get_kernel_vsid(ea, ssize); | 523 | vsid = get_kernel_vsid(ea, ssize); |
| 523 | va = hpt_va(ea, vsid, ssize); | 524 | va = hpt_va(ea, vsid, ssize); |
| 524 | 525 | ||
| 525 | slot = pSeries_lpar_hpte_find(va, psize, ssize); | 526 | slot = pSeries_lpar_hpte_find(va, psize, ssize); |
| 526 | BUG_ON(slot == -1); | 527 | BUG_ON(slot == -1); |
| 527 | 528 | ||
| 528 | pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); | 529 | pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); |
| 529 | } | 530 | } |
| 530 | 531 | ||
| 531 | /* Flag bits for H_BULK_REMOVE */ | 532 | /* Flag bits for H_BULK_REMOVE */ |
| 532 | #define HBR_REQUEST 0x4000000000000000UL | 533 | #define HBR_REQUEST 0x4000000000000000UL |
| 533 | #define HBR_RESPONSE 0x8000000000000000UL | 534 | #define HBR_RESPONSE 0x8000000000000000UL |
| 534 | #define HBR_END 0xc000000000000000UL | 535 | #define HBR_END 0xc000000000000000UL |
| 535 | #define HBR_AVPN 0x0200000000000000UL | 536 | #define HBR_AVPN 0x0200000000000000UL |
| 536 | #define HBR_ANDCOND 0x0100000000000000UL | 537 | #define HBR_ANDCOND 0x0100000000000000UL |
| 537 | 538 | ||
| 538 | /* | 539 | /* |
| 539 | * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie | 540 | * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie |
| 540 | * lock. | 541 | * lock. |
| 541 | */ | 542 | */ |
| 542 | static void pSeries_lpar_flush_hash_range(unsigned long number, int local) | 543 | static void pSeries_lpar_flush_hash_range(unsigned long number, int local) |
| 543 | { | 544 | { |
| 544 | unsigned long i, pix, rc; | 545 | unsigned long i, pix, rc; |
| 545 | unsigned long flags = 0; | 546 | unsigned long flags = 0; |
| 546 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); | 547 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
| 547 | int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); | 548 | int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); |
| 548 | unsigned long param[9]; | 549 | unsigned long param[9]; |
| 549 | unsigned long va; | 550 | unsigned long va; |
| 550 | unsigned long hash, index, shift, hidx, slot; | 551 | unsigned long hash, index, shift, hidx, slot; |
| 551 | real_pte_t pte; | 552 | real_pte_t pte; |
| 552 | int psize, ssize; | 553 | int psize, ssize; |
| 553 | 554 | ||
| 554 | if (lock_tlbie) | 555 | if (lock_tlbie) |
| 555 | spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); | 556 | spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); |
| 556 | 557 | ||
| 557 | psize = batch->psize; | 558 | psize = batch->psize; |
| 558 | ssize = batch->ssize; | 559 | ssize = batch->ssize; |
| 559 | pix = 0; | 560 | pix = 0; |
| 560 | for (i = 0; i < number; i++) { | 561 | for (i = 0; i < number; i++) { |
| 561 | va = batch->vaddr[i]; | 562 | va = batch->vaddr[i]; |
| 562 | pte = batch->pte[i]; | 563 | pte = batch->pte[i]; |
| 563 | pte_iterate_hashed_subpages(pte, psize, va, index, shift) { | 564 | pte_iterate_hashed_subpages(pte, psize, va, index, shift) { |
| 564 | hash = hpt_hash(va, shift, ssize); | 565 | hash = hpt_hash(va, shift, ssize); |
| 565 | hidx = __rpte_to_hidx(pte, index); | 566 | hidx = __rpte_to_hidx(pte, index); |
| 566 | if (hidx & _PTEIDX_SECONDARY) | 567 | if (hidx & _PTEIDX_SECONDARY) |
| 567 | hash = ~hash; | 568 | hash = ~hash; |
| 568 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | 569 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; |
| 569 | slot += hidx & _PTEIDX_GROUP_IX; | 570 | slot += hidx & _PTEIDX_GROUP_IX; |
| 570 | if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | 571 | if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { |
| 571 | pSeries_lpar_hpte_invalidate(slot, va, psize, | 572 | pSeries_lpar_hpte_invalidate(slot, va, psize, |
| 572 | ssize, local); | 573 | ssize, local); |
| 573 | } else { | 574 | } else { |
| 574 | param[pix] = HBR_REQUEST | HBR_AVPN | slot; | 575 | param[pix] = HBR_REQUEST | HBR_AVPN | slot; |
| 575 | param[pix+1] = hpte_encode_avpn(va, psize, | 576 | param[pix+1] = hpte_encode_avpn(va, psize, |
| 576 | ssize); | 577 | ssize); |
| 577 | pix += 2; | 578 | pix += 2; |
| 578 | if (pix == 8) { | 579 | if (pix == 8) { |
| 579 | rc = plpar_hcall9(H_BULK_REMOVE, param, | 580 | rc = plpar_hcall9(H_BULK_REMOVE, param, |
| 580 | param[0], param[1], param[2], | 581 | param[0], param[1], param[2], |
| 581 | param[3], param[4], param[5], | 582 | param[3], param[4], param[5], |
| 582 | param[6], param[7]); | 583 | param[6], param[7]); |
| 583 | BUG_ON(rc != H_SUCCESS); | 584 | BUG_ON(rc != H_SUCCESS); |
| 584 | pix = 0; | 585 | pix = 0; |
| 585 | } | 586 | } |
| 586 | } | 587 | } |
| 587 | } pte_iterate_hashed_end(); | 588 | } pte_iterate_hashed_end(); |
| 588 | } | 589 | } |
| 589 | if (pix) { | 590 | if (pix) { |
| 590 | param[pix] = HBR_END; | 591 | param[pix] = HBR_END; |
| 591 | rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], | 592 | rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], |
| 592 | param[2], param[3], param[4], param[5], | 593 | param[2], param[3], param[4], param[5], |
| 593 | param[6], param[7]); | 594 | param[6], param[7]); |
| 594 | BUG_ON(rc != H_SUCCESS); | 595 | BUG_ON(rc != H_SUCCESS); |
| 595 | } | 596 | } |
| 596 | 597 | ||
| 597 | if (lock_tlbie) | 598 | if (lock_tlbie) |
| 598 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | 599 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); |
| 599 | } | 600 | } |
| 600 | 601 | ||
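H_BULK_REMOVE packs up to four translations per call: each entry occupies two of the eight 64-bit parameters (a control dword, then the AVPN to match), which is why the loop above flushes whenever pix reaches 8 and terminates a partially filled block with HBR_END. A minimal sketch of the packing, pulled out of the loop for clarity:

    /*
     * Illustrative layout of the param[] block handed to plpar_hcall9().
     * Each entry is a (control dword, AVPN) pair; four entries fill the
     * eight available parameters.
     */
    param[0] = HBR_REQUEST | HBR_AVPN | slot;          /* entry 0: control */
    param[1] = hpte_encode_avpn(va, psize, ssize);     /* entry 0: match   */
    /* ... up to three more (control, avpn) pairs ... */
    param[2] = HBR_END;    /* marks the end of a partially filled block */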
| 601 | void __init hpte_init_lpar(void) | 602 | void __init hpte_init_lpar(void) |
| 602 | { | 603 | { |
| 603 | ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; | 604 | ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; |
| 604 | ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp; | 605 | ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp; |
| 605 | ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; | 606 | ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; |
| 606 | ppc_md.hpte_insert = pSeries_lpar_hpte_insert; | 607 | ppc_md.hpte_insert = pSeries_lpar_hpte_insert; |
| 607 | ppc_md.hpte_remove = pSeries_lpar_hpte_remove; | 608 | ppc_md.hpte_remove = pSeries_lpar_hpte_remove; |
| 608 | ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted; | 609 | ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted; |
| 609 | ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; | 610 | ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; |
| 610 | ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; | 611 | ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; |
| 611 | } | 612 | } |
| 612 | 613 | ||
| 613 | #ifdef CONFIG_PPC_SMLPAR | 614 | #ifdef CONFIG_PPC_SMLPAR |
| 614 | #define CMO_FREE_HINT_DEFAULT 1 | 615 | #define CMO_FREE_HINT_DEFAULT 1 |
| 615 | static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT; | 616 | static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT; |
| 616 | 617 | ||
| 617 | static int __init cmo_free_hint(char *str) | 618 | static int __init cmo_free_hint(char *str) |
| 618 | { | 619 | { |
| 619 | char *parm; | 620 | char *parm; |
| 620 | parm = strstrip(str); | 621 | parm = strstrip(str); |
| 621 | 622 | ||
| 622 | if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) { | 623 | if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) { |
| 623 | printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n"); | 624 | printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n"); |
| 624 | cmo_free_hint_flag = 0; | 625 | cmo_free_hint_flag = 0; |
| 625 | return 1; | 626 | return 1; |
| 626 | } | 627 | } |
| 627 | 628 | ||
| 628 | cmo_free_hint_flag = 1; | 629 | cmo_free_hint_flag = 1; |
| 629 | printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n"); | 630 | printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n"); |
| 630 | 631 | ||
| 631 | if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0) | 632 | if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0) |
| 632 | return 1; | 633 | return 1; |
| 633 | 634 | ||
| 634 | return 0; | 635 | return 0; |
| 635 | } | 636 | } |
| 636 | 637 | ||
| 637 | __setup("cmo_free_hint=", cmo_free_hint); | 638 | __setup("cmo_free_hint=", cmo_free_hint); |
| 638 | 639 | ||
| 639 | static void pSeries_set_page_state(struct page *page, int order, | 640 | static void pSeries_set_page_state(struct page *page, int order, |
| 640 | unsigned long state) | 641 | unsigned long state) |
| 641 | { | 642 | { |
| 642 | int i, j; | 643 | int i, j; |
| 643 | unsigned long cmo_page_sz, addr; | 644 | unsigned long cmo_page_sz, addr; |
| 644 | 645 | ||
| 645 | cmo_page_sz = cmo_get_page_size(); | 646 | cmo_page_sz = cmo_get_page_size(); |
| 646 | addr = __pa((unsigned long)page_address(page)); | 647 | addr = __pa((unsigned long)page_address(page)); |
| 647 | 648 | ||
| 648 | for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) { | 649 | for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) { |
| 649 | for (j = 0; j < PAGE_SIZE; j += cmo_page_sz) | 650 | for (j = 0; j < PAGE_SIZE; j += cmo_page_sz) |
| 650 | plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0); | 651 | plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0); |
| 651 | } | 652 | } |
| 652 | } | 653 | } |
| 653 | 654 | ||
| 654 | void arch_free_page(struct page *page, int order) | 655 | void arch_free_page(struct page *page, int order) |
| 655 | { | 656 | { |
| 656 | if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO)) | 657 | if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO)) |
| 657 | return; | 658 | return; |
| 658 | 659 | ||
| 659 | pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED); | 660 | pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED); |
| 660 | } | 661 | } |
| 661 | EXPORT_SYMBOL(arch_free_page); | 662 | EXPORT_SYMBOL(arch_free_page); |
| 662 | 663 | ||
| 664 | #endif | ||
| 665 | |||
| 666 | #ifdef CONFIG_TRACEPOINTS | ||
| 667 | /* | ||
| 668 | * We optimise our hcall path by placing hcall_tracepoint_refcount | ||
| 669 | * directly in the TOC so we can check if the hcall tracepoints are | ||
| 670 | * enabled via a single load. | ||
| 671 | */ | ||
| 672 | |||
| 673 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | ||
| 674 | extern long hcall_tracepoint_refcount; | ||
| 675 | |||
| 676 | void hcall_tracepoint_regfunc(void) | ||
| 677 | { | ||
| 678 | hcall_tracepoint_refcount++; | ||
| 679 | } | ||
| 680 | |||
| 681 | void hcall_tracepoint_unregfunc(void) | ||
| 682 | { | ||
| 683 | hcall_tracepoint_refcount--; | ||
| 684 | } | ||
| 685 | |||
| 686 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) | ||
| 687 | { | ||
| 688 | trace_hcall_entry(opcode, args); | ||
| 689 | } | ||
| 690 | |||
| 691 | void __trace_hcall_exit(long opcode, unsigned long retval, | ||
| 692 | unsigned long *retbuf) | ||
| 693 | { | ||
| 694 | trace_hcall_exit(opcode, retval, retbuf); | ||
| 695 | } | ||
| 663 | #endif | 696 | #endif |
| 664 | 697 |
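The tracepoint glue above pairs with changes to the hcall entry path in arch/powerpc/platforms/pseries/hvCall.S (also part of this commit): because hcall_tracepoint_refcount sits in the TOC, the untraced fast path costs only a single load and branch. A C-level sketch of the idea, not the actual assembly (do_raw_hcall() is a hypothetical stand-in for the real H-call sequence):

    extern long hcall_tracepoint_refcount;      /* resident in the TOC */

    static unsigned long plpar_hcall_traced(unsigned long opcode,
                                            unsigned long *args)
    {
            unsigned long ret;

            if (unlikely(hcall_tracepoint_refcount))   /* one TOC load */
                    __trace_hcall_entry(opcode, args);

            ret = do_raw_hcall(opcode, args);          /* hypothetical */

            if (unlikely(hcall_tracepoint_refcount))
                    __trace_hcall_exit(opcode, ret, args);
            return ret;
    }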
include/linux/perf_counter.h
| 1 | /* | 1 | /* |
| 2 | * NOTE: this file will be removed in a future kernel release, it is | 2 | * NOTE: this file will be removed in a future kernel release, it is |
| 3 | * provided as a courtesy copy of user-space code that relies on the | 3 | * provided as a courtesy copy of user-space code that relies on the |
| 4 | * old (pre-rename) symbols and constants. | 4 | * old (pre-rename) symbols and constants. |
| 5 | * | 5 | * |
| 6 | * Performance events: | 6 | * Performance events: |
| 7 | * | 7 | * |
| 8 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | 8 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> |
| 9 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | 9 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar |
| 10 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | 10 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra |
| 11 | * | 11 | * |
| 12 | * Data type definitions, declarations, prototypes. | 12 | * Data type definitions, declarations, prototypes. |
| 13 | * | 13 | * |
| 14 | * Started by: Thomas Gleixner and Ingo Molnar | 14 | * Started by: Thomas Gleixner and Ingo Molnar |
| 15 | * | 15 | * |
| 16 | * For licencing details see kernel-base/COPYING | 16 | * For licencing details see kernel-base/COPYING |
| 17 | */ | 17 | */ |
| 18 | #ifndef _LINUX_PERF_COUNTER_H | 18 | #ifndef _LINUX_PERF_COUNTER_H |
| 19 | #define _LINUX_PERF_COUNTER_H | 19 | #define _LINUX_PERF_COUNTER_H |
| 20 | 20 | ||
| 21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
| 22 | #include <linux/ioctl.h> | 22 | #include <linux/ioctl.h> |
| 23 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> |
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * User-space ABI bits: | 26 | * User-space ABI bits: |
| 27 | */ | 27 | */ |
| 28 | 28 | ||
| 29 | /* | 29 | /* |
| 30 | * attr.type | 30 | * attr.type |
| 31 | */ | 31 | */ |
| 32 | enum perf_type_id { | 32 | enum perf_type_id { |
| 33 | PERF_TYPE_HARDWARE = 0, | 33 | PERF_TYPE_HARDWARE = 0, |
| 34 | PERF_TYPE_SOFTWARE = 1, | 34 | PERF_TYPE_SOFTWARE = 1, |
| 35 | PERF_TYPE_TRACEPOINT = 2, | 35 | PERF_TYPE_TRACEPOINT = 2, |
| 36 | PERF_TYPE_HW_CACHE = 3, | 36 | PERF_TYPE_HW_CACHE = 3, |
| 37 | PERF_TYPE_RAW = 4, | 37 | PERF_TYPE_RAW = 4, |
| 38 | 38 | ||
| 39 | PERF_TYPE_MAX, /* non-ABI */ | 39 | PERF_TYPE_MAX, /* non-ABI */ |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * Generalized performance counter event types, used by the | 43 | * Generalized performance counter event types, used by the |
| 44 | * attr.event_id parameter of the sys_perf_counter_open() | 44 | * attr.event_id parameter of the sys_perf_counter_open() |
| 45 | * syscall: | 45 | * syscall: |
| 46 | */ | 46 | */ |
| 47 | enum perf_hw_id { | 47 | enum perf_hw_id { |
| 48 | /* | 48 | /* |
| 49 | * Common hardware events, generalized by the kernel: | 49 | * Common hardware events, generalized by the kernel: |
| 50 | */ | 50 | */ |
| 51 | PERF_COUNT_HW_CPU_CYCLES = 0, | 51 | PERF_COUNT_HW_CPU_CYCLES = 0, |
| 52 | PERF_COUNT_HW_INSTRUCTIONS = 1, | 52 | PERF_COUNT_HW_INSTRUCTIONS = 1, |
| 53 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | 53 | PERF_COUNT_HW_CACHE_REFERENCES = 2, |
| 54 | PERF_COUNT_HW_CACHE_MISSES = 3, | 54 | PERF_COUNT_HW_CACHE_MISSES = 3, |
| 55 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | 55 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, |
| 56 | PERF_COUNT_HW_BRANCH_MISSES = 5, | 56 | PERF_COUNT_HW_BRANCH_MISSES = 5, |
| 57 | PERF_COUNT_HW_BUS_CYCLES = 6, | 57 | PERF_COUNT_HW_BUS_CYCLES = 6, |
| 58 | 58 | ||
| 59 | PERF_COUNT_HW_MAX, /* non-ABI */ | 59 | PERF_COUNT_HW_MAX, /* non-ABI */ |
| 60 | }; | 60 | }; |
| 61 | 61 | ||
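For orientation, a minimal user-space sketch that counts cycles for the calling thread using these IDs. It assumes the __NR_perf_counter_open alias defined near the bottom of this header and elides most error handling:

    #include <linux/perf_counter.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            struct perf_counter_attr attr;
            unsigned long long count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;

            /* self (pid 0), any cpu (-1), no group (-1), no flags */
            fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            /* ... workload to measure ... */

            read(fd, &count, sizeof(count));   /* default read_format */
            printf("cycles: %llu\n", count);
            return 0;
    }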
| 62 | /* | 62 | /* |
| 63 | * Generalized hardware cache counters: | 63 | * Generalized hardware cache counters: |
| 64 | * | 64 | * |
| 65 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | 65 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x |
| 66 | * { read, write, prefetch } x | 66 | * { read, write, prefetch } x |
| 67 | * { accesses, misses } | 67 | * { accesses, misses } |
| 68 | */ | 68 | */ |
| 69 | enum perf_hw_cache_id { | 69 | enum perf_hw_cache_id { |
| 70 | PERF_COUNT_HW_CACHE_L1D = 0, | 70 | PERF_COUNT_HW_CACHE_L1D = 0, |
| 71 | PERF_COUNT_HW_CACHE_L1I = 1, | 71 | PERF_COUNT_HW_CACHE_L1I = 1, |
| 72 | PERF_COUNT_HW_CACHE_LL = 2, | 72 | PERF_COUNT_HW_CACHE_LL = 2, |
| 73 | PERF_COUNT_HW_CACHE_DTLB = 3, | 73 | PERF_COUNT_HW_CACHE_DTLB = 3, |
| 74 | PERF_COUNT_HW_CACHE_ITLB = 4, | 74 | PERF_COUNT_HW_CACHE_ITLB = 4, |
| 75 | PERF_COUNT_HW_CACHE_BPU = 5, | 75 | PERF_COUNT_HW_CACHE_BPU = 5, |
| 76 | 76 | ||
| 77 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | 77 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ |
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | enum perf_hw_cache_op_id { | 80 | enum perf_hw_cache_op_id { |
| 81 | PERF_COUNT_HW_CACHE_OP_READ = 0, | 81 | PERF_COUNT_HW_CACHE_OP_READ = 0, |
| 82 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, | 82 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, |
| 83 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, | 83 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, |
| 84 | 84 | ||
| 85 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ | 85 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ |
| 86 | }; | 86 | }; |
| 87 | 87 | ||
| 88 | enum perf_hw_cache_op_result_id { | 88 | enum perf_hw_cache_op_result_id { |
| 89 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, | 89 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, |
| 90 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, | 90 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, |
| 91 | 91 | ||
| 92 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ | 92 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ |
| 93 | }; | 93 | }; |
| 94 | 94 | ||
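These three enums are packed into a single attr.config for PERF_TYPE_HW_CACHE events, one byte per field, as done by tools/perf/util/parse-events.c (also touched by this commit). For example, L1-data read misses:

    __u64 config = PERF_COUNT_HW_CACHE_L1D |
                   (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
                   (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);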
| 95 | /* | 95 | /* |
| 96 | * Special "software" counters provided by the kernel, even if the hardware | 96 | * Special "software" counters provided by the kernel, even if the hardware |
| 97 | * does not support performance counters. These counters measure various | 97 | * does not support performance counters. These counters measure various |
| 98 | * physical and sw events of the kernel (and allow the profiling of them as | 98 | * physical and sw events of the kernel (and allow the profiling of them as |
| 99 | * well): | 99 | * well): |
| 100 | */ | 100 | */ |
| 101 | enum perf_sw_ids { | 101 | enum perf_sw_ids { |
| 102 | PERF_COUNT_SW_CPU_CLOCK = 0, | 102 | PERF_COUNT_SW_CPU_CLOCK = 0, |
| 103 | PERF_COUNT_SW_TASK_CLOCK = 1, | 103 | PERF_COUNT_SW_TASK_CLOCK = 1, |
| 104 | PERF_COUNT_SW_PAGE_FAULTS = 2, | 104 | PERF_COUNT_SW_PAGE_FAULTS = 2, |
| 105 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | 105 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, |
| 106 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | 106 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, |
| 107 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | 107 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, |
| 108 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | 108 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, |
| 109 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
| 110 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
| 109 | 111 | ||
| 110 | PERF_COUNT_SW_MAX, /* non-ABI */ | 112 | PERF_COUNT_SW_MAX, /* non-ABI */ |
| 111 | }; | 113 | }; |
| 112 | 114 | ||
| 113 | /* | 115 | /* |
| 114 | * Bits that can be set in attr.sample_type to request information | 116 | * Bits that can be set in attr.sample_type to request information |
| 115 | * in the overflow packets. | 117 | * in the overflow packets. |
| 116 | */ | 118 | */ |
| 117 | enum perf_counter_sample_format { | 119 | enum perf_counter_sample_format { |
| 118 | PERF_SAMPLE_IP = 1U << 0, | 120 | PERF_SAMPLE_IP = 1U << 0, |
| 119 | PERF_SAMPLE_TID = 1U << 1, | 121 | PERF_SAMPLE_TID = 1U << 1, |
| 120 | PERF_SAMPLE_TIME = 1U << 2, | 122 | PERF_SAMPLE_TIME = 1U << 2, |
| 121 | PERF_SAMPLE_ADDR = 1U << 3, | 123 | PERF_SAMPLE_ADDR = 1U << 3, |
| 122 | PERF_SAMPLE_READ = 1U << 4, | 124 | PERF_SAMPLE_READ = 1U << 4, |
| 123 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | 125 | PERF_SAMPLE_CALLCHAIN = 1U << 5, |
| 124 | PERF_SAMPLE_ID = 1U << 6, | 126 | PERF_SAMPLE_ID = 1U << 6, |
| 125 | PERF_SAMPLE_CPU = 1U << 7, | 127 | PERF_SAMPLE_CPU = 1U << 7, |
| 126 | PERF_SAMPLE_PERIOD = 1U << 8, | 128 | PERF_SAMPLE_PERIOD = 1U << 8, |
| 127 | PERF_SAMPLE_STREAM_ID = 1U << 9, | 129 | PERF_SAMPLE_STREAM_ID = 1U << 9, |
| 128 | PERF_SAMPLE_RAW = 1U << 10, | 130 | PERF_SAMPLE_RAW = 1U << 10, |
| 129 | 131 | ||
| 130 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ | 132 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ |
| 131 | }; | 133 | }; |
| 132 | 134 | ||
| 133 | /* | 135 | /* |
| 134 | * The format of the data returned by read() on a perf counter fd, | 136 | * The format of the data returned by read() on a perf counter fd, |
| 135 | * as specified by attr.read_format: | 137 | * as specified by attr.read_format: |
| 136 | * | 138 | * |
| 137 | * struct read_format { | 139 | * struct read_format { |
| 138 | * { u64 value; | 140 | * { u64 value; |
| 139 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | 141 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED |
| 140 | * { u64 time_running; } && PERF_FORMAT_RUNNING | 142 | * { u64 time_running; } && PERF_FORMAT_RUNNING |
| 141 | * { u64 id; } && PERF_FORMAT_ID | 143 | * { u64 id; } && PERF_FORMAT_ID |
| 142 | * } && !PERF_FORMAT_GROUP | 144 | * } && !PERF_FORMAT_GROUP |
| 143 | * | 145 | * |
| 144 | * { u64 nr; | 146 | * { u64 nr; |
| 145 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | 147 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED |
| 146 | * { u64 time_running; } && PERF_FORMAT_RUNNING | 148 | * { u64 time_running; } && PERF_FORMAT_RUNNING |
| 147 | * { u64 value; | 149 | * { u64 value; |
| 148 | * { u64 id; } && PERF_FORMAT_ID | 150 | * { u64 id; } && PERF_FORMAT_ID |
| 149 | * } cntr[nr]; | 151 | * } cntr[nr]; |
| 150 | * } && PERF_FORMAT_GROUP | 152 | * } && PERF_FORMAT_GROUP |
| 151 | * }; | 153 | * }; |
| 152 | */ | 154 | */ |
| 153 | enum perf_counter_read_format { | 155 | enum perf_counter_read_format { |
| 154 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | 156 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, |
| 155 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | 157 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, |
| 156 | PERF_FORMAT_ID = 1U << 2, | 158 | PERF_FORMAT_ID = 1U << 2, |
| 157 | PERF_FORMAT_GROUP = 1U << 3, | 159 | PERF_FORMAT_GROUP = 1U << 3, |
| 158 | 160 | ||
| 159 | PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ | 161 | PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ |
| 160 | }; | 162 | }; |
| 161 | 163 | ||
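A short sketch of consuming the non-group layout documented above, assuming the counter was opened with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; the two timestamps let user space scale the raw count when the counter was multiplexed on the PMU:

    struct read_scaled {
            __u64 value;
            __u64 time_enabled;    /* PERF_FORMAT_TOTAL_TIME_ENABLED */
            __u64 time_running;    /* PERF_FORMAT_TOTAL_TIME_RUNNING */
    } rs;

    read(fd, &rs, sizeof(rs));

    /* Estimate the full-speed count if the counter shared the PMU */
    __u64 scaled = rs.time_running ?
            (__u64)((double)rs.value * rs.time_enabled / rs.time_running) :
            rs.value;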
| 162 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ | 164 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ |
| 163 | 165 | ||
| 164 | /* | 166 | /* |
| 165 | * Hardware event to monitor via a performance monitoring counter: | 167 | * Hardware event to monitor via a performance monitoring counter: |
| 166 | */ | 168 | */ |
| 167 | struct perf_counter_attr { | 169 | struct perf_counter_attr { |
| 168 | 170 | ||
| 169 | /* | 171 | /* |
| 170 | * Major type: hardware/software/tracepoint/etc. | 172 | * Major type: hardware/software/tracepoint/etc. |
| 171 | */ | 173 | */ |
| 172 | __u32 type; | 174 | __u32 type; |
| 173 | 175 | ||
| 174 | /* | 176 | /* |
| 175 | * Size of the attr structure, for fwd/bwd compat. | 177 | * Size of the attr structure, for fwd/bwd compat. |
| 176 | */ | 178 | */ |
| 177 | __u32 size; | 179 | __u32 size; |
| 178 | 180 | ||
| 179 | /* | 181 | /* |
| 180 | * Type specific configuration information. | 182 | * Type specific configuration information. |
| 181 | */ | 183 | */ |
| 182 | __u64 config; | 184 | __u64 config; |
| 183 | 185 | ||
| 184 | union { | 186 | union { |
| 185 | __u64 sample_period; | 187 | __u64 sample_period; |
| 186 | __u64 sample_freq; | 188 | __u64 sample_freq; |
| 187 | }; | 189 | }; |
| 188 | 190 | ||
| 189 | __u64 sample_type; | 191 | __u64 sample_type; |
| 190 | __u64 read_format; | 192 | __u64 read_format; |
| 191 | 193 | ||
| 192 | __u64 disabled : 1, /* off by default */ | 194 | __u64 disabled : 1, /* off by default */ |
| 193 | inherit : 1, /* children inherit it */ | 195 | inherit : 1, /* children inherit it */ |
| 194 | pinned : 1, /* must always be on PMU */ | 196 | pinned : 1, /* must always be on PMU */ |
| 195 | exclusive : 1, /* only group on PMU */ | 197 | exclusive : 1, /* only group on PMU */ |
| 196 | exclude_user : 1, /* don't count user */ | 198 | exclude_user : 1, /* don't count user */ |
| 197 | exclude_kernel : 1, /* ditto kernel */ | 199 | exclude_kernel : 1, /* ditto kernel */ |
| 198 | exclude_hv : 1, /* ditto hypervisor */ | 200 | exclude_hv : 1, /* ditto hypervisor */ |
| 199 | exclude_idle : 1, /* don't count when idle */ | 201 | exclude_idle : 1, /* don't count when idle */ |
| 200 | mmap : 1, /* include mmap data */ | 202 | mmap : 1, /* include mmap data */ |
| 201 | comm : 1, /* include comm data */ | 203 | comm : 1, /* include comm data */ |
| 202 | freq : 1, /* use freq, not period */ | 204 | freq : 1, /* use freq, not period */ |
| 203 | inherit_stat : 1, /* per task counts */ | 205 | inherit_stat : 1, /* per task counts */ |
| 204 | enable_on_exec : 1, /* next exec enables */ | 206 | enable_on_exec : 1, /* next exec enables */ |
| 205 | task : 1, /* trace fork/exit */ | 207 | task : 1, /* trace fork/exit */ |
| 206 | watermark : 1, /* wakeup_watermark */ | 208 | watermark : 1, /* wakeup_watermark */ |
| 207 | 209 | ||
| 208 | __reserved_1 : 49; | 210 | __reserved_1 : 49; |
| 209 | 211 | ||
| 210 | union { | 212 | union { |
| 211 | __u32 wakeup_events; /* wakeup every n events */ | 213 | __u32 wakeup_events; /* wakeup every n events */ |
| 212 | __u32 wakeup_watermark; /* bytes before wakeup */ | 214 | __u32 wakeup_watermark; /* bytes before wakeup */ |
| 213 | }; | 215 | }; |
| 214 | __u32 __reserved_2; | 216 | __u32 __reserved_2; |
| 215 | 217 | ||
| 216 | __u64 __reserved_3; | 218 | __u64 __reserved_3; |
| 217 | }; | 219 | }; |
| 218 | 220 | ||
| 219 | /* | 221 | /* |
| 220 | * Ioctls that can be done on a perf counter fd: | 222 | * Ioctls that can be done on a perf counter fd: |
| 221 | */ | 223 | */ |
| 222 | #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) | 224 | #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) |
| 223 | #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) | 225 | #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) |
| 224 | #define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) | 226 | #define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) |
| 225 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) | 227 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) |
| 226 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) | 228 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) |
| 227 | #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) | 229 | #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) |
| 228 | #define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *) | 230 | #define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *) |
| 229 | 231 | ||
| 230 | enum perf_counter_ioc_flags { | 232 | enum perf_counter_ioc_flags { |
| 231 | PERF_IOC_FLAG_GROUP = 1U << 0, | 233 | PERF_IOC_FLAG_GROUP = 1U << 0, |
| 232 | }; | 234 | }; |
| 233 | 235 | ||
| 234 | /* | 236 | /* |
| 235 | * Structure of the page that can be mapped via mmap | 237 | * Structure of the page that can be mapped via mmap |
| 236 | */ | 238 | */ |
| 237 | struct perf_counter_mmap_page { | 239 | struct perf_counter_mmap_page { |
| 238 | __u32 version; /* version number of this structure */ | 240 | __u32 version; /* version number of this structure */ |
| 239 | __u32 compat_version; /* lowest version this is compat with */ | 241 | __u32 compat_version; /* lowest version this is compat with */ |
| 240 | 242 | ||
| 241 | /* | 243 | /* |
| 242 | * Bits needed to read the hw counters in user-space. | 244 | * Bits needed to read the hw counters in user-space. |
| 243 | * | 245 | * |
| 244 | * u32 seq; | 246 | * u32 seq; |
| 245 | * s64 count; | 247 | * s64 count; |
| 246 | * | 248 | * |
| 247 | * do { | 249 | * do { |
| 248 | * seq = pc->lock; | 250 | * seq = pc->lock; |
| 249 | * | 251 | * |
| 250 | * barrier() | 252 | * barrier() |
| 251 | * if (pc->index) { | 253 | * if (pc->index) { |
| 252 | * count = pmc_read(pc->index - 1); | 254 | * count = pmc_read(pc->index - 1); |
| 253 | * count += pc->offset; | 255 | * count += pc->offset; |
| 254 | * } else | 256 | * } else |
| 255 | * goto regular_read; | 257 | * goto regular_read; |
| 256 | * | 258 | * |
| 257 | * barrier(); | 259 | * barrier(); |
| 258 | * } while (pc->lock != seq); | 260 | * } while (pc->lock != seq); |
| 259 | * | 261 | * |
| 261 | * NOTE: for obvious reasons this only works on self-monitoring | 263 | * NOTE: for obvious reasons this only works on self-monitoring |
| 261 | * processes. | 263 | * processes. |
| 262 | */ | 264 | */ |
| 263 | __u32 lock; /* seqlock for synchronization */ | 265 | __u32 lock; /* seqlock for synchronization */ |
| 264 | __u32 index; /* hardware counter identifier */ | 266 | __u32 index; /* hardware counter identifier */ |
| 265 | __s64 offset; /* add to hardware counter value */ | 267 | __s64 offset; /* add to hardware counter value */ |
| 266 | __u64 time_enabled; /* time counter active */ | 268 | __u64 time_enabled; /* time counter active */ |
| 267 | __u64 time_running; /* time counter on cpu */ | 269 | __u64 time_running; /* time counter on cpu */ |
| 268 | 270 | ||
| 269 | /* | 271 | /* |
| 270 | * Hole for extension of the self monitor capabilities | 272 | * Hole for extension of the self monitor capabilities |
| 271 | */ | 273 | */ |
| 272 | 274 | ||
| 273 | __u64 __reserved[123]; /* align to 1k */ | 275 | __u64 __reserved[123]; /* align to 1k */ |
| 274 | 276 | ||
| 275 | /* | 277 | /* |
| 276 | * Control data for the mmap() data buffer. | 278 | * Control data for the mmap() data buffer. |
| 277 | * | 279 | * |
| 278 | * User-space reading the @data_head value should issue an rmb(), on | 280 | * User-space reading the @data_head value should issue an rmb(), on |
| 279 | * SMP capable platforms, after reading this value -- see | 281 | * SMP capable platforms, after reading this value -- see |
| 280 | * perf_counter_wakeup(). | 282 | * perf_counter_wakeup(). |
| 281 | * | 283 | * |
| 282 | * When the mapping is PROT_WRITE the @data_tail value should be | 284 | * When the mapping is PROT_WRITE the @data_tail value should be |
| 283 | * written by userspace to reflect the last read data. In this case | 285 | * written by userspace to reflect the last read data. In this case |
| 284 | * the kernel will not over-write unread data. | 286 | * the kernel will not over-write unread data. |
| 285 | */ | 287 | */ |
| 286 | __u64 data_head; /* head in the data section */ | 288 | __u64 data_head; /* head in the data section */ |
| 287 | __u64 data_tail; /* user-space written tail */ | 289 | __u64 data_tail; /* user-space written tail */ |
| 288 | }; | 290 | }; |
| 289 | 291 | ||
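The locking recipe in the comment above, expanded into a self-contained helper. pmc_read() is a hypothetical stand-in for the architecture's user-level counter read (e.g. rdpmc on x86); when index is zero the value is not user-readable and the caller should fall back to read() on the counter fd:

    #include <stdint.h>

    extern uint64_t pmc_read(uint32_t idx);     /* hypothetical */

    static int mmap_page_read(volatile struct perf_counter_mmap_page *pc,
                              int64_t *count)
    {
            uint32_t seq;
            int64_t val;

            do {
                    seq = pc->lock;
                    __sync_synchronize();               /* barrier() */
                    if (!pc->index)
                            return -1;                  /* use read(fd) */
                    val = pmc_read(pc->index - 1) + pc->offset;
                    __sync_synchronize();
            } while (pc->lock != seq);                  /* kernel raced us */

            *count = val;
            return 0;
    }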
| 290 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) | 292 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) |
| 291 | #define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) | 293 | #define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) |
| 292 | #define PERF_EVENT_MISC_KERNEL (1 << 0) | 294 | #define PERF_EVENT_MISC_KERNEL (1 << 0) |
| 293 | #define PERF_EVENT_MISC_USER (2 << 0) | 295 | #define PERF_EVENT_MISC_USER (2 << 0) |
| 294 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) | 296 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) |
| 295 | 297 | ||
| 296 | struct perf_event_header { | 298 | struct perf_event_header { |
| 297 | __u32 type; | 299 | __u32 type; |
| 298 | __u16 misc; | 300 | __u16 misc; |
| 299 | __u16 size; | 301 | __u16 size; |
| 300 | }; | 302 | }; |
| 301 | 303 | ||
| 302 | enum perf_event_type { | 304 | enum perf_event_type { |
| 303 | 305 | ||
| 304 | /* | 306 | /* |
| 305 | * The MMAP events record the PROT_EXEC mappings so that we can | 307 | * The MMAP events record the PROT_EXEC mappings so that we can |
| 306 | * correlate userspace IPs to code. They have the following structure: | 308 | * correlate userspace IPs to code. They have the following structure: |
| 307 | * | 309 | * |
| 308 | * struct { | 310 | * struct { |
| 309 | * struct perf_event_header header; | 311 | * struct perf_event_header header; |
| 310 | * | 312 | * |
| 311 | * u32 pid, tid; | 313 | * u32 pid, tid; |
| 312 | * u64 addr; | 314 | * u64 addr; |
| 313 | * u64 len; | 315 | * u64 len; |
| 314 | * u64 pgoff; | 316 | * u64 pgoff; |
| 315 | * char filename[]; | 317 | * char filename[]; |
| 316 | * }; | 318 | * }; |
| 317 | */ | 319 | */ |
| 318 | PERF_EVENT_MMAP = 1, | 320 | PERF_EVENT_MMAP = 1, |
| 319 | 321 | ||
| 320 | /* | 322 | /* |
| 321 | * struct { | 323 | * struct { |
| 322 | * struct perf_event_header header; | 324 | * struct perf_event_header header; |
| 323 | * u64 id; | 325 | * u64 id; |
| 324 | * u64 lost; | 326 | * u64 lost; |
| 325 | * }; | 327 | * }; |
| 326 | */ | 328 | */ |
| 327 | PERF_EVENT_LOST = 2, | 329 | PERF_EVENT_LOST = 2, |
| 328 | 330 | ||
| 329 | /* | 331 | /* |
| 330 | * struct { | 332 | * struct { |
| 331 | * struct perf_event_header header; | 333 | * struct perf_event_header header; |
| 332 | * | 334 | * |
| 333 | * u32 pid, tid; | 335 | * u32 pid, tid; |
| 334 | * char comm[]; | 336 | * char comm[]; |
| 335 | * }; | 337 | * }; |
| 336 | */ | 338 | */ |
| 337 | PERF_EVENT_COMM = 3, | 339 | PERF_EVENT_COMM = 3, |
| 338 | 340 | ||
| 339 | /* | 341 | /* |
| 340 | * struct { | 342 | * struct { |
| 341 | * struct perf_event_header header; | 343 | * struct perf_event_header header; |
| 342 | * u32 pid, ppid; | 344 | * u32 pid, ppid; |
| 343 | * u32 tid, ptid; | 345 | * u32 tid, ptid; |
| 344 | * u64 time; | 346 | * u64 time; |
| 345 | * }; | 347 | * }; |
| 346 | */ | 348 | */ |
| 347 | PERF_EVENT_EXIT = 4, | 349 | PERF_EVENT_EXIT = 4, |
| 348 | 350 | ||
| 349 | /* | 351 | /* |
| 350 | * struct { | 352 | * struct { |
| 351 | * struct perf_event_header header; | 353 | * struct perf_event_header header; |
| 352 | * u64 time; | 354 | * u64 time; |
| 353 | * u64 id; | 355 | * u64 id; |
| 354 | * u64 stream_id; | 356 | * u64 stream_id; |
| 355 | * }; | 357 | * }; |
| 356 | */ | 358 | */ |
| 357 | PERF_EVENT_THROTTLE = 5, | 359 | PERF_EVENT_THROTTLE = 5, |
| 358 | PERF_EVENT_UNTHROTTLE = 6, | 360 | PERF_EVENT_UNTHROTTLE = 6, |
| 359 | 361 | ||
| 360 | /* | 362 | /* |
| 361 | * struct { | 363 | * struct { |
| 362 | * struct perf_event_header header; | 364 | * struct perf_event_header header; |
| 363 | * u32 pid, ppid; | 365 | * u32 pid, ppid; |
| 364 | * u32 tid, ptid; | 366 | * u32 tid, ptid; |
| 365 | * u64 time; | 367 | * u64 time; |
| 366 | * }; | 368 | * }; |
| 367 | */ | 369 | */ |
| 368 | PERF_EVENT_FORK = 7, | 370 | PERF_EVENT_FORK = 7, |
| 369 | 371 | ||
| 370 | /* | 372 | /* |
| 371 | * struct { | 373 | * struct { |
| 372 | * struct perf_event_header header; | 374 | * struct perf_event_header header; |
| 373 | * u32 pid, tid; | 375 | * u32 pid, tid; |
| 374 | * | 376 | * |
| 375 | * struct read_format values; | 377 | * struct read_format values; |
| 376 | * }; | 378 | * }; |
| 377 | */ | 379 | */ |
| 378 | PERF_EVENT_READ = 8, | 380 | PERF_EVENT_READ = 8, |
| 379 | 381 | ||
| 380 | /* | 382 | /* |
| 381 | * struct { | 383 | * struct { |
| 382 | * struct perf_event_header header; | 384 | * struct perf_event_header header; |
| 383 | * | 385 | * |
| 384 | * { u64 ip; } && PERF_SAMPLE_IP | 386 | * { u64 ip; } && PERF_SAMPLE_IP |
| 385 | * { u32 pid, tid; } && PERF_SAMPLE_TID | 387 | * { u32 pid, tid; } && PERF_SAMPLE_TID |
| 386 | * { u64 time; } && PERF_SAMPLE_TIME | 388 | * { u64 time; } && PERF_SAMPLE_TIME |
| 387 | * { u64 addr; } && PERF_SAMPLE_ADDR | 389 | * { u64 addr; } && PERF_SAMPLE_ADDR |
| 388 | * { u64 id; } && PERF_SAMPLE_ID | 390 | * { u64 id; } && PERF_SAMPLE_ID |
| 389 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID | 391 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID |
| 390 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | 392 | * { u32 cpu, res; } && PERF_SAMPLE_CPU |
| 391 | * { u64 period; } && PERF_SAMPLE_PERIOD | 393 | * { u64 period; } && PERF_SAMPLE_PERIOD |
| 392 | * | 394 | * |
| 393 | * { struct read_format values; } && PERF_SAMPLE_READ | 395 | * { struct read_format values; } && PERF_SAMPLE_READ |
| 394 | * | 396 | * |
| 395 | * { u64 nr, | 397 | * { u64 nr, |
| 396 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | 398 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN |
| 397 | * | 399 | * |
| 398 | * # | 400 | * # |
| 399 | * # The RAW record below is opaque data wrt the ABI | 401 | * # The RAW record below is opaque data wrt the ABI |
| 400 | * # | 402 | * # |
| 403 | * # That is, the ABI doesn't make any promises wrt | 405 | * # That is, the ABI doesn't make any promises wrt |
| 402 | * # the stability of its content, it may vary depending | 404 | * # the stability of its content, it may vary depending |
| 403 | * # on event, hardware, kernel version and phase of | 405 | * # on event, hardware, kernel version and phase of |
| 404 | * # the moon. | 406 | * # the moon. |
| 405 | * # | 407 | * # |
| 406 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. | 408 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. |
| 407 | * # | 409 | * # |
| 408 | * | 410 | * |
| 409 | * { u32 size; | 411 | * { u32 size; |
| 410 | * char data[size];}&& PERF_SAMPLE_RAW | 412 | * char data[size];}&& PERF_SAMPLE_RAW |
| 411 | * }; | 413 | * }; |
| 412 | */ | 414 | */ |
| 413 | PERF_EVENT_SAMPLE = 9, | 415 | PERF_EVENT_SAMPLE = 9, |
| 414 | 416 | ||
| 415 | PERF_EVENT_MAX, /* non-ABI */ | 417 | PERF_EVENT_MAX, /* non-ABI */ |
| 416 | }; | 418 | }; |
| 417 | 419 | ||
| 418 | enum perf_callchain_context { | 420 | enum perf_callchain_context { |
| 419 | PERF_CONTEXT_HV = (__u64)-32, | 421 | PERF_CONTEXT_HV = (__u64)-32, |
| 420 | PERF_CONTEXT_KERNEL = (__u64)-128, | 422 | PERF_CONTEXT_KERNEL = (__u64)-128, |
| 421 | PERF_CONTEXT_USER = (__u64)-512, | 423 | PERF_CONTEXT_USER = (__u64)-512, |
| 422 | 424 | ||
| 423 | PERF_CONTEXT_GUEST = (__u64)-2048, | 425 | PERF_CONTEXT_GUEST = (__u64)-2048, |
| 424 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, | 426 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, |
| 425 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, | 427 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, |
| 426 | 428 | ||
| 427 | PERF_CONTEXT_MAX = (__u64)-4095, | 429 | PERF_CONTEXT_MAX = (__u64)-4095, |
| 428 | }; | 430 | }; |
| 429 | 431 | ||
| 430 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) | 432 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) |
| 431 | #define PERF_FLAG_FD_OUTPUT (1U << 1) | 433 | #define PERF_FLAG_FD_OUTPUT (1U << 1) |
| 432 | 434 | ||
| 433 | /* | 435 | /* |
| 434 | * In case some app still references the old symbols: | 436 | * In case some app still references the old symbols: |
| 435 | */ | 437 | */ |
| 436 | 438 | ||
| 437 | #define __NR_perf_counter_open __NR_perf_event_open | 439 | #define __NR_perf_counter_open __NR_perf_event_open |
| 438 | 440 | ||
| 439 | #define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE | 441 | #define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE |
| 440 | #define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE | 442 | #define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE |
| 441 | 443 | ||
| 442 | #endif /* _LINUX_PERF_COUNTER_H */ | 444 | #endif /* _LINUX_PERF_COUNTER_H */ |
| 443 | 445 |
include/linux/perf_event.h
| 1 | /* | 1 | /* |
| 2 | * Performance events: | 2 | * Performance events: |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | 4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> |
| 5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | 5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar |
| 6 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | 6 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra |
| 7 | * | 7 | * |
| 8 | * Data type definitions, declarations, prototypes. | 8 | * Data type definitions, declarations, prototypes. |
| 9 | * | 9 | * |
| 10 | * Started by: Thomas Gleixner and Ingo Molnar | 10 | * Started by: Thomas Gleixner and Ingo Molnar |
| 11 | * | 11 | * |
| 12 | * For licencing details see kernel-base/COPYING | 12 | * For licencing details see kernel-base/COPYING |
| 13 | */ | 13 | */ |
| 14 | #ifndef _LINUX_PERF_EVENT_H | 14 | #ifndef _LINUX_PERF_EVENT_H |
| 15 | #define _LINUX_PERF_EVENT_H | 15 | #define _LINUX_PERF_EVENT_H |
| 16 | 16 | ||
| 17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
| 18 | #include <linux/ioctl.h> | 18 | #include <linux/ioctl.h> |
| 19 | #include <asm/byteorder.h> | 19 | #include <asm/byteorder.h> |
| 20 | 20 | ||
| 21 | /* | 21 | /* |
| 22 | * User-space ABI bits: | 22 | * User-space ABI bits: |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * attr.type | 26 | * attr.type |
| 27 | */ | 27 | */ |
| 28 | enum perf_type_id { | 28 | enum perf_type_id { |
| 29 | PERF_TYPE_HARDWARE = 0, | 29 | PERF_TYPE_HARDWARE = 0, |
| 30 | PERF_TYPE_SOFTWARE = 1, | 30 | PERF_TYPE_SOFTWARE = 1, |
| 31 | PERF_TYPE_TRACEPOINT = 2, | 31 | PERF_TYPE_TRACEPOINT = 2, |
| 32 | PERF_TYPE_HW_CACHE = 3, | 32 | PERF_TYPE_HW_CACHE = 3, |
| 33 | PERF_TYPE_RAW = 4, | 33 | PERF_TYPE_RAW = 4, |
| 34 | 34 | ||
| 35 | PERF_TYPE_MAX, /* non-ABI */ | 35 | PERF_TYPE_MAX, /* non-ABI */ |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | /* | 38 | /* |
| 39 | * Generalized performance event event_id types, used by the | 39 | * Generalized performance event event_id types, used by the |
| 40 | * attr.config parameter of the sys_perf_event_open() | 40 | * attr.config parameter of the sys_perf_event_open() |
| 41 | * syscall: | 41 | * syscall: |
| 42 | */ | 42 | */ |
| 43 | enum perf_hw_id { | 43 | enum perf_hw_id { |
| 44 | /* | 44 | /* |
| 45 | * Common hardware events, generalized by the kernel: | 45 | * Common hardware events, generalized by the kernel: |
| 46 | */ | 46 | */ |
| 47 | PERF_COUNT_HW_CPU_CYCLES = 0, | 47 | PERF_COUNT_HW_CPU_CYCLES = 0, |
| 48 | PERF_COUNT_HW_INSTRUCTIONS = 1, | 48 | PERF_COUNT_HW_INSTRUCTIONS = 1, |
| 49 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | 49 | PERF_COUNT_HW_CACHE_REFERENCES = 2, |
| 50 | PERF_COUNT_HW_CACHE_MISSES = 3, | 50 | PERF_COUNT_HW_CACHE_MISSES = 3, |
| 51 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | 51 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, |
| 52 | PERF_COUNT_HW_BRANCH_MISSES = 5, | 52 | PERF_COUNT_HW_BRANCH_MISSES = 5, |
| 53 | PERF_COUNT_HW_BUS_CYCLES = 6, | 53 | PERF_COUNT_HW_BUS_CYCLES = 6, |
| 54 | 54 | ||
| 55 | PERF_COUNT_HW_MAX, /* non-ABI */ | 55 | PERF_COUNT_HW_MAX, /* non-ABI */ |
| 56 | }; | 56 | }; |
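To make the generalized IDs concrete: a minimal user-space sketch that opens a CPU-cycles counter on the calling task (error handling omitted; the helper name is ours, not part of the ABI):

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static int open_cycles_counter(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);		/* fwd/bwd compat */
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.disabled = 1;			/* enable via ioctl later */

		/* pid 0 = self, cpu -1 = any, no group, no flags */
		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}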
| 57 | 57 | ||
| 58 | /* | 58 | /* |
| 59 | * Generalized hardware cache events: | 59 | * Generalized hardware cache events: |
| 60 | * | 60 | * |
| 61 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | 61 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x |
| 62 | * { read, write, prefetch } x | 62 | * { read, write, prefetch } x |
| 63 | * { accesses, misses } | 63 | * { accesses, misses } |
| 64 | */ | 64 | */ |
| 65 | enum perf_hw_cache_id { | 65 | enum perf_hw_cache_id { |
| 66 | PERF_COUNT_HW_CACHE_L1D = 0, | 66 | PERF_COUNT_HW_CACHE_L1D = 0, |
| 67 | PERF_COUNT_HW_CACHE_L1I = 1, | 67 | PERF_COUNT_HW_CACHE_L1I = 1, |
| 68 | PERF_COUNT_HW_CACHE_LL = 2, | 68 | PERF_COUNT_HW_CACHE_LL = 2, |
| 69 | PERF_COUNT_HW_CACHE_DTLB = 3, | 69 | PERF_COUNT_HW_CACHE_DTLB = 3, |
| 70 | PERF_COUNT_HW_CACHE_ITLB = 4, | 70 | PERF_COUNT_HW_CACHE_ITLB = 4, |
| 71 | PERF_COUNT_HW_CACHE_BPU = 5, | 71 | PERF_COUNT_HW_CACHE_BPU = 5, |
| 72 | 72 | ||
| 73 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | 73 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | enum perf_hw_cache_op_id { | 76 | enum perf_hw_cache_op_id { |
| 77 | PERF_COUNT_HW_CACHE_OP_READ = 0, | 77 | PERF_COUNT_HW_CACHE_OP_READ = 0, |
| 78 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, | 78 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, |
| 79 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, | 79 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, |
| 80 | 80 | ||
| 81 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ | 81 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ |
| 82 | }; | 82 | }; |
| 83 | 83 | ||
| 84 | enum perf_hw_cache_op_result_id { | 84 | enum perf_hw_cache_op_result_id { |
| 85 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, | 85 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, |
| 86 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, | 86 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, |
| 87 | 87 | ||
| 88 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ | 88 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ |
| 89 | }; | 89 | }; |
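The three cache enums combine into a single attr.config for PERF_TYPE_HW_CACHE events, one byte per axis of the { cache, op, result } product above (this packing matches how tools/perf builds cache events; the helper is ours):

	/* config = cache_id | (op_id << 8) | (result_id << 16) */
	static __u64 hw_cache_config(__u64 id, __u64 op, __u64 result)
	{
		return id | (op << 8) | (result << 16);
	}

	/* e.g. L1-D read misses: */
	attr.type   = PERF_TYPE_HW_CACHE;
	attr.config = hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
				      PERF_COUNT_HW_CACHE_OP_READ,
				      PERF_COUNT_HW_CACHE_RESULT_MISS);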
| 90 | 90 | ||
| 91 | /* | 91 | /* |
| 92 | * Special "software" events provided by the kernel, even if the hardware | 92 | * Special "software" events provided by the kernel, even if the hardware |
| 93 | * does not support performance events. These events measure various | 93 | * does not support performance events. These events measure various |
| 94 | * physical and sw events of the kernel (and allow the profiling of them as | 94 | * physical and sw events of the kernel (and allow the profiling of them as |
| 95 | * well): | 95 | * well): |
| 96 | */ | 96 | */ |
| 97 | enum perf_sw_ids { | 97 | enum perf_sw_ids { |
| 98 | PERF_COUNT_SW_CPU_CLOCK = 0, | 98 | PERF_COUNT_SW_CPU_CLOCK = 0, |
| 99 | PERF_COUNT_SW_TASK_CLOCK = 1, | 99 | PERF_COUNT_SW_TASK_CLOCK = 1, |
| 100 | PERF_COUNT_SW_PAGE_FAULTS = 2, | 100 | PERF_COUNT_SW_PAGE_FAULTS = 2, |
| 101 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | 101 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, |
| 102 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | 102 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, |
| 103 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | 103 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, |
| 104 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | 104 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, |
| 105 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
| 106 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
| 105 | 107 | ||
| 106 | PERF_COUNT_SW_MAX, /* non-ABI */ | 108 | PERF_COUNT_SW_MAX, /* non-ABI */ |
| 107 | }; | 109 | }; |
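The right-hand column shows the two software events this merge adds: PERF_COUNT_SW_ALIGNMENT_FAULTS and PERF_COUNT_SW_EMULATION_FAULTS, fed by the new PowerPC fault hooks. Counting one of them looks exactly like the hardware case, only with the software type (sketch, attr set up as in the earlier example):

	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_EMULATION_FAULTS;	/* new in this merge */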
| 108 | 110 | ||
| 109 | /* | 111 | /* |
| 110 | * Bits that can be set in attr.sample_type to request information | 112 | * Bits that can be set in attr.sample_type to request information |
| 111 | * in the overflow packets. | 113 | * in the overflow packets. |
| 112 | */ | 114 | */ |
| 113 | enum perf_event_sample_format { | 115 | enum perf_event_sample_format { |
| 114 | PERF_SAMPLE_IP = 1U << 0, | 116 | PERF_SAMPLE_IP = 1U << 0, |
| 115 | PERF_SAMPLE_TID = 1U << 1, | 117 | PERF_SAMPLE_TID = 1U << 1, |
| 116 | PERF_SAMPLE_TIME = 1U << 2, | 118 | PERF_SAMPLE_TIME = 1U << 2, |
| 117 | PERF_SAMPLE_ADDR = 1U << 3, | 119 | PERF_SAMPLE_ADDR = 1U << 3, |
| 118 | PERF_SAMPLE_READ = 1U << 4, | 120 | PERF_SAMPLE_READ = 1U << 4, |
| 119 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | 121 | PERF_SAMPLE_CALLCHAIN = 1U << 5, |
| 120 | PERF_SAMPLE_ID = 1U << 6, | 122 | PERF_SAMPLE_ID = 1U << 6, |
| 121 | PERF_SAMPLE_CPU = 1U << 7, | 123 | PERF_SAMPLE_CPU = 1U << 7, |
| 122 | PERF_SAMPLE_PERIOD = 1U << 8, | 124 | PERF_SAMPLE_PERIOD = 1U << 8, |
| 123 | PERF_SAMPLE_STREAM_ID = 1U << 9, | 125 | PERF_SAMPLE_STREAM_ID = 1U << 9, |
| 124 | PERF_SAMPLE_RAW = 1U << 10, | 126 | PERF_SAMPLE_RAW = 1U << 10, |
| 125 | 127 | ||
| 126 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ | 128 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ |
| 127 | }; | 129 | }; |
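These bits are ORed into attr.sample_type, and each set bit appends the matching field to every PERF_RECORD_SAMPLE, in the fixed order documented further down. For example:

	/* record ip, pid/tid and the sampling period on each overflow */
	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;
	attr.sample_period = 100000;	/* one sample per 100k events */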
| 128 | 130 | ||
| 129 | /* | 131 | /* |
| 130 | * The format of the data returned by read() on a perf event fd, | 132 | * The format of the data returned by read() on a perf event fd, |
| 131 | * as specified by attr.read_format: | 133 | * as specified by attr.read_format: |
| 132 | * | 134 | * |
| 133 | * struct read_format { | 135 | * struct read_format { |
| 134 | * { u64 value; | 136 | * { u64 value; |
| 135 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | 137 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED |
| 136 | * { u64 time_running; } && PERF_FORMAT_RUNNING | 138 | * { u64 time_running; } && PERF_FORMAT_RUNNING |
| 137 | * { u64 id; } && PERF_FORMAT_ID | 139 | * { u64 id; } && PERF_FORMAT_ID |
| 138 | * } && !PERF_FORMAT_GROUP | 140 | * } && !PERF_FORMAT_GROUP |
| 139 | * | 141 | * |
| 140 | * { u64 nr; | 142 | * { u64 nr; |
| 141 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | 143 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED |
| 142 | * { u64 time_running; } && PERF_FORMAT_RUNNING | 144 | * { u64 time_running; } && PERF_FORMAT_RUNNING |
| 143 | * { u64 value; | 145 | * { u64 value; |
| 144 | * { u64 id; } && PERF_FORMAT_ID | 146 | * { u64 id; } && PERF_FORMAT_ID |
| 145 | * } cntr[nr]; | 147 | * } cntr[nr]; |
| 146 | * } && PERF_FORMAT_GROUP | 148 | * } && PERF_FORMAT_GROUP |
| 147 | * }; | 149 | * }; |
| 148 | */ | 150 | */ |
| 149 | enum perf_event_read_format { | 151 | enum perf_event_read_format { |
| 150 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | 152 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, |
| 151 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | 153 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, |
| 152 | PERF_FORMAT_ID = 1U << 2, | 154 | PERF_FORMAT_ID = 1U << 2, |
| 153 | PERF_FORMAT_GROUP = 1U << 3, | 155 | PERF_FORMAT_GROUP = 1U << 3, |
| 154 | 156 | ||
| 155 | PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ | 157 | PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ |
| 156 | }; | 158 | }; |
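For the common !PERF_FORMAT_GROUP case with both time fields requested, read() returns three u64 values; the enabled/running ratio is what lets a consumer scale counts when the PMU was time-multiplexed. A hedged sketch (the struct name is ours):

	struct read_result {
		__u64 value;		/* the count itself */
		__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	} rr;

	read(fd, &rr, sizeof(rr));
	double scaled = rr.value;
	if (rr.time_running && rr.time_running < rr.time_enabled)
		scaled = rr.value * (double)rr.time_enabled / rr.time_running;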
| 157 | 159 | ||
| 158 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ | 160 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ |
| 159 | 161 | ||
| 160 | /* | 162 | /* |
| 161 | * Hardware event_id to monitor via a performance monitoring event: | 163 | * Hardware event_id to monitor via a performance monitoring event: |
| 162 | */ | 164 | */ |
| 163 | struct perf_event_attr { | 165 | struct perf_event_attr { |
| 164 | 166 | ||
| 165 | /* | 167 | /* |
| 166 | * Major type: hardware/software/tracepoint/etc. | 168 | * Major type: hardware/software/tracepoint/etc. |
| 167 | */ | 169 | */ |
| 168 | __u32 type; | 170 | __u32 type; |
| 169 | 171 | ||
| 170 | /* | 172 | /* |
| 171 | * Size of the attr structure, for fwd/bwd compat. | 173 | * Size of the attr structure, for fwd/bwd compat. |
| 172 | */ | 174 | */ |
| 173 | __u32 size; | 175 | __u32 size; |
| 174 | 176 | ||
| 175 | /* | 177 | /* |
| 176 | * Type specific configuration information. | 178 | * Type specific configuration information. |
| 177 | */ | 179 | */ |
| 178 | __u64 config; | 180 | __u64 config; |
| 179 | 181 | ||
| 180 | union { | 182 | union { |
| 181 | __u64 sample_period; | 183 | __u64 sample_period; |
| 182 | __u64 sample_freq; | 184 | __u64 sample_freq; |
| 183 | }; | 185 | }; |
| 184 | 186 | ||
| 185 | __u64 sample_type; | 187 | __u64 sample_type; |
| 186 | __u64 read_format; | 188 | __u64 read_format; |
| 187 | 189 | ||
| 188 | __u64 disabled : 1, /* off by default */ | 190 | __u64 disabled : 1, /* off by default */ |
| 189 | inherit : 1, /* children inherit it */ | 191 | inherit : 1, /* children inherit it */ |
| 190 | pinned : 1, /* must always be on PMU */ | 192 | pinned : 1, /* must always be on PMU */ |
| 191 | exclusive : 1, /* only group on PMU */ | 193 | exclusive : 1, /* only group on PMU */ |
| 192 | exclude_user : 1, /* don't count user */ | 194 | exclude_user : 1, /* don't count user */ |
| 193 | exclude_kernel : 1, /* ditto kernel */ | 195 | exclude_kernel : 1, /* ditto kernel */ |
| 194 | exclude_hv : 1, /* ditto hypervisor */ | 196 | exclude_hv : 1, /* ditto hypervisor */ |
| 195 | exclude_idle : 1, /* don't count when idle */ | 197 | exclude_idle : 1, /* don't count when idle */ |
| 196 | mmap : 1, /* include mmap data */ | 198 | mmap : 1, /* include mmap data */ |
| 197 | comm : 1, /* include comm data */ | 199 | comm : 1, /* include comm data */ |
| 198 | freq : 1, /* use freq, not period */ | 200 | freq : 1, /* use freq, not period */ |
| 199 | inherit_stat : 1, /* per task counts */ | 201 | inherit_stat : 1, /* per task counts */ |
| 200 | enable_on_exec : 1, /* next exec enables */ | 202 | enable_on_exec : 1, /* next exec enables */ |
| 201 | task : 1, /* trace fork/exit */ | 203 | task : 1, /* trace fork/exit */ |
| 202 | watermark : 1, /* wakeup_watermark */ | 204 | watermark : 1, /* wakeup_watermark */ |
| 203 | 205 | ||
| 204 | __reserved_1 : 49; | 206 | __reserved_1 : 49; |
| 205 | 207 | ||
| 206 | union { | 208 | union { |
| 207 | __u32 wakeup_events; /* wakeup every n events */ | 209 | __u32 wakeup_events; /* wakeup every n events */ |
| 208 | __u32 wakeup_watermark; /* bytes before wakeup */ | 210 | __u32 wakeup_watermark; /* bytes before wakeup */ |
| 209 | }; | 211 | }; |
| 210 | __u32 __reserved_2; | 212 | __u32 __reserved_2; |
| 211 | 213 | ||
| 212 | __u64 __reserved_3; | 214 | __u64 __reserved_3; |
| 213 | }; | 215 | }; |
| 214 | 216 | ||
| 215 | /* | 217 | /* |
| 216 | * Ioctls that can be done on a perf event fd: | 218 | * Ioctls that can be done on a perf event fd: |
| 217 | */ | 219 | */ |
| 218 | #define PERF_EVENT_IOC_ENABLE _IO ('$', 0) | 220 | #define PERF_EVENT_IOC_ENABLE _IO ('$', 0) |
| 219 | #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) | 221 | #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) |
| 220 | #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) | 222 | #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) |
| 221 | #define PERF_EVENT_IOC_RESET _IO ('$', 3) | 223 | #define PERF_EVENT_IOC_RESET _IO ('$', 3) |
| 222 | #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) | 224 | #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) |
| 223 | #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) | 225 | #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) |
| 224 | #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) | 226 | #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) |
| 225 | 227 | ||
| 226 | enum perf_event_ioc_flags { | 228 | enum perf_event_ioc_flags { |
| 227 | PERF_IOC_FLAG_GROUP = 1U << 0, | 229 | PERF_IOC_FLAG_GROUP = 1U << 0, |
| 228 | }; | 230 | }; |
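A typical measurement brackets the workload with these ioctls; continuing the earlier sketch that opened the event with attr.disabled = 1:

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under test ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));	/* a bare u64 when read_format is 0 */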
| 229 | 231 | ||
| 230 | /* | 232 | /* |
| 231 | * Structure of the page that can be mapped via mmap | 233 | * Structure of the page that can be mapped via mmap |
| 232 | */ | 234 | */ |
| 233 | struct perf_event_mmap_page { | 235 | struct perf_event_mmap_page { |
| 234 | __u32 version; /* version number of this structure */ | 236 | __u32 version; /* version number of this structure */ |
| 235 | __u32 compat_version; /* lowest version this is compat with */ | 237 | __u32 compat_version; /* lowest version this is compat with */ |
| 236 | 238 | ||
| 237 | /* | 239 | /* |
| 238 | * Bits needed to read the hw events in user-space. | 240 | * Bits needed to read the hw events in user-space. |
| 239 | * | 241 | * |
| 240 | * u32 seq; | 242 | * u32 seq; |
| 241 | * s64 count; | 243 | * s64 count; |
| 242 | * | 244 | * |
| 243 | * do { | 245 | * do { |
| 244 | * seq = pc->lock; | 246 | * seq = pc->lock; |
| 245 | * | 247 | * |
| 246 | * barrier() | 248 | * barrier() |
| 247 | * if (pc->index) { | 249 | * if (pc->index) { |
| 248 | * count = pmc_read(pc->index - 1); | 250 | * count = pmc_read(pc->index - 1); |
| 249 | * count += pc->offset; | 251 | * count += pc->offset; |
| 250 | * } else | 252 | * } else |
| 251 | * goto regular_read; | 253 | * goto regular_read; |
| 252 | * | 254 | * |
| 253 | * barrier(); | 255 | * barrier(); |
| 254 | * } while (pc->lock != seq); | 256 | * } while (pc->lock != seq); |
| 255 | * | 257 | * |
| 256 | * NOTE: for obvious reasons this only works on self-monitoring | 258 | * NOTE: for obvious reasons this only works on self-monitoring |
| 257 | * processes. | 259 | * processes. |
| 258 | */ | 260 | */ |
| 259 | __u32 lock; /* seqlock for synchronization */ | 261 | __u32 lock; /* seqlock for synchronization */ |
| 260 | __u32 index; /* hardware event identifier */ | 262 | __u32 index; /* hardware event identifier */ |
| 261 | __s64 offset; /* add to hardware event value */ | 263 | __s64 offset; /* add to hardware event value */ |
| 262 | __u64 time_enabled; /* time event active */ | 264 | __u64 time_enabled; /* time event active */ |
| 263 | __u64 time_running; /* time event on cpu */ | 265 | __u64 time_running; /* time event on cpu */ |
| 264 | 266 | ||
| 265 | /* | 267 | /* |
| 266 | * Hole for extension of the self monitor capabilities | 268 | * Hole for extension of the self monitor capabilities |
| 267 | */ | 269 | */ |
| 268 | 270 | ||
| 269 | __u64 __reserved[123]; /* align to 1k */ | 271 | __u64 __reserved[123]; /* align to 1k */ |
| 270 | 272 | ||
| 271 | /* | 273 | /* |
| 272 | * Control data for the mmap() data buffer. | 274 | * Control data for the mmap() data buffer. |
| 273 | * | 275 | * |
| 274 | * User-space reading the @data_head value should issue an rmb(), on | 276 | * User-space reading the @data_head value should issue an rmb(), on |
| 275 | * SMP capable platforms, after reading this value -- see | 277 | * SMP capable platforms, after reading this value -- see |
| 276 | * perf_event_wakeup(). | 278 | * perf_event_wakeup(). |
| 277 | * | 279 | * |
| 278 | * When the mapping is PROT_WRITE the @data_tail value should be | 280 | * When the mapping is PROT_WRITE the @data_tail value should be |
| 279 | * written by userspace to reflect the last read data. In this case | 281 | * written by userspace to reflect the last read data. In this case |
| 280 | * the kernel will not over-write unread data. | 282 | * the kernel will not over-write unread data. |
| 281 | */ | 283 | */ |
| 282 | __u64 data_head; /* head in the data section */ | 284 | __u64 data_head; /* head in the data section */ |
| 283 | __u64 data_tail; /* user-space written tail */ | 285 | __u64 data_tail; /* user-space written tail */ |
| 284 | }; | 286 | }; |
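Putting the @data_head/@data_tail contract into code, a user-space consumer of a PROT_READ|PROT_WRITE mapping runs roughly the loop below (a sketch: rmb() stands for the platform read barrier the comment asks for, and base, page_size and record handling are assumptions):

	struct perf_event_mmap_page *pc = base;
	char *data = (char *)base + page_size;	/* ring buffer follows the page */
	__u64 tail = pc->data_tail;
	__u64 head = pc->data_head;

	rmb();					/* see the perf_event_wakeup() note */
	while (tail < head) {
		/* consume the record at data + (tail % buf_size) ... */
		tail += record_size;
	}
	pc->data_tail = tail;			/* kernel may now overwrite up to here */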
| 285 | 287 | ||
| 286 | #define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0) | 288 | #define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0) |
| 287 | #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) | 289 | #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) |
| 288 | #define PERF_RECORD_MISC_KERNEL (1 << 0) | 290 | #define PERF_RECORD_MISC_KERNEL (1 << 0) |
| 289 | #define PERF_RECORD_MISC_USER (2 << 0) | 291 | #define PERF_RECORD_MISC_USER (2 << 0) |
| 290 | #define PERF_RECORD_MISC_HYPERVISOR (3 << 0) | 292 | #define PERF_RECORD_MISC_HYPERVISOR (3 << 0) |
| 291 | 293 | ||
| 292 | struct perf_event_header { | 294 | struct perf_event_header { |
| 293 | __u32 type; | 295 | __u32 type; |
| 294 | __u16 misc; | 296 | __u16 misc; |
| 295 | __u16 size; | 297 | __u16 size; |
| 296 | }; | 298 | }; |
| 297 | 299 | ||
| 298 | enum perf_event_type { | 300 | enum perf_event_type { |
| 299 | 301 | ||
| 300 | /* | 302 | /* |
| 301 | * The MMAP events record the PROT_EXEC mappings so that we can | 303 | * The MMAP events record the PROT_EXEC mappings so that we can |
| 302 | * correlate userspace IPs to code. They have the following structure: | 304 | * correlate userspace IPs to code. They have the following structure: |
| 303 | * | 305 | * |
| 304 | * struct { | 306 | * struct { |
| 305 | * struct perf_event_header header; | 307 | * struct perf_event_header header; |
| 306 | * | 308 | * |
| 307 | * u32 pid, tid; | 309 | * u32 pid, tid; |
| 308 | * u64 addr; | 310 | * u64 addr; |
| 309 | * u64 len; | 311 | * u64 len; |
| 310 | * u64 pgoff; | 312 | * u64 pgoff; |
| 311 | * char filename[]; | 313 | * char filename[]; |
| 312 | * }; | 314 | * }; |
| 313 | */ | 315 | */ |
| 314 | PERF_RECORD_MMAP = 1, | 316 | PERF_RECORD_MMAP = 1, |
| 315 | 317 | ||
| 316 | /* | 318 | /* |
| 317 | * struct { | 319 | * struct { |
| 318 | * struct perf_event_header header; | 320 | * struct perf_event_header header; |
| 319 | * u64 id; | 321 | * u64 id; |
| 320 | * u64 lost; | 322 | * u64 lost; |
| 321 | * }; | 323 | * }; |
| 322 | */ | 324 | */ |
| 323 | PERF_RECORD_LOST = 2, | 325 | PERF_RECORD_LOST = 2, |
| 324 | 326 | ||
| 325 | /* | 327 | /* |
| 326 | * struct { | 328 | * struct { |
| 327 | * struct perf_event_header header; | 329 | * struct perf_event_header header; |
| 328 | * | 330 | * |
| 329 | * u32 pid, tid; | 331 | * u32 pid, tid; |
| 330 | * char comm[]; | 332 | * char comm[]; |
| 331 | * }; | 333 | * }; |
| 332 | */ | 334 | */ |
| 333 | PERF_RECORD_COMM = 3, | 335 | PERF_RECORD_COMM = 3, |
| 334 | 336 | ||
| 335 | /* | 337 | /* |
| 336 | * struct { | 338 | * struct { |
| 337 | * struct perf_event_header header; | 339 | * struct perf_event_header header; |
| 338 | * u32 pid, ppid; | 340 | * u32 pid, ppid; |
| 339 | * u32 tid, ptid; | 341 | * u32 tid, ptid; |
| 340 | * u64 time; | 342 | * u64 time; |
| 341 | * }; | 343 | * }; |
| 342 | */ | 344 | */ |
| 343 | PERF_RECORD_EXIT = 4, | 345 | PERF_RECORD_EXIT = 4, |
| 344 | 346 | ||
| 345 | /* | 347 | /* |
| 346 | * struct { | 348 | * struct { |
| 347 | * struct perf_event_header header; | 349 | * struct perf_event_header header; |
| 348 | * u64 time; | 350 | * u64 time; |
| 349 | * u64 id; | 351 | * u64 id; |
| 350 | * u64 stream_id; | 352 | * u64 stream_id; |
| 351 | * }; | 353 | * }; |
| 352 | */ | 354 | */ |
| 353 | PERF_RECORD_THROTTLE = 5, | 355 | PERF_RECORD_THROTTLE = 5, |
| 354 | PERF_RECORD_UNTHROTTLE = 6, | 356 | PERF_RECORD_UNTHROTTLE = 6, |
| 355 | 357 | ||
| 356 | /* | 358 | /* |
| 357 | * struct { | 359 | * struct { |
| 358 | * struct perf_event_header header; | 360 | * struct perf_event_header header; |
| 359 | * u32 pid, ppid; | 361 | * u32 pid, ppid; |
| 360 | * u32 tid, ptid; | 362 | * u32 tid, ptid; |
| 361 | * u64 time; | 363 | * u64 time; |
| 362 | * }; | 364 | * }; |
| 363 | */ | 365 | */ |
| 364 | PERF_RECORD_FORK = 7, | 366 | PERF_RECORD_FORK = 7, |
| 365 | 367 | ||
| 366 | /* | 368 | /* |
| 367 | * struct { | 369 | * struct { |
| 368 | * struct perf_event_header header; | 370 | * struct perf_event_header header; |
| 369 | * u32 pid, tid; | 371 | * u32 pid, tid; |
| 370 | * | 372 | * |
| 371 | * struct read_format values; | 373 | * struct read_format values; |
| 372 | * }; | 374 | * }; |
| 373 | */ | 375 | */ |
| 374 | PERF_RECORD_READ = 8, | 376 | PERF_RECORD_READ = 8, |
| 375 | 377 | ||
| 376 | /* | 378 | /* |
| 377 | * struct { | 379 | * struct { |
| 378 | * struct perf_event_header header; | 380 | * struct perf_event_header header; |
| 379 | * | 381 | * |
| 380 | * { u64 ip; } && PERF_SAMPLE_IP | 382 | * { u64 ip; } && PERF_SAMPLE_IP |
| 381 | * { u32 pid, tid; } && PERF_SAMPLE_TID | 383 | * { u32 pid, tid; } && PERF_SAMPLE_TID |
| 382 | * { u64 time; } && PERF_SAMPLE_TIME | 384 | * { u64 time; } && PERF_SAMPLE_TIME |
| 383 | * { u64 addr; } && PERF_SAMPLE_ADDR | 385 | * { u64 addr; } && PERF_SAMPLE_ADDR |
| 384 | * { u64 id; } && PERF_SAMPLE_ID | 386 | * { u64 id; } && PERF_SAMPLE_ID |
| 385 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID | 387 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID |
| 386 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | 388 | * { u32 cpu, res; } && PERF_SAMPLE_CPU |
| 387 | * { u64 period; } && PERF_SAMPLE_PERIOD | 389 | * { u64 period; } && PERF_SAMPLE_PERIOD |
| 388 | * | 390 | * |
| 389 | * { struct read_format values; } && PERF_SAMPLE_READ | 391 | * { struct read_format values; } && PERF_SAMPLE_READ |
| 390 | * | 392 | * |
| 391 | * { u64 nr, | 393 | * { u64 nr, |
| 392 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | 394 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN |
| 393 | * | 395 | * |
| 394 | * # | 396 | * # |
| 395 | * # The RAW record below is opaque data wrt the ABI | 397 | * # The RAW record below is opaque data wrt the ABI |
| 396 | * # | 398 | * # |
| 397 | * # That is, the ABI doesn't make any promises wrt | 399 | * # That is, the ABI doesn't make any promises wrt |
| 398 | * # the stability of its content, it may vary depending | 400 | * # the stability of its content, it may vary depending |
| 399 | * # on event, hardware, kernel version and phase of | 401 | * # on event, hardware, kernel version and phase of |
| 400 | * # the moon. | 402 | * # the moon. |
| 401 | * # | 403 | * # |
| 402 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. | 404 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. |
| 403 | * # | 405 | * # |
| 404 | * | 406 | * |
| 405 | * { u32 size; | 407 | * { u32 size; |
| 406 | * char data[size]; } && PERF_SAMPLE_RAW | 408 | * char data[size]; } && PERF_SAMPLE_RAW |
| 407 | * }; | 409 | * }; |
| 408 | */ | 410 | */ |
| 409 | PERF_RECORD_SAMPLE = 9, | 411 | PERF_RECORD_SAMPLE = 9, |
| 410 | 412 | ||
| 411 | PERF_RECORD_MAX, /* non-ABI */ | 413 | PERF_RECORD_MAX, /* non-ABI */ |
| 412 | }; | 414 | }; |
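Every record in the mmap data area begins with the perf_event_header defined above, so a consumer's inner loop dispatches on header->type and advances by header->size (sketch; mask is assumed to be the power-of-two buffer size minus one):

	struct perf_event_header *hdr = (void *)(data + (tail & mask));

	switch (hdr->type) {
	case PERF_RECORD_SAMPLE:
		/* decode fields in the order attr.sample_type selected */
		break;
	case PERF_RECORD_MMAP:
	case PERF_RECORD_COMM:
		/* side-band records used to resolve user-space IPs */
		break;
	case PERF_RECORD_LOST:
		/* 'lost' counts records dropped on buffer overflow */
		break;
	}
	tail += hdr->size;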
| 413 | 415 | ||
| 414 | enum perf_callchain_context { | 416 | enum perf_callchain_context { |
| 415 | PERF_CONTEXT_HV = (__u64)-32, | 417 | PERF_CONTEXT_HV = (__u64)-32, |
| 416 | PERF_CONTEXT_KERNEL = (__u64)-128, | 418 | PERF_CONTEXT_KERNEL = (__u64)-128, |
| 417 | PERF_CONTEXT_USER = (__u64)-512, | 419 | PERF_CONTEXT_USER = (__u64)-512, |
| 418 | 420 | ||
| 419 | PERF_CONTEXT_GUEST = (__u64)-2048, | 421 | PERF_CONTEXT_GUEST = (__u64)-2048, |
| 420 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, | 422 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, |
| 421 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, | 423 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, |
| 422 | 424 | ||
| 423 | PERF_CONTEXT_MAX = (__u64)-4095, | 425 | PERF_CONTEXT_MAX = (__u64)-4095, |
| 424 | }; | 426 | }; |
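These markers are huge u64 values precisely so they cannot collide with real instruction pointers; a callchain consumer treats any entry at or above PERF_CONTEXT_MAX as a switch of the domain for the IPs that follow (resolve_ip() is a hypothetical helper):

	__u64 context = PERF_CONTEXT_MAX;

	for (i = 0; i < nr; i++) {
		if (ips[i] >= (__u64)PERF_CONTEXT_MAX)
			context = ips[i];	/* e.g. PERF_CONTEXT_KERNEL */
		else
			resolve_ip(context, ips[i]);
	}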
| 425 | 427 | ||
| 426 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) | 428 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) |
| 427 | #define PERF_FLAG_FD_OUTPUT (1U << 1) | 429 | #define PERF_FLAG_FD_OUTPUT (1U << 1) |
| 428 | 430 | ||
| 429 | #ifdef __KERNEL__ | 431 | #ifdef __KERNEL__ |
| 430 | /* | 432 | /* |
| 431 | * Kernel-internal data types and definitions: | 433 | * Kernel-internal data types and definitions: |
| 432 | */ | 434 | */ |
| 433 | 435 | ||
| 434 | #ifdef CONFIG_PERF_EVENTS | 436 | #ifdef CONFIG_PERF_EVENTS |
| 435 | # include <asm/perf_event.h> | 437 | # include <asm/perf_event.h> |
| 436 | #endif | 438 | #endif |
| 437 | 439 | ||
| 438 | #include <linux/list.h> | 440 | #include <linux/list.h> |
| 439 | #include <linux/mutex.h> | 441 | #include <linux/mutex.h> |
| 440 | #include <linux/rculist.h> | 442 | #include <linux/rculist.h> |
| 441 | #include <linux/rcupdate.h> | 443 | #include <linux/rcupdate.h> |
| 442 | #include <linux/spinlock.h> | 444 | #include <linux/spinlock.h> |
| 443 | #include <linux/hrtimer.h> | 445 | #include <linux/hrtimer.h> |
| 444 | #include <linux/fs.h> | 446 | #include <linux/fs.h> |
| 445 | #include <linux/pid_namespace.h> | 447 | #include <linux/pid_namespace.h> |
| 446 | #include <linux/workqueue.h> | 448 | #include <linux/workqueue.h> |
| 447 | #include <asm/atomic.h> | 449 | #include <asm/atomic.h> |
| 448 | 450 | ||
| 449 | #define PERF_MAX_STACK_DEPTH 255 | 451 | #define PERF_MAX_STACK_DEPTH 255 |
| 450 | 452 | ||
| 451 | struct perf_callchain_entry { | 453 | struct perf_callchain_entry { |
| 452 | __u64 nr; | 454 | __u64 nr; |
| 453 | __u64 ip[PERF_MAX_STACK_DEPTH]; | 455 | __u64 ip[PERF_MAX_STACK_DEPTH]; |
| 454 | }; | 456 | }; |
| 455 | 457 | ||
| 456 | struct perf_raw_record { | 458 | struct perf_raw_record { |
| 457 | u32 size; | 459 | u32 size; |
| 458 | void *data; | 460 | void *data; |
| 459 | }; | 461 | }; |
| 460 | 462 | ||
| 461 | struct task_struct; | 463 | struct task_struct; |
| 462 | 464 | ||
| 463 | /** | 465 | /** |
| 464 | * struct hw_perf_event - performance event hardware details: | 466 | * struct hw_perf_event - performance event hardware details: |
| 465 | */ | 467 | */ |
| 466 | struct hw_perf_event { | 468 | struct hw_perf_event { |
| 467 | #ifdef CONFIG_PERF_EVENTS | 469 | #ifdef CONFIG_PERF_EVENTS |
| 468 | union { | 470 | union { |
| 469 | struct { /* hardware */ | 471 | struct { /* hardware */ |
| 470 | u64 config; | 472 | u64 config; |
| 471 | unsigned long config_base; | 473 | unsigned long config_base; |
| 472 | unsigned long event_base; | 474 | unsigned long event_base; |
| 473 | int idx; | 475 | int idx; |
| 474 | }; | 476 | }; |
| 475 | struct { /* software */ | 477 | struct { /* software */ |
| 476 | s64 remaining; | 478 | s64 remaining; |
| 477 | struct hrtimer hrtimer; | 479 | struct hrtimer hrtimer; |
| 478 | }; | 480 | }; |
| 479 | }; | 481 | }; |
| 480 | atomic64_t prev_count; | 482 | atomic64_t prev_count; |
| 481 | u64 sample_period; | 483 | u64 sample_period; |
| 482 | u64 last_period; | 484 | u64 last_period; |
| 483 | atomic64_t period_left; | 485 | atomic64_t period_left; |
| 484 | u64 interrupts; | 486 | u64 interrupts; |
| 485 | 487 | ||
| 486 | u64 freq_count; | 488 | u64 freq_count; |
| 487 | u64 freq_interrupts; | 489 | u64 freq_interrupts; |
| 488 | u64 freq_stamp; | 490 | u64 freq_stamp; |
| 489 | #endif | 491 | #endif |
| 490 | }; | 492 | }; |
| 491 | 493 | ||
| 492 | struct perf_event; | 494 | struct perf_event; |
| 493 | 495 | ||
| 494 | /** | 496 | /** |
| 495 | * struct pmu - generic performance monitoring unit | 497 | * struct pmu - generic performance monitoring unit |
| 496 | */ | 498 | */ |
| 497 | struct pmu { | 499 | struct pmu { |
| 498 | int (*enable) (struct perf_event *event); | 500 | int (*enable) (struct perf_event *event); |
| 499 | void (*disable) (struct perf_event *event); | 501 | void (*disable) (struct perf_event *event); |
| 500 | void (*read) (struct perf_event *event); | 502 | void (*read) (struct perf_event *event); |
| 501 | void (*unthrottle) (struct perf_event *event); | 503 | void (*unthrottle) (struct perf_event *event); |
| 502 | }; | 504 | }; |
| 503 | 505 | ||
| 504 | /** | 506 | /** |
| 505 | * enum perf_event_active_state - the states of a event | 507 | * enum perf_event_active_state - the states of a event |
| 506 | */ | 508 | */ |
| 507 | enum perf_event_active_state { | 509 | enum perf_event_active_state { |
| 508 | PERF_EVENT_STATE_ERROR = -2, | 510 | PERF_EVENT_STATE_ERROR = -2, |
| 509 | PERF_EVENT_STATE_OFF = -1, | 511 | PERF_EVENT_STATE_OFF = -1, |
| 510 | PERF_EVENT_STATE_INACTIVE = 0, | 512 | PERF_EVENT_STATE_INACTIVE = 0, |
| 511 | PERF_EVENT_STATE_ACTIVE = 1, | 513 | PERF_EVENT_STATE_ACTIVE = 1, |
| 512 | }; | 514 | }; |
| 513 | 515 | ||
| 514 | struct file; | 516 | struct file; |
| 515 | 517 | ||
| 516 | struct perf_mmap_data { | 518 | struct perf_mmap_data { |
| 517 | struct rcu_head rcu_head; | 519 | struct rcu_head rcu_head; |
| 518 | #ifdef CONFIG_PERF_USE_VMALLOC | 520 | #ifdef CONFIG_PERF_USE_VMALLOC |
| 519 | struct work_struct work; | 521 | struct work_struct work; |
| 520 | #endif | 522 | #endif |
| 521 | int data_order; | 523 | int data_order; |
| 522 | int nr_pages; /* nr of data pages */ | 524 | int nr_pages; /* nr of data pages */ |
| 523 | int writable; /* are we writable */ | 525 | int writable; /* are we writable */ |
| 524 | int nr_locked; /* nr pages mlocked */ | 526 | int nr_locked; /* nr pages mlocked */ |
| 525 | 527 | ||
| 526 | atomic_t poll; /* POLL_ for wakeups */ | 528 | atomic_t poll; /* POLL_ for wakeups */ |
| 527 | atomic_t events; /* event_id limit */ | 529 | atomic_t events; /* event_id limit */ |
| 528 | 530 | ||
| 529 | atomic_long_t head; /* write position */ | 531 | atomic_long_t head; /* write position */ |
| 530 | atomic_long_t done_head; /* completed head */ | 532 | atomic_long_t done_head; /* completed head */ |
| 531 | 533 | ||
| 532 | atomic_t lock; /* concurrent writes */ | 534 | atomic_t lock; /* concurrent writes */ |
| 533 | atomic_t wakeup; /* needs a wakeup */ | 535 | atomic_t wakeup; /* needs a wakeup */ |
| 534 | atomic_t lost; /* nr records lost */ | 536 | atomic_t lost; /* nr records lost */ |
| 535 | 537 | ||
| 536 | long watermark; /* wakeup watermark */ | 538 | long watermark; /* wakeup watermark */ |
| 537 | 539 | ||
| 538 | struct perf_event_mmap_page *user_page; | 540 | struct perf_event_mmap_page *user_page; |
| 539 | void *data_pages[0]; | 541 | void *data_pages[0]; |
| 540 | }; | 542 | }; |
| 541 | 543 | ||
| 542 | struct perf_pending_entry { | 544 | struct perf_pending_entry { |
| 543 | struct perf_pending_entry *next; | 545 | struct perf_pending_entry *next; |
| 544 | void (*func)(struct perf_pending_entry *); | 546 | void (*func)(struct perf_pending_entry *); |
| 545 | }; | 547 | }; |
| 546 | 548 | ||
| 547 | /** | 549 | /** |
| 548 | * struct perf_event - performance event kernel representation: | 550 | * struct perf_event - performance event kernel representation: |
| 549 | */ | 551 | */ |
| 550 | struct perf_event { | 552 | struct perf_event { |
| 551 | #ifdef CONFIG_PERF_EVENTS | 553 | #ifdef CONFIG_PERF_EVENTS |
| 552 | struct list_head group_entry; | 554 | struct list_head group_entry; |
| 553 | struct list_head event_entry; | 555 | struct list_head event_entry; |
| 554 | struct list_head sibling_list; | 556 | struct list_head sibling_list; |
| 555 | int nr_siblings; | 557 | int nr_siblings; |
| 556 | struct perf_event *group_leader; | 558 | struct perf_event *group_leader; |
| 557 | struct perf_event *output; | 559 | struct perf_event *output; |
| 558 | const struct pmu *pmu; | 560 | const struct pmu *pmu; |
| 559 | 561 | ||
| 560 | enum perf_event_active_state state; | 562 | enum perf_event_active_state state; |
| 561 | atomic64_t count; | 563 | atomic64_t count; |
| 562 | 564 | ||
| 563 | /* | 565 | /* |
| 564 | * These are the total time in nanoseconds that the event | 566 | * These are the total time in nanoseconds that the event |
| 565 | * has been enabled (i.e. eligible to run, and the task has | 567 | * has been enabled (i.e. eligible to run, and the task has |
| 566 | * been scheduled in, if this is a per-task event) | 568 | * been scheduled in, if this is a per-task event) |
| 567 | * and running (scheduled onto the CPU), respectively. | 569 | * and running (scheduled onto the CPU), respectively. |
| 568 | * | 570 | * |
| 569 | * They are computed from tstamp_enabled, tstamp_running and | 571 | * They are computed from tstamp_enabled, tstamp_running and |
| 570 | * tstamp_stopped when the event is in INACTIVE or ACTIVE state. | 572 | * tstamp_stopped when the event is in INACTIVE or ACTIVE state. |
| 571 | */ | 573 | */ |
| 572 | u64 total_time_enabled; | 574 | u64 total_time_enabled; |
| 573 | u64 total_time_running; | 575 | u64 total_time_running; |
| 574 | 576 | ||
| 575 | /* | 577 | /* |
| 576 | * These are timestamps used for computing total_time_enabled | 578 | * These are timestamps used for computing total_time_enabled |
| 577 | * and total_time_running when the event is in INACTIVE or | 579 | * and total_time_running when the event is in INACTIVE or |
| 578 | * ACTIVE state, measured in nanoseconds from an arbitrary point | 580 | * ACTIVE state, measured in nanoseconds from an arbitrary point |
| 579 | * in time. | 581 | * in time. |
| 580 | * tstamp_enabled: the notional time when the event was enabled | 582 | * tstamp_enabled: the notional time when the event was enabled |
| 581 | * tstamp_running: the notional time when the event was scheduled on | 583 | * tstamp_running: the notional time when the event was scheduled on |
| 582 | * tstamp_stopped: in INACTIVE state, the notional time when the | 584 | * tstamp_stopped: in INACTIVE state, the notional time when the |
| 583 | * event was scheduled off. | 585 | * event was scheduled off. |
| 584 | */ | 586 | */ |
| 585 | u64 tstamp_enabled; | 587 | u64 tstamp_enabled; |
| 586 | u64 tstamp_running; | 588 | u64 tstamp_running; |
| 587 | u64 tstamp_stopped; | 589 | u64 tstamp_stopped; |
| 588 | 590 | ||
| 589 | struct perf_event_attr attr; | 591 | struct perf_event_attr attr; |
| 590 | struct hw_perf_event hw; | 592 | struct hw_perf_event hw; |
| 591 | 593 | ||
| 592 | struct perf_event_context *ctx; | 594 | struct perf_event_context *ctx; |
| 593 | struct file *filp; | 595 | struct file *filp; |
| 594 | 596 | ||
| 595 | /* | 597 | /* |
| 596 | * These accumulate total time (in nanoseconds) that children | 598 | * These accumulate total time (in nanoseconds) that children |
| 597 | * events have been enabled and running, respectively. | 599 | * events have been enabled and running, respectively. |
| 598 | */ | 600 | */ |
| 599 | atomic64_t child_total_time_enabled; | 601 | atomic64_t child_total_time_enabled; |
| 600 | atomic64_t child_total_time_running; | 602 | atomic64_t child_total_time_running; |
| 601 | 603 | ||
| 602 | /* | 604 | /* |
| 603 | * Protect attach/detach and child_list: | 605 | * Protect attach/detach and child_list: |
| 604 | */ | 606 | */ |
| 605 | struct mutex child_mutex; | 607 | struct mutex child_mutex; |
| 606 | struct list_head child_list; | 608 | struct list_head child_list; |
| 607 | struct perf_event *parent; | 609 | struct perf_event *parent; |
| 608 | 610 | ||
| 609 | int oncpu; | 611 | int oncpu; |
| 610 | int cpu; | 612 | int cpu; |
| 611 | 613 | ||
| 612 | struct list_head owner_entry; | 614 | struct list_head owner_entry; |
| 613 | struct task_struct *owner; | 615 | struct task_struct *owner; |
| 614 | 616 | ||
| 615 | /* mmap bits */ | 617 | /* mmap bits */ |
| 616 | struct mutex mmap_mutex; | 618 | struct mutex mmap_mutex; |
| 617 | atomic_t mmap_count; | 619 | atomic_t mmap_count; |
| 618 | struct perf_mmap_data *data; | 620 | struct perf_mmap_data *data; |
| 619 | 621 | ||
| 620 | /* poll related */ | 622 | /* poll related */ |
| 621 | wait_queue_head_t waitq; | 623 | wait_queue_head_t waitq; |
| 622 | struct fasync_struct *fasync; | 624 | struct fasync_struct *fasync; |
| 623 | 625 | ||
| 624 | /* delayed work for NMIs and such */ | 626 | /* delayed work for NMIs and such */ |
| 625 | int pending_wakeup; | 627 | int pending_wakeup; |
| 626 | int pending_kill; | 628 | int pending_kill; |
| 627 | int pending_disable; | 629 | int pending_disable; |
| 628 | struct perf_pending_entry pending; | 630 | struct perf_pending_entry pending; |
| 629 | 631 | ||
| 630 | atomic_t event_limit; | 632 | atomic_t event_limit; |
| 631 | 633 | ||
| 632 | void (*destroy)(struct perf_event *); | 634 | void (*destroy)(struct perf_event *); |
| 633 | struct rcu_head rcu_head; | 635 | struct rcu_head rcu_head; |
| 634 | 636 | ||
| 635 | struct pid_namespace *ns; | 637 | struct pid_namespace *ns; |
| 636 | u64 id; | 638 | u64 id; |
| 637 | 639 | ||
| 638 | #ifdef CONFIG_EVENT_PROFILE | 640 | #ifdef CONFIG_EVENT_PROFILE |
| 639 | struct event_filter *filter; | 641 | struct event_filter *filter; |
| 640 | #endif | 642 | #endif |
| 641 | 643 | ||
| 642 | #endif /* CONFIG_PERF_EVENTS */ | 644 | #endif /* CONFIG_PERF_EVENTS */ |
| 643 | }; | 645 | }; |
| 644 | 646 | ||
| 645 | /** | 647 | /** |
| 646 | * struct perf_event_context - event context structure | 648 | * struct perf_event_context - event context structure |
| 647 | * | 649 | * |
| 648 | * Used as a container for task events and CPU events as well: | 650 | * Used as a container for task events and CPU events as well: |
| 649 | */ | 651 | */ |
| 650 | struct perf_event_context { | 652 | struct perf_event_context { |
| 651 | /* | 653 | /* |
| 652 | * Protect the states of the events in the list, | 654 | * Protect the states of the events in the list, |
| 653 | * nr_active, and the list: | 655 | * nr_active, and the list: |
| 654 | */ | 656 | */ |
| 655 | spinlock_t lock; | 657 | spinlock_t lock; |
| 656 | /* | 658 | /* |
| 657 | * Protect the list of events. Locking either mutex or lock | 659 | * Protect the list of events. Locking either mutex or lock |
| 658 | * is sufficient to ensure the list doesn't change; to change | 660 | * is sufficient to ensure the list doesn't change; to change |
| 659 | * the list you need to lock both the mutex and the spinlock. | 661 | * the list you need to lock both the mutex and the spinlock. |
| 660 | */ | 662 | */ |
| 661 | struct mutex mutex; | 663 | struct mutex mutex; |
| 662 | 664 | ||
| 663 | struct list_head group_list; | 665 | struct list_head group_list; |
| 664 | struct list_head event_list; | 666 | struct list_head event_list; |
| 665 | int nr_events; | 667 | int nr_events; |
| 666 | int nr_active; | 668 | int nr_active; |
| 667 | int is_active; | 669 | int is_active; |
| 668 | int nr_stat; | 670 | int nr_stat; |
| 669 | atomic_t refcount; | 671 | atomic_t refcount; |
| 670 | struct task_struct *task; | 672 | struct task_struct *task; |
| 671 | 673 | ||
| 672 | /* | 674 | /* |
| 673 | * Context clock, runs when context enabled. | 675 | * Context clock, runs when context enabled. |
| 674 | */ | 676 | */ |
| 675 | u64 time; | 677 | u64 time; |
| 676 | u64 timestamp; | 678 | u64 timestamp; |
| 677 | 679 | ||
| 678 | /* | 680 | /* |
| 679 | * These fields let us detect when two contexts have both | 681 | * These fields let us detect when two contexts have both |
| 680 | * been cloned (inherited) from a common ancestor. | 682 | * been cloned (inherited) from a common ancestor. |
| 681 | */ | 683 | */ |
| 682 | struct perf_event_context *parent_ctx; | 684 | struct perf_event_context *parent_ctx; |
| 683 | u64 parent_gen; | 685 | u64 parent_gen; |
| 684 | u64 generation; | 686 | u64 generation; |
| 685 | int pin_count; | 687 | int pin_count; |
| 686 | struct rcu_head rcu_head; | 688 | struct rcu_head rcu_head; |
| 687 | }; | 689 | }; |
| 688 | 690 | ||
| 689 | /** | 691 | /** |
| 690 | * struct perf_event_cpu_context - per cpu event context structure | 692 | * struct perf_event_cpu_context - per cpu event context structure |
| 691 | */ | 693 | */ |
| 692 | struct perf_cpu_context { | 694 | struct perf_cpu_context { |
| 693 | struct perf_event_context ctx; | 695 | struct perf_event_context ctx; |
| 694 | struct perf_event_context *task_ctx; | 696 | struct perf_event_context *task_ctx; |
| 695 | int active_oncpu; | 697 | int active_oncpu; |
| 696 | int max_pertask; | 698 | int max_pertask; |
| 697 | int exclusive; | 699 | int exclusive; |
| 698 | 700 | ||
| 699 | /* | 701 | /* |
| 700 | * Recursion avoidance: | 702 | * Recursion avoidance: |
| 701 | * | 703 | * |
| 702 | * task, softirq, irq, nmi context | 704 | * task, softirq, irq, nmi context |
| 703 | */ | 705 | */ |
| 704 | int recursion[4]; | 706 | int recursion[4]; |
| 705 | }; | 707 | }; |
| 706 | 708 | ||
| 707 | struct perf_output_handle { | 709 | struct perf_output_handle { |
| 708 | struct perf_event *event; | 710 | struct perf_event *event; |
| 709 | struct perf_mmap_data *data; | 711 | struct perf_mmap_data *data; |
| 710 | unsigned long head; | 712 | unsigned long head; |
| 711 | unsigned long offset; | 713 | unsigned long offset; |
| 712 | int nmi; | 714 | int nmi; |
| 713 | int sample; | 715 | int sample; |
| 714 | int locked; | 716 | int locked; |
| 715 | unsigned long flags; | 717 | unsigned long flags; |
| 716 | }; | 718 | }; |
| 717 | 719 | ||
| 718 | #ifdef CONFIG_PERF_EVENTS | 720 | #ifdef CONFIG_PERF_EVENTS |
| 719 | 721 | ||
| 720 | /* | 722 | /* |
| 721 | * Set by architecture code: | 723 | * Set by architecture code: |
| 722 | */ | 724 | */ |
| 723 | extern int perf_max_events; | 725 | extern int perf_max_events; |
| 724 | 726 | ||
| 725 | extern const struct pmu *hw_perf_event_init(struct perf_event *event); | 727 | extern const struct pmu *hw_perf_event_init(struct perf_event *event); |
| 726 | 728 | ||
| 727 | extern void perf_event_task_sched_in(struct task_struct *task, int cpu); | 729 | extern void perf_event_task_sched_in(struct task_struct *task, int cpu); |
| 728 | extern void perf_event_task_sched_out(struct task_struct *task, | 730 | extern void perf_event_task_sched_out(struct task_struct *task, |
| 729 | struct task_struct *next, int cpu); | 731 | struct task_struct *next, int cpu); |
| 730 | extern void perf_event_task_tick(struct task_struct *task, int cpu); | 732 | extern void perf_event_task_tick(struct task_struct *task, int cpu); |
| 731 | extern int perf_event_init_task(struct task_struct *child); | 733 | extern int perf_event_init_task(struct task_struct *child); |
| 732 | extern void perf_event_exit_task(struct task_struct *child); | 734 | extern void perf_event_exit_task(struct task_struct *child); |
| 733 | extern void perf_event_free_task(struct task_struct *task); | 735 | extern void perf_event_free_task(struct task_struct *task); |
| 734 | extern void set_perf_event_pending(void); | 736 | extern void set_perf_event_pending(void); |
| 735 | extern void perf_event_do_pending(void); | 737 | extern void perf_event_do_pending(void); |
| 736 | extern void perf_event_print_debug(void); | 738 | extern void perf_event_print_debug(void); |
| 737 | extern void __perf_disable(void); | 739 | extern void __perf_disable(void); |
| 738 | extern bool __perf_enable(void); | 740 | extern bool __perf_enable(void); |
| 739 | extern void perf_disable(void); | 741 | extern void perf_disable(void); |
| 740 | extern void perf_enable(void); | 742 | extern void perf_enable(void); |
| 741 | extern int perf_event_task_disable(void); | 743 | extern int perf_event_task_disable(void); |
| 742 | extern int perf_event_task_enable(void); | 744 | extern int perf_event_task_enable(void); |
| 743 | extern int hw_perf_group_sched_in(struct perf_event *group_leader, | 745 | extern int hw_perf_group_sched_in(struct perf_event *group_leader, |
| 744 | struct perf_cpu_context *cpuctx, | 746 | struct perf_cpu_context *cpuctx, |
| 745 | struct perf_event_context *ctx, int cpu); | 747 | struct perf_event_context *ctx, int cpu); |
| 746 | extern void perf_event_update_userpage(struct perf_event *event); | 748 | extern void perf_event_update_userpage(struct perf_event *event); |
| 747 | 749 | ||
| 748 | struct perf_sample_data { | 750 | struct perf_sample_data { |
| 749 | u64 type; | 751 | u64 type; |
| 750 | 752 | ||
| 751 | u64 ip; | 753 | u64 ip; |
| 752 | struct { | 754 | struct { |
| 753 | u32 pid; | 755 | u32 pid; |
| 754 | u32 tid; | 756 | u32 tid; |
| 755 | } tid_entry; | 757 | } tid_entry; |
| 756 | u64 time; | 758 | u64 time; |
| 757 | u64 addr; | 759 | u64 addr; |
| 758 | u64 id; | 760 | u64 id; |
| 759 | u64 stream_id; | 761 | u64 stream_id; |
| 760 | struct { | 762 | struct { |
| 761 | u32 cpu; | 763 | u32 cpu; |
| 762 | u32 reserved; | 764 | u32 reserved; |
| 763 | } cpu_entry; | 765 | } cpu_entry; |
| 764 | u64 period; | 766 | u64 period; |
| 765 | struct perf_callchain_entry *callchain; | 767 | struct perf_callchain_entry *callchain; |
| 766 | struct perf_raw_record *raw; | 768 | struct perf_raw_record *raw; |
| 767 | }; | 769 | }; |
| 768 | 770 | ||
| 769 | extern void perf_output_sample(struct perf_output_handle *handle, | 771 | extern void perf_output_sample(struct perf_output_handle *handle, |
| 770 | struct perf_event_header *header, | 772 | struct perf_event_header *header, |
| 771 | struct perf_sample_data *data, | 773 | struct perf_sample_data *data, |
| 772 | struct perf_event *event); | 774 | struct perf_event *event); |
| 773 | extern void perf_prepare_sample(struct perf_event_header *header, | 775 | extern void perf_prepare_sample(struct perf_event_header *header, |
| 774 | struct perf_sample_data *data, | 776 | struct perf_sample_data *data, |
| 775 | struct perf_event *event, | 777 | struct perf_event *event, |
| 776 | struct pt_regs *regs); | 778 | struct pt_regs *regs); |
| 777 | 779 | ||
| 778 | extern int perf_event_overflow(struct perf_event *event, int nmi, | 780 | extern int perf_event_overflow(struct perf_event *event, int nmi, |
| 779 | struct perf_sample_data *data, | 781 | struct perf_sample_data *data, |
| 780 | struct pt_regs *regs); | 782 | struct pt_regs *regs); |
| 781 | 783 | ||
| 782 | /* | 784 | /* |
| 783 | * Return 1 for a software event, 0 for a hardware event | 785 | * Return 1 for a software event, 0 for a hardware event |
| 784 | */ | 786 | */ |
| 785 | static inline int is_software_event(struct perf_event *event) | 787 | static inline int is_software_event(struct perf_event *event) |
| 786 | { | 788 | { |
| 787 | return (event->attr.type != PERF_TYPE_RAW) && | 789 | return (event->attr.type != PERF_TYPE_RAW) && |
| 788 | (event->attr.type != PERF_TYPE_HARDWARE) && | 790 | (event->attr.type != PERF_TYPE_HARDWARE) && |
| 789 | (event->attr.type != PERF_TYPE_HW_CACHE); | 791 | (event->attr.type != PERF_TYPE_HW_CACHE); |
| 790 | } | 792 | } |
| 791 | 793 | ||
| 792 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 794 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
| 793 | 795 | ||
| 794 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | 796 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); |
| 795 | 797 | ||
| 796 | static inline void | 798 | static inline void |
| 797 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | 799 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) |
| 798 | { | 800 | { |
| 799 | if (atomic_read(&perf_swevent_enabled[event_id])) | 801 | if (atomic_read(&perf_swevent_enabled[event_id])) |
| 800 | __perf_sw_event(event_id, nr, nmi, regs, addr); | 802 | __perf_sw_event(event_id, nr, nmi, regs, addr); |
| 801 | } | 803 | } |
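This inline is the cheap half of the software-event path: when nothing has enabled the event class, a call site pays only the atomic_read(). It is the hook the PowerPC patches in this merge add to the alignment- and emulation-fault paths; condensed, those call sites look like:

	/* illustrative, condensed from the arch/powerpc changes in this merge */
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);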
| 802 | 804 | ||
| 803 | extern void __perf_event_mmap(struct vm_area_struct *vma); | 805 | extern void __perf_event_mmap(struct vm_area_struct *vma); |
| 804 | 806 | ||
| 805 | static inline void perf_event_mmap(struct vm_area_struct *vma) | 807 | static inline void perf_event_mmap(struct vm_area_struct *vma) |
| 806 | { | 808 | { |
| 807 | if (vma->vm_flags & VM_EXEC) | 809 | if (vma->vm_flags & VM_EXEC) |
| 808 | __perf_event_mmap(vma); | 810 | __perf_event_mmap(vma); |
| 809 | } | 811 | } |
| 810 | 812 | ||
| 811 | extern void perf_event_comm(struct task_struct *tsk); | 813 | extern void perf_event_comm(struct task_struct *tsk); |
| 812 | extern void perf_event_fork(struct task_struct *tsk); | 814 | extern void perf_event_fork(struct task_struct *tsk); |
| 813 | 815 | ||
| 814 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | 816 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); |
| 815 | 817 | ||
| 816 | extern int sysctl_perf_event_paranoid; | 818 | extern int sysctl_perf_event_paranoid; |
| 817 | extern int sysctl_perf_event_mlock; | 819 | extern int sysctl_perf_event_mlock; |
| 818 | extern int sysctl_perf_event_sample_rate; | 820 | extern int sysctl_perf_event_sample_rate; |
| 819 | 821 | ||
| 820 | extern void perf_event_init(void); | 822 | extern void perf_event_init(void); |
| 821 | extern void perf_tp_event(int event_id, u64 addr, u64 count, | 823 | extern void perf_tp_event(int event_id, u64 addr, u64 count, |
| 822 | void *record, int entry_size); | 824 | void *record, int entry_size); |
| 823 | 825 | ||
| 824 | #ifndef perf_misc_flags | 826 | #ifndef perf_misc_flags |
| 825 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ | 827 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ |
| 826 | PERF_RECORD_MISC_KERNEL) | 828 | PERF_RECORD_MISC_KERNEL) |
| 827 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | 829 | #define perf_instruction_pointer(regs) instruction_pointer(regs) |
| 828 | #endif | 830 | #endif |
| 829 | 831 | ||
| 830 | extern int perf_output_begin(struct perf_output_handle *handle, | 832 | extern int perf_output_begin(struct perf_output_handle *handle, |
| 831 | struct perf_event *event, unsigned int size, | 833 | struct perf_event *event, unsigned int size, |
| 832 | int nmi, int sample); | 834 | int nmi, int sample); |
| 833 | extern void perf_output_end(struct perf_output_handle *handle); | 835 | extern void perf_output_end(struct perf_output_handle *handle); |
| 834 | extern void perf_output_copy(struct perf_output_handle *handle, | 836 | extern void perf_output_copy(struct perf_output_handle *handle, |
| 835 | const void *buf, unsigned int len); | 837 | const void *buf, unsigned int len); |
| 836 | #else | 838 | #else |
| 837 | static inline void | 839 | static inline void |
| 838 | perf_event_task_sched_in(struct task_struct *task, int cpu) { } | 840 | perf_event_task_sched_in(struct task_struct *task, int cpu) { } |
| 839 | static inline void | 841 | static inline void |
| 840 | perf_event_task_sched_out(struct task_struct *task, | 842 | perf_event_task_sched_out(struct task_struct *task, |
| 841 | struct task_struct *next, int cpu) { } | 843 | struct task_struct *next, int cpu) { } |
| 842 | static inline void | 844 | static inline void |
| 843 | perf_event_task_tick(struct task_struct *task, int cpu) { } | 845 | perf_event_task_tick(struct task_struct *task, int cpu) { } |
| 844 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } | 846 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } |
| 845 | static inline void perf_event_exit_task(struct task_struct *child) { } | 847 | static inline void perf_event_exit_task(struct task_struct *child) { } |
| 846 | static inline void perf_event_free_task(struct task_struct *task) { } | 848 | static inline void perf_event_free_task(struct task_struct *task) { } |
| 847 | static inline void perf_event_do_pending(void) { } | 849 | static inline void perf_event_do_pending(void) { } |
| 848 | static inline void perf_event_print_debug(void) { } | 850 | static inline void perf_event_print_debug(void) { } |
| 849 | static inline void perf_disable(void) { } | 851 | static inline void perf_disable(void) { } |
| 850 | static inline void perf_enable(void) { } | 852 | static inline void perf_enable(void) { } |
| 851 | static inline int perf_event_task_disable(void) { return -EINVAL; } | 853 | static inline int perf_event_task_disable(void) { return -EINVAL; } |
| 852 | static inline int perf_event_task_enable(void) { return -EINVAL; } | 854 | static inline int perf_event_task_enable(void) { return -EINVAL; } |
| 853 | 855 | ||
| 854 | static inline void | 856 | static inline void |
| 855 | perf_sw_event(u32 event_id, u64 nr, int nmi, | 857 | perf_sw_event(u32 event_id, u64 nr, int nmi, |
| 856 | struct pt_regs *regs, u64 addr) { } | 858 | struct pt_regs *regs, u64 addr) { } |
| 857 | 859 | ||
| 858 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } | 860 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
| 859 | static inline void perf_event_comm(struct task_struct *tsk) { } | 861 | static inline void perf_event_comm(struct task_struct *tsk) { } |
| 860 | static inline void perf_event_fork(struct task_struct *tsk) { } | 862 | static inline void perf_event_fork(struct task_struct *tsk) { } |
| 861 | static inline void perf_event_init(void) { } | 863 | static inline void perf_event_init(void) { } |
| 862 | 864 | ||
| 863 | #endif | 865 | #endif |
| 864 | 866 | ||
| 865 | #define perf_output_put(handle, x) \ | 867 | #define perf_output_put(handle, x) \ |
| 866 | perf_output_copy((handle), &(x), sizeof(x)) | 868 | perf_output_copy((handle), &(x), sizeof(x)) |
| 867 | 869 | ||
| 868 | #endif /* __KERNEL__ */ | 870 | #endif /* __KERNEL__ */ |
| 869 | #endif /* _LINUX_PERF_EVENT_H */ | 871 | #endif /* _LINUX_PERF_EVENT_H */ |
| 870 | 872 |
kernel/perf_event.c
| 1 | /* | 1 | /* |
| 2 | * Performance events core code: | 2 | * Performance events core code: |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | 4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> |
| 5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | 5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar |
| 6 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 6 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
| 7 | * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | 7 | * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> |
| 8 | * | 8 | * |
| 9 | * For licensing details see kernel-base/COPYING | 9 | * For licensing details see kernel-base/COPYING |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
| 13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
| 14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
| 15 | #include <linux/smp.h> | 15 | #include <linux/smp.h> |
| 16 | #include <linux/file.h> | 16 | #include <linux/file.h> |
| 17 | #include <linux/poll.h> | 17 | #include <linux/poll.h> |
| 18 | #include <linux/sysfs.h> | 18 | #include <linux/sysfs.h> |
| 19 | #include <linux/dcache.h> | 19 | #include <linux/dcache.h> |
| 20 | #include <linux/percpu.h> | 20 | #include <linux/percpu.h> |
| 21 | #include <linux/ptrace.h> | 21 | #include <linux/ptrace.h> |
| 22 | #include <linux/vmstat.h> | 22 | #include <linux/vmstat.h> |
| 23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
| 24 | #include <linux/hardirq.h> | 24 | #include <linux/hardirq.h> |
| 25 | #include <linux/rculist.h> | 25 | #include <linux/rculist.h> |
| 26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
| 27 | #include <linux/syscalls.h> | 27 | #include <linux/syscalls.h> |
| 28 | #include <linux/anon_inodes.h> | 28 | #include <linux/anon_inodes.h> |
| 29 | #include <linux/kernel_stat.h> | 29 | #include <linux/kernel_stat.h> |
| 30 | #include <linux/perf_event.h> | 30 | #include <linux/perf_event.h> |
| 31 | #include <linux/ftrace_event.h> | 31 | #include <linux/ftrace_event.h> |
| 32 | 32 | ||
| 33 | #include <asm/irq_regs.h> | 33 | #include <asm/irq_regs.h> |
| 34 | 34 | ||
| 35 | /* | 35 | /* |
| 36 | * Each CPU has a list of per CPU events: | 36 | * Each CPU has a list of per CPU events: |
| 37 | */ | 37 | */ |
| 38 | DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); | 38 | DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); |
| 39 | 39 | ||
| 40 | int perf_max_events __read_mostly = 1; | 40 | int perf_max_events __read_mostly = 1; |
| 41 | static int perf_reserved_percpu __read_mostly; | 41 | static int perf_reserved_percpu __read_mostly; |
| 42 | static int perf_overcommit __read_mostly = 1; | 42 | static int perf_overcommit __read_mostly = 1; |
| 43 | 43 | ||
| 44 | static atomic_t nr_events __read_mostly; | 44 | static atomic_t nr_events __read_mostly; |
| 45 | static atomic_t nr_mmap_events __read_mostly; | 45 | static atomic_t nr_mmap_events __read_mostly; |
| 46 | static atomic_t nr_comm_events __read_mostly; | 46 | static atomic_t nr_comm_events __read_mostly; |
| 47 | static atomic_t nr_task_events __read_mostly; | 47 | static atomic_t nr_task_events __read_mostly; |
| 48 | 48 | ||
| 49 | /* | 49 | /* |
| 50 | * perf event paranoia level: | 50 | * perf event paranoia level: |
| 51 | * -1 - not paranoid at all | 51 | * -1 - not paranoid at all |
| 52 | * 0 - disallow raw tracepoint access for unpriv | 52 | * 0 - disallow raw tracepoint access for unpriv |
| 53 | * 1 - disallow cpu events for unpriv | 53 | * 1 - disallow cpu events for unpriv |
| 54 | * 2 - disallow kernel profiling for unpriv | 54 | * 2 - disallow kernel profiling for unpriv |
| 55 | */ | 55 | */ |
| 56 | int sysctl_perf_event_paranoid __read_mostly = 1; | 56 | int sysctl_perf_event_paranoid __read_mostly = 1; |
| 57 | 57 | ||
| 58 | static inline bool perf_paranoid_tracepoint_raw(void) | 58 | static inline bool perf_paranoid_tracepoint_raw(void) |
| 59 | { | 59 | { |
| 60 | return sysctl_perf_event_paranoid > -1; | 60 | return sysctl_perf_event_paranoid > -1; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static inline bool perf_paranoid_cpu(void) | 63 | static inline bool perf_paranoid_cpu(void) |
| 64 | { | 64 | { |
| 65 | return sysctl_perf_event_paranoid > 0; | 65 | return sysctl_perf_event_paranoid > 0; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static inline bool perf_paranoid_kernel(void) | 68 | static inline bool perf_paranoid_kernel(void) |
| 69 | { | 69 | { |
| 70 | return sysctl_perf_event_paranoid > 1; | 70 | return sysctl_perf_event_paranoid > 1; |
| 71 | } | 71 | } |
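Together these helpers turn the single sysctl into three increasingly strict gates. A sketch of how such a gate reads at event-creation time, assuming the conventional capable(CAP_SYS_ADMIN) override (the exact call site and error code are not shown in this hunk):

        /* Sketch: refuse an unprivileged per-cpu event once
         * sysctl_perf_event_paranoid > 0, unless the caller is root. */
        if (cpu != -1 && perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EACCES);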
| 72 | 72 | ||
| 73 | int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ | 73 | int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ |
| 74 | 74 | ||
| 75 | /* | 75 | /* |
| 76 | * max perf event sample rate | 76 | * max perf event sample rate |
| 77 | */ | 77 | */ |
| 78 | int sysctl_perf_event_sample_rate __read_mostly = 100000; | 78 | int sysctl_perf_event_sample_rate __read_mostly = 100000; |
| 79 | 79 | ||
| 80 | static atomic64_t perf_event_id; | 80 | static atomic64_t perf_event_id; |
| 81 | 81 | ||
| 82 | /* | 82 | /* |
| 83 | * Lock for (sysadmin-configurable) event reservations: | 83 | * Lock for (sysadmin-configurable) event reservations: |
| 84 | */ | 84 | */ |
| 85 | static DEFINE_SPINLOCK(perf_resource_lock); | 85 | static DEFINE_SPINLOCK(perf_resource_lock); |
| 86 | 86 | ||
| 87 | /* | 87 | /* |
| 88 | * Architecture provided APIs - weak aliases: | 88 | * Architecture provided APIs - weak aliases: |
| 89 | */ | 89 | */ |
| 90 | extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) | 90 | extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) |
| 91 | { | 91 | { |
| 92 | return NULL; | 92 | return NULL; |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | void __weak hw_perf_disable(void) { barrier(); } | 95 | void __weak hw_perf_disable(void) { barrier(); } |
| 96 | void __weak hw_perf_enable(void) { barrier(); } | 96 | void __weak hw_perf_enable(void) { barrier(); } |
| 97 | 97 | ||
| 98 | void __weak hw_perf_event_setup(int cpu) { barrier(); } | 98 | void __weak hw_perf_event_setup(int cpu) { barrier(); } |
| 99 | void __weak hw_perf_event_setup_online(int cpu) { barrier(); } | 99 | void __weak hw_perf_event_setup_online(int cpu) { barrier(); } |
| 100 | 100 | ||
| 101 | int __weak | 101 | int __weak |
| 102 | hw_perf_group_sched_in(struct perf_event *group_leader, | 102 | hw_perf_group_sched_in(struct perf_event *group_leader, |
| 103 | struct perf_cpu_context *cpuctx, | 103 | struct perf_cpu_context *cpuctx, |
| 104 | struct perf_event_context *ctx, int cpu) | 104 | struct perf_event_context *ctx, int cpu) |
| 105 | { | 105 | { |
| 106 | return 0; | 106 | return 0; |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | void __weak perf_event_print_debug(void) { } | 109 | void __weak perf_event_print_debug(void) { } |
| 110 | 110 | ||
| 111 | static DEFINE_PER_CPU(int, perf_disable_count); | 111 | static DEFINE_PER_CPU(int, perf_disable_count); |
| 112 | 112 | ||
| 113 | void __perf_disable(void) | 113 | void __perf_disable(void) |
| 114 | { | 114 | { |
| 115 | __get_cpu_var(perf_disable_count)++; | 115 | __get_cpu_var(perf_disable_count)++; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | bool __perf_enable(void) | 118 | bool __perf_enable(void) |
| 119 | { | 119 | { |
| 120 | return !--__get_cpu_var(perf_disable_count); | 120 | return !--__get_cpu_var(perf_disable_count); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | void perf_disable(void) | 123 | void perf_disable(void) |
| 124 | { | 124 | { |
| 125 | __perf_disable(); | 125 | __perf_disable(); |
| 126 | hw_perf_disable(); | 126 | hw_perf_disable(); |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | void perf_enable(void) | 129 | void perf_enable(void) |
| 130 | { | 130 | { |
| 131 | if (__perf_enable()) | 131 | if (__perf_enable()) |
| 132 | hw_perf_enable(); | 132 | hw_perf_enable(); |
| 133 | } | 133 | } |
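Since the disable count is per-cpu and only the transition back to zero reaches hw_perf_enable(), perf_disable()/perf_enable() sections nest freely; a sketch of the nesting semantics (the inner pair could just as well sit in a callee):

        perf_disable();         /* count 0 -> 1: hw_perf_disable() runs */

        perf_disable();         /* count 1 -> 2: hardware already off   */
        perf_enable();          /* count 2 -> 1: __perf_enable() is
                                 * false, hw_perf_enable() NOT called   */

        perf_enable();          /* count 1 -> 0: hardware re-enabled    */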
| 134 | 134 | ||
| 135 | static void get_ctx(struct perf_event_context *ctx) | 135 | static void get_ctx(struct perf_event_context *ctx) |
| 136 | { | 136 | { |
| 137 | WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); | 137 | WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | static void free_ctx(struct rcu_head *head) | 140 | static void free_ctx(struct rcu_head *head) |
| 141 | { | 141 | { |
| 142 | struct perf_event_context *ctx; | 142 | struct perf_event_context *ctx; |
| 143 | 143 | ||
| 144 | ctx = container_of(head, struct perf_event_context, rcu_head); | 144 | ctx = container_of(head, struct perf_event_context, rcu_head); |
| 145 | kfree(ctx); | 145 | kfree(ctx); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | static void put_ctx(struct perf_event_context *ctx) | 148 | static void put_ctx(struct perf_event_context *ctx) |
| 149 | { | 149 | { |
| 150 | if (atomic_dec_and_test(&ctx->refcount)) { | 150 | if (atomic_dec_and_test(&ctx->refcount)) { |
| 151 | if (ctx->parent_ctx) | 151 | if (ctx->parent_ctx) |
| 152 | put_ctx(ctx->parent_ctx); | 152 | put_ctx(ctx->parent_ctx); |
| 153 | if (ctx->task) | 153 | if (ctx->task) |
| 154 | put_task_struct(ctx->task); | 154 | put_task_struct(ctx->task); |
| 155 | call_rcu(&ctx->rcu_head, free_ctx); | 155 | call_rcu(&ctx->rcu_head, free_ctx); |
| 156 | } | 156 | } |
| 157 | } | 157 | } |
| 158 | 158 | ||
| 159 | static void unclone_ctx(struct perf_event_context *ctx) | 159 | static void unclone_ctx(struct perf_event_context *ctx) |
| 160 | { | 160 | { |
| 161 | if (ctx->parent_ctx) { | 161 | if (ctx->parent_ctx) { |
| 162 | put_ctx(ctx->parent_ctx); | 162 | put_ctx(ctx->parent_ctx); |
| 163 | ctx->parent_ctx = NULL; | 163 | ctx->parent_ctx = NULL; |
| 164 | } | 164 | } |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | /* | 167 | /* |
| 168 | * If we inherit events we want to return the parent event id | 168 | * If we inherit events we want to return the parent event id |
| 169 | * to userspace. | 169 | * to userspace. |
| 170 | */ | 170 | */ |
| 171 | static u64 primary_event_id(struct perf_event *event) | 171 | static u64 primary_event_id(struct perf_event *event) |
| 172 | { | 172 | { |
| 173 | u64 id = event->id; | 173 | u64 id = event->id; |
| 174 | 174 | ||
| 175 | if (event->parent) | 175 | if (event->parent) |
| 176 | id = event->parent->id; | 176 | id = event->parent->id; |
| 177 | 177 | ||
| 178 | return id; | 178 | return id; |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | /* | 181 | /* |
| 182 | * Get the perf_event_context for a task and lock it. | 182 | * Get the perf_event_context for a task and lock it. |
| 183 | * This has to cope with the fact that until it is locked, | 183 | * This has to cope with the fact that until it is locked, |
| 184 | * the context could get moved to another task. | 184 | * the context could get moved to another task. |
| 185 | */ | 185 | */ |
| 186 | static struct perf_event_context * | 186 | static struct perf_event_context * |
| 187 | perf_lock_task_context(struct task_struct *task, unsigned long *flags) | 187 | perf_lock_task_context(struct task_struct *task, unsigned long *flags) |
| 188 | { | 188 | { |
| 189 | struct perf_event_context *ctx; | 189 | struct perf_event_context *ctx; |
| 190 | 190 | ||
| 191 | rcu_read_lock(); | 191 | rcu_read_lock(); |
| 192 | retry: | 192 | retry: |
| 193 | ctx = rcu_dereference(task->perf_event_ctxp); | 193 | ctx = rcu_dereference(task->perf_event_ctxp); |
| 194 | if (ctx) { | 194 | if (ctx) { |
| 195 | /* | 195 | /* |
| 196 | * If this context is a clone of another, it might | 196 | * If this context is a clone of another, it might |
| 197 | * get swapped for another underneath us by | 197 | * get swapped for another underneath us by |
| 198 | * perf_event_task_sched_out, though the | 198 | * perf_event_task_sched_out, though the |
| 199 | * rcu_read_lock() protects us from any context | 199 | * rcu_read_lock() protects us from any context |
| 200 | * getting freed. Lock the context and check if it | 200 | * getting freed. Lock the context and check if it |
| 201 | * got swapped before we could get the lock, and retry | 201 | * got swapped before we could get the lock, and retry |
| 202 | * if so. If we locked the right context, then it | 202 | * if so. If we locked the right context, then it |
| 203 | * can't get swapped on us any more. | 203 | * can't get swapped on us any more. |
| 204 | */ | 204 | */ |
| 205 | spin_lock_irqsave(&ctx->lock, *flags); | 205 | spin_lock_irqsave(&ctx->lock, *flags); |
| 206 | if (ctx != rcu_dereference(task->perf_event_ctxp)) { | 206 | if (ctx != rcu_dereference(task->perf_event_ctxp)) { |
| 207 | spin_unlock_irqrestore(&ctx->lock, *flags); | 207 | spin_unlock_irqrestore(&ctx->lock, *flags); |
| 208 | goto retry; | 208 | goto retry; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | if (!atomic_inc_not_zero(&ctx->refcount)) { | 211 | if (!atomic_inc_not_zero(&ctx->refcount)) { |
| 212 | spin_unlock_irqrestore(&ctx->lock, *flags); | 212 | spin_unlock_irqrestore(&ctx->lock, *flags); |
| 213 | ctx = NULL; | 213 | ctx = NULL; |
| 214 | } | 214 | } |
| 215 | } | 215 | } |
| 216 | rcu_read_unlock(); | 216 | rcu_read_unlock(); |
| 217 | return ctx; | 217 | return ctx; |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | /* | 220 | /* |
| 221 | * Get the context for a task and increment its pin_count so it | 221 | * Get the context for a task and increment its pin_count so it |
| 222 | * can't get swapped to another task. This also increments its | 222 | * can't get swapped to another task. This also increments its |
| 223 | * reference count so that the context can't get freed. | 223 | * reference count so that the context can't get freed. |
| 224 | */ | 224 | */ |
| 225 | static struct perf_event_context *perf_pin_task_context(struct task_struct *task) | 225 | static struct perf_event_context *perf_pin_task_context(struct task_struct *task) |
| 226 | { | 226 | { |
| 227 | struct perf_event_context *ctx; | 227 | struct perf_event_context *ctx; |
| 228 | unsigned long flags; | 228 | unsigned long flags; |
| 229 | 229 | ||
| 230 | ctx = perf_lock_task_context(task, &flags); | 230 | ctx = perf_lock_task_context(task, &flags); |
| 231 | if (ctx) { | 231 | if (ctx) { |
| 232 | ++ctx->pin_count; | 232 | ++ctx->pin_count; |
| 233 | spin_unlock_irqrestore(&ctx->lock, flags); | 233 | spin_unlock_irqrestore(&ctx->lock, flags); |
| 234 | } | 234 | } |
| 235 | return ctx; | 235 | return ctx; |
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | static void perf_unpin_context(struct perf_event_context *ctx) | 238 | static void perf_unpin_context(struct perf_event_context *ctx) |
| 239 | { | 239 | { |
| 240 | unsigned long flags; | 240 | unsigned long flags; |
| 241 | 241 | ||
| 242 | spin_lock_irqsave(&ctx->lock, flags); | 242 | spin_lock_irqsave(&ctx->lock, flags); |
| 243 | --ctx->pin_count; | 243 | --ctx->pin_count; |
| 244 | spin_unlock_irqrestore(&ctx->lock, flags); | 244 | spin_unlock_irqrestore(&ctx->lock, flags); |
| 245 | put_ctx(ctx); | 245 | put_ctx(ctx); |
| 246 | } | 246 | } |
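The refcount and pin_count answer different questions: the reference keeps the context memory alive, the pin keeps it attached to this task. A sketch of the intended caller-side pairing (taking ctx->mutex in between is the usual convention for users of this API, not something these two functions do themselves):

        /* Sketch: pin, operate on the context, unpin. */
        struct perf_event_context *ctx;

        ctx = perf_pin_task_context(task);      /* +1 ref, +1 pin */
        if (ctx) {
                mutex_lock(&ctx->mutex);
                /* ... ctx cannot move to another task here ... */
                mutex_unlock(&ctx->mutex);
                perf_unpin_context(ctx);        /* -1 pin, -1 ref */
        }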
| 247 | 247 | ||
| 248 | /* | 248 | /* |
| 249 | * Add an event to the lists for its context. | 249 | * Add an event to the lists for its context. |
| 250 | * Must be called with ctx->mutex and ctx->lock held. | 250 | * Must be called with ctx->mutex and ctx->lock held. |
| 251 | */ | 251 | */ |
| 252 | static void | 252 | static void |
| 253 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) | 253 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) |
| 254 | { | 254 | { |
| 255 | struct perf_event *group_leader = event->group_leader; | 255 | struct perf_event *group_leader = event->group_leader; |
| 256 | 256 | ||
| 257 | /* | 257 | /* |
| 258 | * Depending on whether it is a standalone or sibling event, | 258 | * Depending on whether it is a standalone or sibling event, |
| 259 | * add it straight to the context's event list, or to the group | 259 | * add it straight to the context's event list, or to the group |
| 260 | * leader's sibling list: | 260 | * leader's sibling list: |
| 261 | */ | 261 | */ |
| 262 | if (group_leader == event) | 262 | if (group_leader == event) |
| 263 | list_add_tail(&event->group_entry, &ctx->group_list); | 263 | list_add_tail(&event->group_entry, &ctx->group_list); |
| 264 | else { | 264 | else { |
| 265 | list_add_tail(&event->group_entry, &group_leader->sibling_list); | 265 | list_add_tail(&event->group_entry, &group_leader->sibling_list); |
| 266 | group_leader->nr_siblings++; | 266 | group_leader->nr_siblings++; |
| 267 | } | 267 | } |
| 268 | 268 | ||
| 269 | list_add_rcu(&event->event_entry, &ctx->event_list); | 269 | list_add_rcu(&event->event_entry, &ctx->event_list); |
| 270 | ctx->nr_events++; | 270 | ctx->nr_events++; |
| 271 | if (event->attr.inherit_stat) | 271 | if (event->attr.inherit_stat) |
| 272 | ctx->nr_stat++; | 272 | ctx->nr_stat++; |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | /* | 275 | /* |
| 276 | * Remove an event from the lists for its context. | 276 | * Remove an event from the lists for its context. |
| 277 | * Must be called with ctx->mutex and ctx->lock held. | 277 | * Must be called with ctx->mutex and ctx->lock held. |
| 278 | */ | 278 | */ |
| 279 | static void | 279 | static void |
| 280 | list_del_event(struct perf_event *event, struct perf_event_context *ctx) | 280 | list_del_event(struct perf_event *event, struct perf_event_context *ctx) |
| 281 | { | 281 | { |
| 282 | struct perf_event *sibling, *tmp; | 282 | struct perf_event *sibling, *tmp; |
| 283 | 283 | ||
| 284 | if (list_empty(&event->group_entry)) | 284 | if (list_empty(&event->group_entry)) |
| 285 | return; | 285 | return; |
| 286 | ctx->nr_events--; | 286 | ctx->nr_events--; |
| 287 | if (event->attr.inherit_stat) | 287 | if (event->attr.inherit_stat) |
| 288 | ctx->nr_stat--; | 288 | ctx->nr_stat--; |
| 289 | 289 | ||
| 290 | list_del_init(&event->group_entry); | 290 | list_del_init(&event->group_entry); |
| 291 | list_del_rcu(&event->event_entry); | 291 | list_del_rcu(&event->event_entry); |
| 292 | 292 | ||
| 293 | if (event->group_leader != event) | 293 | if (event->group_leader != event) |
| 294 | event->group_leader->nr_siblings--; | 294 | event->group_leader->nr_siblings--; |
| 295 | 295 | ||
| 296 | /* | 296 | /* |
| 297 | * If this was a group event with sibling events then | 297 | * If this was a group event with sibling events then |
| 298 | * upgrade the siblings to singleton events by adding them | 298 | * upgrade the siblings to singleton events by adding them |
| 299 | * to the context list directly: | 299 | * to the context list directly: |
| 300 | */ | 300 | */ |
| 301 | list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { | 301 | list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { |
| 302 | 302 | ||
| 303 | list_move_tail(&sibling->group_entry, &ctx->group_list); | 303 | list_move_tail(&sibling->group_entry, &ctx->group_list); |
| 304 | sibling->group_leader = sibling; | 304 | sibling->group_leader = sibling; |
| 305 | } | 305 | } |
| 306 | } | 306 | } |
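Group structure is carried entirely by these two lists: a leader is linked on ctx->group_list, its siblings on leader->sibling_list, and a singleton event is its own group_leader with an empty sibling list. The canonical walk over all events, group by group, therefore looks like the pattern below (the same shape as the sched-in/out paths that follow):

        struct perf_event *leader, *sub;

        list_for_each_entry(leader, &ctx->group_list, group_entry) {
                /* visit the group leader ... */
                list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                        /* ... then every sibling of that group */
                }
        }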
| 307 | 307 | ||
| 308 | static void | 308 | static void |
| 309 | event_sched_out(struct perf_event *event, | 309 | event_sched_out(struct perf_event *event, |
| 310 | struct perf_cpu_context *cpuctx, | 310 | struct perf_cpu_context *cpuctx, |
| 311 | struct perf_event_context *ctx) | 311 | struct perf_event_context *ctx) |
| 312 | { | 312 | { |
| 313 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 313 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 314 | return; | 314 | return; |
| 315 | 315 | ||
| 316 | event->state = PERF_EVENT_STATE_INACTIVE; | 316 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 317 | if (event->pending_disable) { | 317 | if (event->pending_disable) { |
| 318 | event->pending_disable = 0; | 318 | event->pending_disable = 0; |
| 319 | event->state = PERF_EVENT_STATE_OFF; | 319 | event->state = PERF_EVENT_STATE_OFF; |
| 320 | } | 320 | } |
| 321 | event->tstamp_stopped = ctx->time; | 321 | event->tstamp_stopped = ctx->time; |
| 322 | event->pmu->disable(event); | 322 | event->pmu->disable(event); |
| 323 | event->oncpu = -1; | 323 | event->oncpu = -1; |
| 324 | 324 | ||
| 325 | if (!is_software_event(event)) | 325 | if (!is_software_event(event)) |
| 326 | cpuctx->active_oncpu--; | 326 | cpuctx->active_oncpu--; |
| 327 | ctx->nr_active--; | 327 | ctx->nr_active--; |
| 328 | if (event->attr.exclusive || !cpuctx->active_oncpu) | 328 | if (event->attr.exclusive || !cpuctx->active_oncpu) |
| 329 | cpuctx->exclusive = 0; | 329 | cpuctx->exclusive = 0; |
| 330 | } | 330 | } |
| 331 | 331 | ||
| 332 | static void | 332 | static void |
| 333 | group_sched_out(struct perf_event *group_event, | 333 | group_sched_out(struct perf_event *group_event, |
| 334 | struct perf_cpu_context *cpuctx, | 334 | struct perf_cpu_context *cpuctx, |
| 335 | struct perf_event_context *ctx) | 335 | struct perf_event_context *ctx) |
| 336 | { | 336 | { |
| 337 | struct perf_event *event; | 337 | struct perf_event *event; |
| 338 | 338 | ||
| 339 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) | 339 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) |
| 340 | return; | 340 | return; |
| 341 | 341 | ||
| 342 | event_sched_out(group_event, cpuctx, ctx); | 342 | event_sched_out(group_event, cpuctx, ctx); |
| 343 | 343 | ||
| 344 | /* | 344 | /* |
| 345 | * Schedule out siblings (if any): | 345 | * Schedule out siblings (if any): |
| 346 | */ | 346 | */ |
| 347 | list_for_each_entry(event, &group_event->sibling_list, group_entry) | 347 | list_for_each_entry(event, &group_event->sibling_list, group_entry) |
| 348 | event_sched_out(event, cpuctx, ctx); | 348 | event_sched_out(event, cpuctx, ctx); |
| 349 | 349 | ||
| 350 | if (group_event->attr.exclusive) | 350 | if (group_event->attr.exclusive) |
| 351 | cpuctx->exclusive = 0; | 351 | cpuctx->exclusive = 0; |
| 352 | } | 352 | } |
| 353 | 353 | ||
| 354 | /* | 354 | /* |
| 355 | * Cross CPU call to remove a performance event | 355 | * Cross CPU call to remove a performance event |
| 356 | * | 356 | * |
| 357 | * We disable the event on the hardware level first. After that we | 357 | * We disable the event on the hardware level first. After that we |
| 358 | * remove it from the context list. | 358 | * remove it from the context list. |
| 359 | */ | 359 | */ |
| 360 | static void __perf_event_remove_from_context(void *info) | 360 | static void __perf_event_remove_from_context(void *info) |
| 361 | { | 361 | { |
| 362 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 362 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); |
| 363 | struct perf_event *event = info; | 363 | struct perf_event *event = info; |
| 364 | struct perf_event_context *ctx = event->ctx; | 364 | struct perf_event_context *ctx = event->ctx; |
| 365 | 365 | ||
| 366 | /* | 366 | /* |
| 367 | * If this is a task context, we need to check whether it is | 367 | * If this is a task context, we need to check whether it is |
| 368 | * the current task context of this cpu. If not it has been | 368 | * the current task context of this cpu. If not it has been |
| 369 | * scheduled out before the smp call arrived. | 369 | * scheduled out before the smp call arrived. |
| 370 | */ | 370 | */ |
| 371 | if (ctx->task && cpuctx->task_ctx != ctx) | 371 | if (ctx->task && cpuctx->task_ctx != ctx) |
| 372 | return; | 372 | return; |
| 373 | 373 | ||
| 374 | spin_lock(&ctx->lock); | 374 | spin_lock(&ctx->lock); |
| 375 | /* | 375 | /* |
| 376 | * Protect the list operation against NMI by disabling the | 376 | * Protect the list operation against NMI by disabling the |
| 377 | * events on a global level. | 377 | * events on a global level. |
| 378 | */ | 378 | */ |
| 379 | perf_disable(); | 379 | perf_disable(); |
| 380 | 380 | ||
| 381 | event_sched_out(event, cpuctx, ctx); | 381 | event_sched_out(event, cpuctx, ctx); |
| 382 | 382 | ||
| 383 | list_del_event(event, ctx); | 383 | list_del_event(event, ctx); |
| 384 | 384 | ||
| 385 | if (!ctx->task) { | 385 | if (!ctx->task) { |
| 386 | /* | 386 | /* |
| 387 | * Allow more per task events with respect to the | 387 | * Allow more per task events with respect to the |
| 388 | * reservation: | 388 | * reservation: |
| 389 | */ | 389 | */ |
| 390 | cpuctx->max_pertask = | 390 | cpuctx->max_pertask = |
| 391 | min(perf_max_events - ctx->nr_events, | 391 | min(perf_max_events - ctx->nr_events, |
| 392 | perf_max_events - perf_reserved_percpu); | 392 | perf_max_events - perf_reserved_percpu); |
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | perf_enable(); | 395 | perf_enable(); |
| 396 | spin_unlock(&ctx->lock); | 396 | spin_unlock(&ctx->lock); |
| 397 | } | 397 | } |
| 398 | 398 | ||
| 399 | 399 | ||
| 400 | /* | 400 | /* |
| 401 | * Remove the event from a task's (or a CPU's) list of events. | 401 | * Remove the event from a task's (or a CPU's) list of events. |
| 402 | * | 402 | * |
| 403 | * Must be called with ctx->mutex held. | 403 | * Must be called with ctx->mutex held. |
| 404 | * | 404 | * |
| 405 | * CPU events are removed with a smp call. For task events we only | 405 | * CPU events are removed with a smp call. For task events we only |
| 406 | * call when the task is on a CPU. | 406 | * call when the task is on a CPU. |
| 407 | * | 407 | * |
| 408 | * If event->ctx is a cloned context, callers must make sure that | 408 | * If event->ctx is a cloned context, callers must make sure that |
| 409 | * every task struct that event->ctx->task could possibly point to | 409 | * every task struct that event->ctx->task could possibly point to |
| 410 | * remains valid. This is OK when called from perf_release since | 410 | * remains valid. This is OK when called from perf_release since |
| 411 | * that only calls us on the top-level context, which can't be a clone. | 411 | * that only calls us on the top-level context, which can't be a clone. |
| 412 | * When called from perf_event_exit_task, it's OK because the | 412 | * When called from perf_event_exit_task, it's OK because the |
| 413 | * context has been detached from its task. | 413 | * context has been detached from its task. |
| 414 | */ | 414 | */ |
| 415 | static void perf_event_remove_from_context(struct perf_event *event) | 415 | static void perf_event_remove_from_context(struct perf_event *event) |
| 416 | { | 416 | { |
| 417 | struct perf_event_context *ctx = event->ctx; | 417 | struct perf_event_context *ctx = event->ctx; |
| 418 | struct task_struct *task = ctx->task; | 418 | struct task_struct *task = ctx->task; |
| 419 | 419 | ||
| 420 | if (!task) { | 420 | if (!task) { |
| 421 | /* | 421 | /* |
| 422 | * Per cpu events are removed via an smp call and | 422 | * Per cpu events are removed via an smp call and |
| 423 | * the removal is always successful. | 423 | * the removal is always successful. |
| 424 | */ | 424 | */ |
| 425 | smp_call_function_single(event->cpu, | 425 | smp_call_function_single(event->cpu, |
| 426 | __perf_event_remove_from_context, | 426 | __perf_event_remove_from_context, |
| 427 | event, 1); | 427 | event, 1); |
| 428 | return; | 428 | return; |
| 429 | } | 429 | } |
| 430 | 430 | ||
| 431 | retry: | 431 | retry: |
| 432 | task_oncpu_function_call(task, __perf_event_remove_from_context, | 432 | task_oncpu_function_call(task, __perf_event_remove_from_context, |
| 433 | event); | 433 | event); |
| 434 | 434 | ||
| 435 | spin_lock_irq(&ctx->lock); | 435 | spin_lock_irq(&ctx->lock); |
| 436 | /* | 436 | /* |
| 437 | * If the context is active we need to retry the smp call. | 437 | * If the context is active we need to retry the smp call. |
| 438 | */ | 438 | */ |
| 439 | if (ctx->nr_active && !list_empty(&event->group_entry)) { | 439 | if (ctx->nr_active && !list_empty(&event->group_entry)) { |
| 440 | spin_unlock_irq(&ctx->lock); | 440 | spin_unlock_irq(&ctx->lock); |
| 441 | goto retry; | 441 | goto retry; |
| 442 | } | 442 | } |
| 443 | 443 | ||
| 444 | /* | 444 | /* |
| 445 | * The lock prevents this context from being scheduled in, so we | 445 | * The lock prevents this context from being scheduled in, so we |
| 446 | * can remove the event safely, if the call above did not | 446 | * can remove the event safely, if the call above did not |
| 447 | * succeed. | 447 | * succeed. |
| 448 | */ | 448 | */ |
| 449 | if (!list_empty(&event->group_entry)) { | 449 | if (!list_empty(&event->group_entry)) { |
| 450 | list_del_event(event, ctx); | 450 | list_del_event(event, ctx); |
| 451 | } | 451 | } |
| 452 | spin_unlock_irq(&ctx->lock); | 452 | spin_unlock_irq(&ctx->lock); |
| 453 | } | 453 | } |
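This function is also the template for a retry idiom reused by the disable and install paths below: fire the function at the task's CPU, then take ctx->lock and check whether it actually ran; if the task migrated mid-flight, retry, and if the context is no longer active, do the work directly under the lock. Condensed, with hypothetical helper names:

        retry:
                task_oncpu_function_call(task, __do_on_cpu, event); /* may miss */

                spin_lock_irq(&ctx->lock);
                if (!work_done(event)) {                /* hypothetical check  */
                        if (ctx->is_active) {           /* raced: task moved   */
                                spin_unlock_irq(&ctx->lock);
                                goto retry;
                        }
                        do_locked(event, ctx);          /* ctx idle: safe here */
                }
                spin_unlock_irq(&ctx->lock);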
| 454 | 454 | ||
| 455 | static inline u64 perf_clock(void) | 455 | static inline u64 perf_clock(void) |
| 456 | { | 456 | { |
| 457 | return cpu_clock(smp_processor_id()); | 457 | return cpu_clock(smp_processor_id()); |
| 458 | } | 458 | } |
| 459 | 459 | ||
| 460 | /* | 460 | /* |
| 461 | * Update the record of the current time in a context. | 461 | * Update the record of the current time in a context. |
| 462 | */ | 462 | */ |
| 463 | static void update_context_time(struct perf_event_context *ctx) | 463 | static void update_context_time(struct perf_event_context *ctx) |
| 464 | { | 464 | { |
| 465 | u64 now = perf_clock(); | 465 | u64 now = perf_clock(); |
| 466 | 466 | ||
| 467 | ctx->time += now - ctx->timestamp; | 467 | ctx->time += now - ctx->timestamp; |
| 468 | ctx->timestamp = now; | 468 | ctx->timestamp = now; |
| 469 | } | 469 | } |
| 470 | 470 | ||
| 471 | /* | 471 | /* |
| 472 | * Update the total_time_enabled and total_time_running fields for an event. | 472 | * Update the total_time_enabled and total_time_running fields for an event. |
| 473 | */ | 473 | */ |
| 474 | static void update_event_times(struct perf_event *event) | 474 | static void update_event_times(struct perf_event *event) |
| 475 | { | 475 | { |
| 476 | struct perf_event_context *ctx = event->ctx; | 476 | struct perf_event_context *ctx = event->ctx; |
| 477 | u64 run_end; | 477 | u64 run_end; |
| 478 | 478 | ||
| 479 | if (event->state < PERF_EVENT_STATE_INACTIVE || | 479 | if (event->state < PERF_EVENT_STATE_INACTIVE || |
| 480 | event->group_leader->state < PERF_EVENT_STATE_INACTIVE) | 480 | event->group_leader->state < PERF_EVENT_STATE_INACTIVE) |
| 481 | return; | 481 | return; |
| 482 | 482 | ||
| 483 | event->total_time_enabled = ctx->time - event->tstamp_enabled; | 483 | event->total_time_enabled = ctx->time - event->tstamp_enabled; |
| 484 | 484 | ||
| 485 | if (event->state == PERF_EVENT_STATE_INACTIVE) | 485 | if (event->state == PERF_EVENT_STATE_INACTIVE) |
| 486 | run_end = event->tstamp_stopped; | 486 | run_end = event->tstamp_stopped; |
| 487 | else | 487 | else |
| 488 | run_end = ctx->time; | 488 | run_end = ctx->time; |
| 489 | 489 | ||
| 490 | event->total_time_running = run_end - event->tstamp_running; | 490 | event->total_time_running = run_end - event->tstamp_running; |
| 491 | } | 491 | } |
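The bookkeeping reduces to interval arithmetic on the context clock: tstamp_enabled and tstamp_stopped are recorded ctx->time values, so enabled time keeps growing while running time freezes at the last schedule-out. A worked example with invented numbers:

        /* Invented numbers, all read off ctx->time:
         *   enabled at 100   -> tstamp_enabled = 100
         *   sched-out at 250 -> tstamp_stopped = 250
         * update_event_times() at ctx->time == 400, state INACTIVE:
         *   total_time_enabled = 400 - 100 = 300
         *   total_time_running = 250 - tstamp_running
         * i.e. run_end is pinned to tstamp_stopped once the event is
         * off the PMU, while enabled time tracks the live clock.    */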
| 492 | 492 | ||
| 493 | /* | 493 | /* |
| 494 | * Update total_time_enabled and total_time_running for all events in a group. | 494 | * Update total_time_enabled and total_time_running for all events in a group. |
| 495 | */ | 495 | */ |
| 496 | static void update_group_times(struct perf_event *leader) | 496 | static void update_group_times(struct perf_event *leader) |
| 497 | { | 497 | { |
| 498 | struct perf_event *event; | 498 | struct perf_event *event; |
| 499 | 499 | ||
| 500 | update_event_times(leader); | 500 | update_event_times(leader); |
| 501 | list_for_each_entry(event, &leader->sibling_list, group_entry) | 501 | list_for_each_entry(event, &leader->sibling_list, group_entry) |
| 502 | update_event_times(event); | 502 | update_event_times(event); |
| 503 | } | 503 | } |
| 504 | 504 | ||
| 505 | /* | 505 | /* |
| 506 | * Cross CPU call to disable a performance event | 506 | * Cross CPU call to disable a performance event |
| 507 | */ | 507 | */ |
| 508 | static void __perf_event_disable(void *info) | 508 | static void __perf_event_disable(void *info) |
| 509 | { | 509 | { |
| 510 | struct perf_event *event = info; | 510 | struct perf_event *event = info; |
| 511 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 511 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); |
| 512 | struct perf_event_context *ctx = event->ctx; | 512 | struct perf_event_context *ctx = event->ctx; |
| 513 | 513 | ||
| 514 | /* | 514 | /* |
| 515 | * If this is a per-task event, we need to check whether this | 515 | * If this is a per-task event, we need to check whether this |
| 516 | * event's task is the current task on this cpu. | 516 | * event's task is the current task on this cpu. |
| 517 | */ | 517 | */ |
| 518 | if (ctx->task && cpuctx->task_ctx != ctx) | 518 | if (ctx->task && cpuctx->task_ctx != ctx) |
| 519 | return; | 519 | return; |
| 520 | 520 | ||
| 521 | spin_lock(&ctx->lock); | 521 | spin_lock(&ctx->lock); |
| 522 | 522 | ||
| 523 | /* | 523 | /* |
| 524 | * If the event is on, turn it off. | 524 | * If the event is on, turn it off. |
| 525 | * If it is in error state, leave it in error state. | 525 | * If it is in error state, leave it in error state. |
| 526 | */ | 526 | */ |
| 527 | if (event->state >= PERF_EVENT_STATE_INACTIVE) { | 527 | if (event->state >= PERF_EVENT_STATE_INACTIVE) { |
| 528 | update_context_time(ctx); | 528 | update_context_time(ctx); |
| 529 | update_group_times(event); | 529 | update_group_times(event); |
| 530 | if (event == event->group_leader) | 530 | if (event == event->group_leader) |
| 531 | group_sched_out(event, cpuctx, ctx); | 531 | group_sched_out(event, cpuctx, ctx); |
| 532 | else | 532 | else |
| 533 | event_sched_out(event, cpuctx, ctx); | 533 | event_sched_out(event, cpuctx, ctx); |
| 534 | event->state = PERF_EVENT_STATE_OFF; | 534 | event->state = PERF_EVENT_STATE_OFF; |
| 535 | } | 535 | } |
| 536 | 536 | ||
| 537 | spin_unlock(&ctx->lock); | 537 | spin_unlock(&ctx->lock); |
| 538 | } | 538 | } |
| 539 | 539 | ||
| 540 | /* | 540 | /* |
| 541 | * Disable an event. | 541 | * Disable an event. |
| 542 | * | 542 | * |
| 543 | * If event->ctx is a cloned context, callers must make sure that | 543 | * If event->ctx is a cloned context, callers must make sure that |
| 544 | * every task struct that event->ctx->task could possibly point to | 544 | * every task struct that event->ctx->task could possibly point to |
| 545 | * remains valid. This condition is satisfied when called through | 545 | * remains valid. This condition is satisfied when called through |
| 546 | * perf_event_for_each_child or perf_event_for_each because they | 546 | * perf_event_for_each_child or perf_event_for_each because they |
| 547 | * hold the top-level event's child_mutex, so any descendant that | 547 | * hold the top-level event's child_mutex, so any descendant that |
| 548 | * goes to exit will block in sync_child_event. | 548 | * goes to exit will block in sync_child_event. |
| 549 | * When called from perf_pending_event it's OK because event->ctx | 549 | * When called from perf_pending_event it's OK because event->ctx |
| 550 | * is the current context on this CPU and preemption is disabled, | 550 | * is the current context on this CPU and preemption is disabled, |
| 551 | * hence we can't get into perf_event_task_sched_out for this context. | 551 | * hence we can't get into perf_event_task_sched_out for this context. |
| 552 | */ | 552 | */ |
| 553 | static void perf_event_disable(struct perf_event *event) | 553 | static void perf_event_disable(struct perf_event *event) |
| 554 | { | 554 | { |
| 555 | struct perf_event_context *ctx = event->ctx; | 555 | struct perf_event_context *ctx = event->ctx; |
| 556 | struct task_struct *task = ctx->task; | 556 | struct task_struct *task = ctx->task; |
| 557 | 557 | ||
| 558 | if (!task) { | 558 | if (!task) { |
| 559 | /* | 559 | /* |
| 560 | * Disable the event on the cpu that it's on | 560 | * Disable the event on the cpu that it's on |
| 561 | */ | 561 | */ |
| 562 | smp_call_function_single(event->cpu, __perf_event_disable, | 562 | smp_call_function_single(event->cpu, __perf_event_disable, |
| 563 | event, 1); | 563 | event, 1); |
| 564 | return; | 564 | return; |
| 565 | } | 565 | } |
| 566 | 566 | ||
| 567 | retry: | 567 | retry: |
| 568 | task_oncpu_function_call(task, __perf_event_disable, event); | 568 | task_oncpu_function_call(task, __perf_event_disable, event); |
| 569 | 569 | ||
| 570 | spin_lock_irq(&ctx->lock); | 570 | spin_lock_irq(&ctx->lock); |
| 571 | /* | 571 | /* |
| 572 | * If the event is still active, we need to retry the cross-call. | 572 | * If the event is still active, we need to retry the cross-call. |
| 573 | */ | 573 | */ |
| 574 | if (event->state == PERF_EVENT_STATE_ACTIVE) { | 574 | if (event->state == PERF_EVENT_STATE_ACTIVE) { |
| 575 | spin_unlock_irq(&ctx->lock); | 575 | spin_unlock_irq(&ctx->lock); |
| 576 | goto retry; | 576 | goto retry; |
| 577 | } | 577 | } |
| 578 | 578 | ||
| 579 | /* | 579 | /* |
| 580 | * Since we have the lock this context can't be scheduled | 580 | * Since we have the lock this context can't be scheduled |
| 581 | * in, so we can change the state safely. | 581 | * in, so we can change the state safely. |
| 582 | */ | 582 | */ |
| 583 | if (event->state == PERF_EVENT_STATE_INACTIVE) { | 583 | if (event->state == PERF_EVENT_STATE_INACTIVE) { |
| 584 | update_group_times(event); | 584 | update_group_times(event); |
| 585 | event->state = PERF_EVENT_STATE_OFF; | 585 | event->state = PERF_EVENT_STATE_OFF; |
| 586 | } | 586 | } |
| 587 | 587 | ||
| 588 | spin_unlock_irq(&ctx->lock); | 588 | spin_unlock_irq(&ctx->lock); |
| 589 | } | 589 | } |
| 590 | 590 | ||
| 591 | static int | 591 | static int |
| 592 | event_sched_in(struct perf_event *event, | 592 | event_sched_in(struct perf_event *event, |
| 593 | struct perf_cpu_context *cpuctx, | 593 | struct perf_cpu_context *cpuctx, |
| 594 | struct perf_event_context *ctx, | 594 | struct perf_event_context *ctx, |
| 595 | int cpu) | 595 | int cpu) |
| 596 | { | 596 | { |
| 597 | if (event->state <= PERF_EVENT_STATE_OFF) | 597 | if (event->state <= PERF_EVENT_STATE_OFF) |
| 598 | return 0; | 598 | return 0; |
| 599 | 599 | ||
| 600 | event->state = PERF_EVENT_STATE_ACTIVE; | 600 | event->state = PERF_EVENT_STATE_ACTIVE; |
| 601 | event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ | 601 | event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ |
| 602 | /* | 602 | /* |
| 603 | * The new state must be visible before we turn it on in the hardware: | 603 | * The new state must be visible before we turn it on in the hardware: |
| 604 | */ | 604 | */ |
| 605 | smp_wmb(); | 605 | smp_wmb(); |
| 606 | 606 | ||
| 607 | if (event->pmu->enable(event)) { | 607 | if (event->pmu->enable(event)) { |
| 608 | event->state = PERF_EVENT_STATE_INACTIVE; | 608 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 609 | event->oncpu = -1; | 609 | event->oncpu = -1; |
| 610 | return -EAGAIN; | 610 | return -EAGAIN; |
| 611 | } | 611 | } |
| 612 | 612 | ||
| 613 | event->tstamp_running += ctx->time - event->tstamp_stopped; | 613 | event->tstamp_running += ctx->time - event->tstamp_stopped; |
| 614 | 614 | ||
| 615 | if (!is_software_event(event)) | 615 | if (!is_software_event(event)) |
| 616 | cpuctx->active_oncpu++; | 616 | cpuctx->active_oncpu++; |
| 617 | ctx->nr_active++; | 617 | ctx->nr_active++; |
| 618 | 618 | ||
| 619 | if (event->attr.exclusive) | 619 | if (event->attr.exclusive) |
| 620 | cpuctx->exclusive = 1; | 620 | cpuctx->exclusive = 1; |
| 621 | 621 | ||
| 622 | return 0; | 622 | return 0; |
| 623 | } | 623 | } |
| 624 | 624 | ||
| 625 | static int | 625 | static int |
| 626 | group_sched_in(struct perf_event *group_event, | 626 | group_sched_in(struct perf_event *group_event, |
| 627 | struct perf_cpu_context *cpuctx, | 627 | struct perf_cpu_context *cpuctx, |
| 628 | struct perf_event_context *ctx, | 628 | struct perf_event_context *ctx, |
| 629 | int cpu) | 629 | int cpu) |
| 630 | { | 630 | { |
| 631 | struct perf_event *event, *partial_group; | 631 | struct perf_event *event, *partial_group; |
| 632 | int ret; | 632 | int ret; |
| 633 | 633 | ||
| 634 | if (group_event->state == PERF_EVENT_STATE_OFF) | 634 | if (group_event->state == PERF_EVENT_STATE_OFF) |
| 635 | return 0; | 635 | return 0; |
| 636 | 636 | ||
| 637 | ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu); | 637 | ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu); |
| 638 | if (ret) | 638 | if (ret) |
| 639 | return ret < 0 ? ret : 0; | 639 | return ret < 0 ? ret : 0; |
| 640 | 640 | ||
| 641 | if (event_sched_in(group_event, cpuctx, ctx, cpu)) | 641 | if (event_sched_in(group_event, cpuctx, ctx, cpu)) |
| 642 | return -EAGAIN; | 642 | return -EAGAIN; |
| 643 | 643 | ||
| 644 | /* | 644 | /* |
| 645 | * Schedule in siblings as one group (if any): | 645 | * Schedule in siblings as one group (if any): |
| 646 | */ | 646 | */ |
| 647 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { | 647 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { |
| 648 | if (event_sched_in(event, cpuctx, ctx, cpu)) { | 648 | if (event_sched_in(event, cpuctx, ctx, cpu)) { |
| 649 | partial_group = event; | 649 | partial_group = event; |
| 650 | goto group_error; | 650 | goto group_error; |
| 651 | } | 651 | } |
| 652 | } | 652 | } |
| 653 | 653 | ||
| 654 | return 0; | 654 | return 0; |
| 655 | 655 | ||
| 656 | group_error: | 656 | group_error: |
| 657 | /* | 657 | /* |
| 658 | * Groups can be scheduled in as one unit only, so undo any | 658 | * Groups can be scheduled in as one unit only, so undo any |
| 659 | * partial group before returning: | 659 | * partial group before returning: |
| 660 | */ | 660 | */ |
| 661 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { | 661 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { |
| 662 | if (event == partial_group) | 662 | if (event == partial_group) |
| 663 | break; | 663 | break; |
| 664 | event_sched_out(event, cpuctx, ctx); | 664 | event_sched_out(event, cpuctx, ctx); |
| 665 | } | 665 | } |
| 666 | event_sched_out(group_event, cpuctx, ctx); | 666 | event_sched_out(group_event, cpuctx, ctx); |
| 667 | 667 | ||
| 668 | return -EAGAIN; | 668 | return -EAGAIN; |
| 669 | } | 669 | } |
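The unwind depends on sibling_list order being stable: members were scheduled in list order, so re-walking the same list and stopping at partial_group tears down exactly what made it on, then the leader. The same all-or-nothing shape in isolation (generic names, not from this file):

        /* Generic apply-all-or-roll-back over a list. */
        struct item *it, *failed;

        list_for_each_entry(it, head, entry) {
                if (apply(it)) {                /* hypothetical */
                        failed = it;
                        goto undo;
                }
        }
        return 0;

        undo:
        list_for_each_entry(it, head, entry) {
                if (it == failed)
                        break;
                revert(it);                     /* hypothetical */
        }
        return -EAGAIN;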
| 670 | 670 | ||
| 671 | /* | 671 | /* |
| 672 | * Return 1 for a group consisting entirely of software events, | 672 | * Return 1 for a group consisting entirely of software events, |
| 673 | * 0 if the group contains any hardware events. | 673 | * 0 if the group contains any hardware events. |
| 674 | */ | 674 | */ |
| 675 | static int is_software_only_group(struct perf_event *leader) | 675 | static int is_software_only_group(struct perf_event *leader) |
| 676 | { | 676 | { |
| 677 | struct perf_event *event; | 677 | struct perf_event *event; |
| 678 | 678 | ||
| 679 | if (!is_software_event(leader)) | 679 | if (!is_software_event(leader)) |
| 680 | return 0; | 680 | return 0; |
| 681 | 681 | ||
| 682 | list_for_each_entry(event, &leader->sibling_list, group_entry) | 682 | list_for_each_entry(event, &leader->sibling_list, group_entry) |
| 683 | if (!is_software_event(event)) | 683 | if (!is_software_event(event)) |
| 684 | return 0; | 684 | return 0; |
| 685 | 685 | ||
| 686 | return 1; | 686 | return 1; |
| 687 | } | 687 | } |
| 688 | 688 | ||
| 689 | /* | 689 | /* |
| 690 | * Work out whether we can put this event group on the CPU now. | 690 | * Work out whether we can put this event group on the CPU now. |
| 691 | */ | 691 | */ |
| 692 | static int group_can_go_on(struct perf_event *event, | 692 | static int group_can_go_on(struct perf_event *event, |
| 693 | struct perf_cpu_context *cpuctx, | 693 | struct perf_cpu_context *cpuctx, |
| 694 | int can_add_hw) | 694 | int can_add_hw) |
| 695 | { | 695 | { |
| 696 | /* | 696 | /* |
| 697 | * Groups consisting entirely of software events can always go on. | 697 | * Groups consisting entirely of software events can always go on. |
| 698 | */ | 698 | */ |
| 699 | if (is_software_only_group(event)) | 699 | if (is_software_only_group(event)) |
| 700 | return 1; | 700 | return 1; |
| 701 | /* | 701 | /* |
| 702 | * If an exclusive group is already on, no other hardware | 702 | * If an exclusive group is already on, no other hardware |
| 703 | * events can go on. | 703 | * events can go on. |
| 704 | */ | 704 | */ |
| 705 | if (cpuctx->exclusive) | 705 | if (cpuctx->exclusive) |
| 706 | return 0; | 706 | return 0; |
| 707 | /* | 707 | /* |
| 708 | * If this group is exclusive and there are already | 708 | * If this group is exclusive and there are already |
| 709 | * events on the CPU, it can't go on. | 709 | * events on the CPU, it can't go on. |
| 710 | */ | 710 | */ |
| 711 | if (event->attr.exclusive && cpuctx->active_oncpu) | 711 | if (event->attr.exclusive && cpuctx->active_oncpu) |
| 712 | return 0; | 712 | return 0; |
| 713 | /* | 713 | /* |
| 714 | * Otherwise, try to add it if all previous groups were able | 714 | * Otherwise, try to add it if all previous groups were able |
| 715 | * to go on. | 715 | * to go on. |
| 716 | */ | 716 | */ |
| 717 | return can_add_hw; | 717 | return can_add_hw; |
| 718 | } | 718 | } |
| 719 | 719 | ||
| 720 | static void add_event_to_ctx(struct perf_event *event, | 720 | static void add_event_to_ctx(struct perf_event *event, |
| 721 | struct perf_event_context *ctx) | 721 | struct perf_event_context *ctx) |
| 722 | { | 722 | { |
| 723 | list_add_event(event, ctx); | 723 | list_add_event(event, ctx); |
| 724 | event->tstamp_enabled = ctx->time; | 724 | event->tstamp_enabled = ctx->time; |
| 725 | event->tstamp_running = ctx->time; | 725 | event->tstamp_running = ctx->time; |
| 726 | event->tstamp_stopped = ctx->time; | 726 | event->tstamp_stopped = ctx->time; |
| 727 | } | 727 | } |
| 728 | 728 | ||
| 729 | /* | 729 | /* |
| 730 | * Cross CPU call to install and enable a performance event | 730 | * Cross CPU call to install and enable a performance event |
| 731 | * | 731 | * |
| 732 | * Must be called with ctx->mutex held | 732 | * Must be called with ctx->mutex held |
| 733 | */ | 733 | */ |
| 734 | static void __perf_install_in_context(void *info) | 734 | static void __perf_install_in_context(void *info) |
| 735 | { | 735 | { |
| 736 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 736 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); |
| 737 | struct perf_event *event = info; | 737 | struct perf_event *event = info; |
| 738 | struct perf_event_context *ctx = event->ctx; | 738 | struct perf_event_context *ctx = event->ctx; |
| 739 | struct perf_event *leader = event->group_leader; | 739 | struct perf_event *leader = event->group_leader; |
| 740 | int cpu = smp_processor_id(); | 740 | int cpu = smp_processor_id(); |
| 741 | int err; | 741 | int err; |
| 742 | 742 | ||
| 743 | /* | 743 | /* |
| 744 | * If this is a task context, we need to check whether it is | 744 | * If this is a task context, we need to check whether it is |
| 745 | * the current task context of this cpu. If not it has been | 745 | * the current task context of this cpu. If not it has been |
| 746 | * scheduled out before the smp call arrived. | 746 | * scheduled out before the smp call arrived. |
| 747 | * Or possibly this is the right context but it isn't | 747 | * Or possibly this is the right context but it isn't |
| 748 | * on this cpu because it had no events. | 748 | * on this cpu because it had no events. |
| 749 | */ | 749 | */ |
| 750 | if (ctx->task && cpuctx->task_ctx != ctx) { | 750 | if (ctx->task && cpuctx->task_ctx != ctx) { |
| 751 | if (cpuctx->task_ctx || ctx->task != current) | 751 | if (cpuctx->task_ctx || ctx->task != current) |
| 752 | return; | 752 | return; |
| 753 | cpuctx->task_ctx = ctx; | 753 | cpuctx->task_ctx = ctx; |
| 754 | } | 754 | } |
| 755 | 755 | ||
| 756 | spin_lock(&ctx->lock); | 756 | spin_lock(&ctx->lock); |
| 757 | ctx->is_active = 1; | 757 | ctx->is_active = 1; |
| 758 | update_context_time(ctx); | 758 | update_context_time(ctx); |
| 759 | 759 | ||
| 760 | /* | 760 | /* |
| 761 | * Protect the list operation against NMI by disabling the | 761 | * Protect the list operation against NMI by disabling the |
| 762 | * events on a global level. NOP for non NMI based events. | 762 | * events on a global level. NOP for non NMI based events. |
| 763 | */ | 763 | */ |
| 764 | perf_disable(); | 764 | perf_disable(); |
| 765 | 765 | ||
| 766 | add_event_to_ctx(event, ctx); | 766 | add_event_to_ctx(event, ctx); |
| 767 | 767 | ||
| 768 | /* | 768 | /* |
| 769 | * Don't put the event on if it is disabled or if | 769 | * Don't put the event on if it is disabled or if |
| 770 | * it is in a group and the group isn't on. | 770 | * it is in a group and the group isn't on. |
| 771 | */ | 771 | */ |
| 772 | if (event->state != PERF_EVENT_STATE_INACTIVE || | 772 | if (event->state != PERF_EVENT_STATE_INACTIVE || |
| 773 | (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) | 773 | (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) |
| 774 | goto unlock; | 774 | goto unlock; |
| 775 | 775 | ||
| 776 | /* | 776 | /* |
| 777 | * An exclusive event can't go on if there are already active | 777 | * An exclusive event can't go on if there are already active |
| 778 | * hardware events, and no hardware event can go on if there | 778 | * hardware events, and no hardware event can go on if there |
| 779 | * is already an exclusive event on. | 779 | * is already an exclusive event on. |
| 780 | */ | 780 | */ |
| 781 | if (!group_can_go_on(event, cpuctx, 1)) | 781 | if (!group_can_go_on(event, cpuctx, 1)) |
| 782 | err = -EEXIST; | 782 | err = -EEXIST; |
| 783 | else | 783 | else |
| 784 | err = event_sched_in(event, cpuctx, ctx, cpu); | 784 | err = event_sched_in(event, cpuctx, ctx, cpu); |
| 785 | 785 | ||
| 786 | if (err) { | 786 | if (err) { |
| 787 | /* | 787 | /* |
| 788 | * This event couldn't go on. If it is in a group | 788 | * This event couldn't go on. If it is in a group |
| 789 | * then we have to pull the whole group off. | 789 | * then we have to pull the whole group off. |
| 790 | * If the event group is pinned then put it in error state. | 790 | * If the event group is pinned then put it in error state. |
| 791 | */ | 791 | */ |
| 792 | if (leader != event) | 792 | if (leader != event) |
| 793 | group_sched_out(leader, cpuctx, ctx); | 793 | group_sched_out(leader, cpuctx, ctx); |
| 794 | if (leader->attr.pinned) { | 794 | if (leader->attr.pinned) { |
| 795 | update_group_times(leader); | 795 | update_group_times(leader); |
| 796 | leader->state = PERF_EVENT_STATE_ERROR; | 796 | leader->state = PERF_EVENT_STATE_ERROR; |
| 797 | } | 797 | } |
| 798 | } | 798 | } |
| 799 | 799 | ||
| 800 | if (!err && !ctx->task && cpuctx->max_pertask) | 800 | if (!err && !ctx->task && cpuctx->max_pertask) |
| 801 | cpuctx->max_pertask--; | 801 | cpuctx->max_pertask--; |
| 802 | 802 | ||
| 803 | unlock: | 803 | unlock: |
| 804 | perf_enable(); | 804 | perf_enable(); |
| 805 | 805 | ||
| 806 | spin_unlock(&ctx->lock); | 806 | spin_unlock(&ctx->lock); |
| 807 | } | 807 | } |
| 808 | 808 | ||
| 809 | /* | 809 | /* |
| 810 | * Attach a performance event to a context | 810 | * Attach a performance event to a context |
| 811 | * | 811 | * |
| 812 | * First we add the event to the list with the hardware enable bit | 812 | * First we add the event to the list with the hardware enable bit |
| 813 | * in event->hw_config cleared. | 813 | * in event->hw_config cleared. |
| 814 | * | 814 | * |
| 815 | * If the event is attached to a task which is on a CPU we use a smp | 815 | * If the event is attached to a task which is on a CPU we use a smp |
| 816 | * call to enable it in the task context. The task might have been | 816 | * call to enable it in the task context. The task might have been |
| 817 | * scheduled away, but we check this in the smp call again. | 817 | * scheduled away, but we check this in the smp call again. |
| 818 | * | 818 | * |
| 819 | * Must be called with ctx->mutex held. | 819 | * Must be called with ctx->mutex held. |
| 820 | */ | 820 | */ |
| 821 | static void | 821 | static void |
| 822 | perf_install_in_context(struct perf_event_context *ctx, | 822 | perf_install_in_context(struct perf_event_context *ctx, |
| 823 | struct perf_event *event, | 823 | struct perf_event *event, |
| 824 | int cpu) | 824 | int cpu) |
| 825 | { | 825 | { |
| 826 | struct task_struct *task = ctx->task; | 826 | struct task_struct *task = ctx->task; |
| 827 | 827 | ||
| 828 | if (!task) { | 828 | if (!task) { |
| 829 | /* | 829 | /* |
| 830 | * Per cpu events are installed via an smp call and | 830 | * Per cpu events are installed via an smp call and |
| 831 | * the install is always successful. | 831 | * the install is always successful. |
| 832 | */ | 832 | */ |
| 833 | smp_call_function_single(cpu, __perf_install_in_context, | 833 | smp_call_function_single(cpu, __perf_install_in_context, |
| 834 | event, 1); | 834 | event, 1); |
| 835 | return; | 835 | return; |
| 836 | } | 836 | } |
| 837 | 837 | ||
| 838 | retry: | 838 | retry: |
| 839 | task_oncpu_function_call(task, __perf_install_in_context, | 839 | task_oncpu_function_call(task, __perf_install_in_context, |
| 840 | event); | 840 | event); |
| 841 | 841 | ||
| 842 | spin_lock_irq(&ctx->lock); | 842 | spin_lock_irq(&ctx->lock); |
| 843 | /* | 843 | /* |
| 844 | * we need to retry the smp call. | 844 | * we need to retry the smp call. |
| 845 | */ | 845 | */ |
| 846 | if (ctx->is_active && list_empty(&event->group_entry)) { | 846 | if (ctx->is_active && list_empty(&event->group_entry)) { |
| 847 | spin_unlock_irq(&ctx->lock); | 847 | spin_unlock_irq(&ctx->lock); |
| 848 | goto retry; | 848 | goto retry; |
| 849 | } | 849 | } |
| 850 | 850 | ||
| 851 | /* | 851 | /* |
| 852 | * The lock prevents this context from being scheduled in, so we | 852 | * The lock prevents this context from being scheduled in, so we |
| 853 | * can add the event safely, if the call above did not | 853 | * can add the event safely, if the call above did not |
| 854 | * succeed. | 854 | * succeed. |
| 855 | */ | 855 | */ |
| 856 | if (list_empty(&event->group_entry)) | 856 | if (list_empty(&event->group_entry)) |
| 857 | add_event_to_ctx(event, ctx); | 857 | add_event_to_ctx(event, ctx); |
| 858 | spin_unlock_irq(&ctx->lock); | 858 | spin_unlock_irq(&ctx->lock); |
| 859 | } | 859 | } |
| 860 | 860 | ||
| 861 | /* | 861 | /* |
| 862 | * Put an event into inactive state and update time fields. | 862 | * Put an event into inactive state and update time fields. |
| 863 | * Enabling the leader of a group effectively enables all | 863 | * Enabling the leader of a group effectively enables all |
| 864 | * the group members that aren't explicitly disabled, so we | 864 | * the group members that aren't explicitly disabled, so we |
| 865 | * have to update their ->tstamp_enabled also. | 865 | * have to update their ->tstamp_enabled also. |
| 866 | * Note: this works for group members as well as group leaders | 866 | * Note: this works for group members as well as group leaders |
| 867 | * since the non-leader members' sibling_lists will be empty. | 867 | * since the non-leader members' sibling_lists will be empty. |
| 868 | */ | 868 | */ |
| 869 | static void __perf_event_mark_enabled(struct perf_event *event, | 869 | static void __perf_event_mark_enabled(struct perf_event *event, |
| 870 | struct perf_event_context *ctx) | 870 | struct perf_event_context *ctx) |
| 871 | { | 871 | { |
| 872 | struct perf_event *sub; | 872 | struct perf_event *sub; |
| 873 | 873 | ||
| 874 | event->state = PERF_EVENT_STATE_INACTIVE; | 874 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 875 | event->tstamp_enabled = ctx->time - event->total_time_enabled; | 875 | event->tstamp_enabled = ctx->time - event->total_time_enabled; |
| 876 | list_for_each_entry(sub, &event->sibling_list, group_entry) | 876 | list_for_each_entry(sub, &event->sibling_list, group_entry) |
| 877 | if (sub->state >= PERF_EVENT_STATE_INACTIVE) | 877 | if (sub->state >= PERF_EVENT_STATE_INACTIVE) |
| 878 | sub->tstamp_enabled = | 878 | sub->tstamp_enabled = |
| 879 | ctx->time - sub->total_time_enabled; | 879 | ctx->time - sub->total_time_enabled; |
| 880 | } | 880 | } |
| 881 | 881 | ||
| 882 | /* | 882 | /* |
| 883 | * Cross CPU call to enable a performance event | 883 | * Cross CPU call to enable a performance event |
| 884 | */ | 884 | */ |
| 885 | static void __perf_event_enable(void *info) | 885 | static void __perf_event_enable(void *info) |
| 886 | { | 886 | { |
| 887 | struct perf_event *event = info; | 887 | struct perf_event *event = info; |
| 888 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 888 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); |
| 889 | struct perf_event_context *ctx = event->ctx; | 889 | struct perf_event_context *ctx = event->ctx; |
| 890 | struct perf_event *leader = event->group_leader; | 890 | struct perf_event *leader = event->group_leader; |
| 891 | int err; | 891 | int err; |
| 892 | 892 | ||
| 893 | /* | 893 | /* |
| 894 | * If this is a per-task event, we need to check whether this | 894 | * If this is a per-task event, we need to check whether this |
| 895 | * event's task is the current task on this cpu. | 895 | * event's task is the current task on this cpu. |
| 896 | */ | 896 | */ |
| 897 | if (ctx->task && cpuctx->task_ctx != ctx) { | 897 | if (ctx->task && cpuctx->task_ctx != ctx) { |
| 898 | if (cpuctx->task_ctx || ctx->task != current) | 898 | if (cpuctx->task_ctx || ctx->task != current) |
| 899 | return; | 899 | return; |
| 900 | cpuctx->task_ctx = ctx; | 900 | cpuctx->task_ctx = ctx; |
| 901 | } | 901 | } |
| 902 | 902 | ||
| 903 | spin_lock(&ctx->lock); | 903 | spin_lock(&ctx->lock); |
| 904 | ctx->is_active = 1; | 904 | ctx->is_active = 1; |
| 905 | update_context_time(ctx); | 905 | update_context_time(ctx); |
| 906 | 906 | ||
| 907 | if (event->state >= PERF_EVENT_STATE_INACTIVE) | 907 | if (event->state >= PERF_EVENT_STATE_INACTIVE) |
| 908 | goto unlock; | 908 | goto unlock; |
| 909 | __perf_event_mark_enabled(event, ctx); | 909 | __perf_event_mark_enabled(event, ctx); |
| 910 | 910 | ||
| 911 | /* | 911 | /* |
| 912 | * If the event is in a group and isn't the group leader, | 912 | * If the event is in a group and isn't the group leader, |
| 913 | * then don't put it on unless the group is on. | 913 | * then don't put it on unless the group is on. |
| 914 | */ | 914 | */ |
| 915 | if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) | 915 | if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) |
| 916 | goto unlock; | 916 | goto unlock; |
| 917 | 917 | ||
| 918 | if (!group_can_go_on(event, cpuctx, 1)) { | 918 | if (!group_can_go_on(event, cpuctx, 1)) { |
| 919 | err = -EEXIST; | 919 | err = -EEXIST; |
| 920 | } else { | 920 | } else { |
| 921 | perf_disable(); | 921 | perf_disable(); |
| 922 | if (event == leader) | 922 | if (event == leader) |
| 923 | err = group_sched_in(event, cpuctx, ctx, | 923 | err = group_sched_in(event, cpuctx, ctx, |
| 924 | smp_processor_id()); | 924 | smp_processor_id()); |
| 925 | else | 925 | else |
| 926 | err = event_sched_in(event, cpuctx, ctx, | 926 | err = event_sched_in(event, cpuctx, ctx, |
| 927 | smp_processor_id()); | 927 | smp_processor_id()); |
| 928 | perf_enable(); | 928 | perf_enable(); |
| 929 | } | 929 | } |
| 930 | 930 | ||
| 931 | if (err) { | 931 | if (err) { |
| 932 | /* | 932 | /* |
| 933 | * If this event can't go on and it's part of a | 933 | * If this event can't go on and it's part of a |
| 934 | * group, then the whole group has to come off. | 934 | * group, then the whole group has to come off. |
| 935 | */ | 935 | */ |
| 936 | if (leader != event) | 936 | if (leader != event) |
| 937 | group_sched_out(leader, cpuctx, ctx); | 937 | group_sched_out(leader, cpuctx, ctx); |
| 938 | if (leader->attr.pinned) { | 938 | if (leader->attr.pinned) { |
| 939 | update_group_times(leader); | 939 | update_group_times(leader); |
| 940 | leader->state = PERF_EVENT_STATE_ERROR; | 940 | leader->state = PERF_EVENT_STATE_ERROR; |
| 941 | } | 941 | } |
| 942 | } | 942 | } |
| 943 | 943 | ||
| 944 | unlock: | 944 | unlock: |
| 945 | spin_unlock(&ctx->lock); | 945 | spin_unlock(&ctx->lock); |
| 946 | } | 946 | } |

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
static void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

 out:
	spin_unlock_irq(&ctx->lock);
}

static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
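
/*
 * Illustrative userspace sketch (not part of this file): the
 * function above backs the PERF_EVENT_IOC_REFRESH ioctl.  A
 * profiler that wants a bounded number of overflow notifications
 * might arm the event like this; the fd is assumed to come from
 * perf_event_open() with a sample_period set.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/perf_event.h>
 *
 *	// Re-enable the event and allow 'n' more overflow
 *	// notifications before the kernel disables it again.
 *	static int refresh_event(int perf_fd, int n)
 *	{
 *		return ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, n);
 *	}
 */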

void __perf_event_sched_out(struct perf_event_context *ctx,
			    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active)
		list_for_each_entry(event, &ctx->group_list, group_entry)
			group_sched_out(event, cpuctx, ctx);

	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_read(void *event);

static void __perf_event_sync_stat(struct perf_event *event,
				   struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		__perf_event_read(event);
		break;

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = atomic64_read(&next_event->count);
	value = atomic64_xchg(&event->count, value);
	atomic64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}
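
/*
 * Illustrative trace (not part of the kernel source): if the
 * outgoing event has counted 100 and the incoming clone has
 * counted 40, the read/xchg/set sequence above leaves the
 * outgoing event at 40 and the incoming one at 100, so each count
 * follows the task rather than the context structure.
 * atomic64_xchg() both stores the new value and returns the old
 * one, which is why the swap needs no temporary beyond 'value'.
 */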

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				 struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	event = list_first_entry(&ctx->event_list,
				 struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
				      struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register.  If an NMI hits, then it will
 * not restart the event.
 */
void perf_event_task_sched_out(struct task_struct *task,
			       struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp = next_ctx;
			next->perf_event_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_event_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}
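
/*
 * Illustrative trace (not part of the kernel source): with clone
 * contexts A (on 'task') and B (on 'next'), the optimized path
 * above ends with task->perf_event_ctxp == B and
 * next->perf_event_ctxp == A, and A->task/B->task updated to
 * match.  Nothing is scheduled out or back in; only the pointers
 * and, via perf_event_sync_stat(), the inherit_stat counts move,
 * which is what makes the clone switch cheap.
 */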

/*
 * Called with IRQs disabled
 */
static void __perf_event_task_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_event_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_event_sched_out(&cpuctx->ctx, cpuctx);
}

static void
__perf_event_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_event *event;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(event, &ctx->group_list, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF ||
		    !event->attr.pinned)
			continue;
		if (event->cpu != -1 && event->cpu != cpu)
			continue;

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx, cpu);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}

	list_for_each_entry(event, &ctx->group_list, group_entry) {
		/*
		 * Ignore events in OFF or ERROR state, and
		 * ignore pinned events since we did them already.
		 */
		if (event->state <= PERF_EVENT_STATE_OFF ||
		    event->attr.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (event->cpu != -1 && event->cpu != cpu)
			continue;

		if (group_can_go_on(event, cpuctx, can_add_hw))
			if (group_sched_in(event, cpuctx, ctx, cpu))
				can_add_hw = 0;
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register.  If an NMI hits, then it will
 * keep the event running.
 */
void perf_event_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_event_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	__perf_event_sched_in(ctx, cpuctx, cpu);
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static void perf_adjust_period(struct perf_event *event, u64 events)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period, sample_period;
	s64 delta;

	events *= hwc->sample_period;
	period = div64_u64(events, event->attr.sample_freq);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;
}
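
/*
 * Illustrative arithmetic (not part of the kernel source): with
 * sample_freq = 1000 Hz, a current sample_period of 10000 and an
 * observed interrupt rate of 2000/sec ('events' = 2000), the
 * estimated event rate is 2000 * 10000 = 20,000,000 events/sec,
 * so the period that would hit the target frequency is
 * 20,000,000 / 1000 = 20000.  The raw delta of +10000 is damped
 * to (10000 + 7) / 8 = 1250, giving a new period of 11250; the
 * divide-by-8 low-pass filter walks toward the target over
 * several ticks instead of oscillating on noisy input.
 */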

static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, freq;

	spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->unthrottle(event);
			interrupts = 2*sysctl_perf_event_sample_rate/HZ;
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		/*
		 * if the specified freq < HZ then we need to skip ticks
		 */
		if (event->attr.sample_freq < HZ) {
			freq = event->attr.sample_freq;

			hwc->freq_count += freq;
			hwc->freq_interrupts += interrupts;

			if (hwc->freq_count < HZ)
				continue;

			interrupts = hwc->freq_interrupts;
			hwc->freq_interrupts = 0;
			hwc->freq_count -= HZ;
		} else
			freq = HZ;

		perf_adjust_period(event, freq * interrupts);

		/*
		 * In order to avoid being stalled by an (accidental) huge
		 * sample period, force reset the sample period if we didn't
		 * get any events in this freq period.
		 */
		if (!interrupts) {
			perf_disable();
			event->pmu->disable(event);
			atomic64_set(&hwc->period_left, 0);
			event->pmu->enable(event);
			perf_enable();
		}
	}
	spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	struct perf_event *event;

	if (!ctx->nr_events)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group events too):
	 */
	perf_disable();
	list_for_each_entry(event, &ctx->group_list, group_entry) {
		list_move_tail(&event->group_entry, &ctx->group_list);
		break;
	}
	perf_enable();

	spin_unlock(&ctx->lock);
}

void perf_event_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	if (!atomic_read(&nr_events))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = curr->perf_event_ctxp;

	perf_ctx_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_ctx_adjust_freq(ctx);

	perf_event_cpu_sched_out(cpuctx);
	if (ctx)
		__perf_event_task_sched_out(ctx);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_event_cpu_sched_in(cpuctx, cpu);
	if (ctx)
		perf_event_task_sched_in(curr, cpu);
}

/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;

	local_irq_save(flags);
	ctx = task->perf_event_ctxp;
	if (!ctx || !ctx->nr_events)
		goto out;

	__perf_event_task_sched_out(ctx);

	spin_lock(&ctx->lock);

	list_for_each_entry(event, &ctx->group_list, group_entry) {
		if (!event->attr.enable_on_exec)
			continue;
		event->attr.enable_on_exec = 0;
		if (event->state >= PERF_EVENT_STATE_INACTIVE)
			continue;
		__perf_event_mark_enabled(event, ctx);
		enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	spin_unlock(&ctx->lock);

	perf_event_task_sched_in(task, smp_processor_id());
 out:
	local_irq_restore(flags);
}
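
/*
 * Illustrative userspace sketch (not part of this file): the
 * enable-on-exec path above is what lets a tool open an event
 * disabled on a forked child and have counting begin exactly at
 * exec().  The attr fields are from <linux/perf_event.h>; the raw
 * syscall wrapper is assumed since glibc provides none.
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	static int open_counter_for_exec(pid_t child)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.type = PERF_TYPE_HARDWARE;
 *		attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *		attr.disabled = 1;		// stays off...
 *		attr.enable_on_exec = 1;	// ...until the child execs
 *
 *		return syscall(__NR_perf_event_open, &attr, child,
 *			       -1, -1, 0);	// any cpu, no group, no flags
 *	}
 */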

/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	unsigned long flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
	 * scheduled out before the smp call arrived.  In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	event->pmu->read(event);
	update_event_times(event);
	local_irq_restore(flags);
}

static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_event_times(event);
	}

	return atomic64_read(&event->count);
}

/*
 * Initialize the perf_event context in a task_struct:
 */
static void
__perf_event_init_context(struct perf_event_context *ctx,
			  struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->group_list);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}

static struct perf_event_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	/*
	 * If cpu is not a wildcard then this is a percpu event:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach events to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

 retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_event_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

 errout:
	put_task_struct(task);
	return ERR_PTR(err);
}
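
/*
 * Illustrative trace (not part of the kernel source): if two
 * threads call find_get_context() for the same task at once, both
 * may allocate a context, but only one cmpxchg() of
 * task->perf_event_ctxp from NULL succeeds.  The loser sees a
 * non-NULL old value, frees its own allocation and retries, now
 * finding the winner's context via perf_lock_task_context().
 * Publishing with cmpxchg() avoids holding a lock across the
 * allocation.
 */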

static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

static void perf_pending_sync(struct perf_event *event);

static void free_event(struct perf_event *event)
{
	perf_pending_sync(event);

	if (!event->parent) {
		atomic_dec(&nr_events);
		if (event->attr.mmap)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
	}

	if (event->output) {
		fput(event->output->filp);
		event->output = NULL;
	}

	if (event->destroy)
		event->destroy(event);

	put_ctx(event->ctx);
	call_rcu(&event->rcu_head, free_event_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;
	struct perf_event_context *ctx = event->ctx;

	file->private_data = NULL;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_event_remove_from_context(event);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&event->owner->perf_event_mutex);
	list_del_init(&event->owner_entry);
	mutex_unlock(&event->owner->perf_event_mutex);
	put_task_struct(event->owner);

	free_event(event);

	return 0;
}

static int perf_event_read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;

	return size;
}
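
/*
 * Illustrative arithmetic (not part of the kernel source): for
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID on a leader
 * with two siblings, entry = 8 + 8 = 16 bytes (value + id),
 * nr = 1 + 2 = 3, and size starts at 8 for the 'nr' header word,
 * so the read() buffer must hold 8 + 3 * 16 = 56 bytes.  Adding
 * both TOTAL_TIME flags would contribute another 16.
 */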
| 1752 | 1752 | ||
| 1753 | static u64 perf_event_read_value(struct perf_event *event) | 1753 | static u64 perf_event_read_value(struct perf_event *event) |
| 1754 | { | 1754 | { |
| 1755 | struct perf_event *child; | 1755 | struct perf_event *child; |
| 1756 | u64 total = 0; | 1756 | u64 total = 0; |
| 1757 | 1757 | ||
| 1758 | total += perf_event_read(event); | 1758 | total += perf_event_read(event); |
| 1759 | list_for_each_entry(child, &event->child_list, child_list) | 1759 | list_for_each_entry(child, &event->child_list, child_list) |
| 1760 | total += perf_event_read(child); | 1760 | total += perf_event_read(child); |
| 1761 | 1761 | ||
| 1762 | return total; | 1762 | return total; |
| 1763 | } | 1763 | } |
| 1764 | 1764 | ||
| 1765 | static int perf_event_read_entry(struct perf_event *event, | 1765 | static int perf_event_read_entry(struct perf_event *event, |
| 1766 | u64 read_format, char __user *buf) | 1766 | u64 read_format, char __user *buf) |
| 1767 | { | 1767 | { |
| 1768 | int n = 0, count = 0; | 1768 | int n = 0, count = 0; |
| 1769 | u64 values[2]; | 1769 | u64 values[2]; |
| 1770 | 1770 | ||
| 1771 | values[n++] = perf_event_read_value(event); | 1771 | values[n++] = perf_event_read_value(event); |
| 1772 | if (read_format & PERF_FORMAT_ID) | 1772 | if (read_format & PERF_FORMAT_ID) |
| 1773 | values[n++] = primary_event_id(event); | 1773 | values[n++] = primary_event_id(event); |
| 1774 | 1774 | ||
| 1775 | count = n * sizeof(u64); | 1775 | count = n * sizeof(u64); |
| 1776 | 1776 | ||
| 1777 | if (copy_to_user(buf, values, count)) | 1777 | if (copy_to_user(buf, values, count)) |
| 1778 | return -EFAULT; | 1778 | return -EFAULT; |
| 1779 | 1779 | ||
| 1780 | return count; | 1780 | return count; |
| 1781 | } | 1781 | } |
| 1782 | 1782 | ||
| 1783 | static int perf_event_read_group(struct perf_event *event, | 1783 | static int perf_event_read_group(struct perf_event *event, |
| 1784 | u64 read_format, char __user *buf) | 1784 | u64 read_format, char __user *buf) |
| 1785 | { | 1785 | { |
| 1786 | struct perf_event *leader = event->group_leader, *sub; | 1786 | struct perf_event *leader = event->group_leader, *sub; |
| 1787 | int n = 0, size = 0, err = -EFAULT; | 1787 | int n = 0, size = 0, err = -EFAULT; |
| 1788 | u64 values[3]; | 1788 | u64 values[3]; |
| 1789 | 1789 | ||
| 1790 | values[n++] = 1 + leader->nr_siblings; | 1790 | values[n++] = 1 + leader->nr_siblings; |
| 1791 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 1791 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
| 1792 | values[n++] = leader->total_time_enabled + | 1792 | values[n++] = leader->total_time_enabled + |
| 1793 | atomic64_read(&leader->child_total_time_enabled); | 1793 | atomic64_read(&leader->child_total_time_enabled); |
| 1794 | } | 1794 | } |
| 1795 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 1795 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { |
| 1796 | values[n++] = leader->total_time_running + | 1796 | values[n++] = leader->total_time_running + |
| 1797 | atomic64_read(&leader->child_total_time_running); | 1797 | atomic64_read(&leader->child_total_time_running); |
| 1798 | } | 1798 | } |
| 1799 | 1799 | ||
| 1800 | size = n * sizeof(u64); | 1800 | size = n * sizeof(u64); |
| 1801 | 1801 | ||
| 1802 | if (copy_to_user(buf, values, size)) | 1802 | if (copy_to_user(buf, values, size)) |
| 1803 | return -EFAULT; | 1803 | return -EFAULT; |
| 1804 | 1804 | ||
| 1805 | err = perf_event_read_entry(leader, read_format, buf + size); | 1805 | err = perf_event_read_entry(leader, read_format, buf + size); |
| 1806 | if (err < 0) | 1806 | if (err < 0) |
| 1807 | return err; | 1807 | return err; |
| 1808 | 1808 | ||
| 1809 | size += err; | 1809 | size += err; |
| 1810 | 1810 | ||
| 1811 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { | 1811 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { |
| 1812 | err = perf_event_read_entry(sub, read_format, | 1812 | err = perf_event_read_entry(sub, read_format, |
| 1813 | buf + size); | 1813 | buf + size); |
| 1814 | if (err < 0) | 1814 | if (err < 0) |
| 1815 | return err; | 1815 | return err; |
| 1816 | 1816 | ||
| 1817 | size += err; | 1817 | size += err; |
| 1818 | } | 1818 | } |
| 1819 | 1819 | ||
| 1820 | return size; | 1820 | return size; |
| 1821 | } | 1821 | } |
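Laid out as a C struct, the byte stream perf_event_read_group() produces for read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID with both TOTAL_TIME flags set looks like this (a user-space sketch of the layout, not kernel code):

    #include <stdint.h>

    struct read_group {
            uint64_t nr;            /* 1 + leader->nr_siblings */
            uint64_t time_enabled;  /* leader + children, time enabled */
            uint64_t time_running;  /* leader + children, time running */
            struct {
                    uint64_t value; /* perf_event_read_value() */
                    uint64_t id;    /* primary_event_id() */
            } cnt[];                /* leader first, then each sibling */
    };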
| 1822 | 1822 | ||
| 1823 | static int perf_event_read_one(struct perf_event *event, | 1823 | static int perf_event_read_one(struct perf_event *event, |
| 1824 | u64 read_format, char __user *buf) | 1824 | u64 read_format, char __user *buf) |
| 1825 | { | 1825 | { |
| 1826 | u64 values[4]; | 1826 | u64 values[4]; |
| 1827 | int n = 0; | 1827 | int n = 0; |
| 1828 | 1828 | ||
| 1829 | values[n++] = perf_event_read_value(event); | 1829 | values[n++] = perf_event_read_value(event); |
| 1830 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 1830 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
| 1831 | values[n++] = event->total_time_enabled + | 1831 | values[n++] = event->total_time_enabled + |
| 1832 | atomic64_read(&event->child_total_time_enabled); | 1832 | atomic64_read(&event->child_total_time_enabled); |
| 1833 | } | 1833 | } |
| 1834 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 1834 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { |
| 1835 | values[n++] = event->total_time_running + | 1835 | values[n++] = event->total_time_running + |
| 1836 | atomic64_read(&event->child_total_time_running); | 1836 | atomic64_read(&event->child_total_time_running); |
| 1837 | } | 1837 | } |
| 1838 | if (read_format & PERF_FORMAT_ID) | 1838 | if (read_format & PERF_FORMAT_ID) |
| 1839 | values[n++] = primary_event_id(event); | 1839 | values[n++] = primary_event_id(event); |
| 1840 | 1840 | ||
| 1841 | if (copy_to_user(buf, values, n * sizeof(u64))) | 1841 | if (copy_to_user(buf, values, n * sizeof(u64))) |
| 1842 | return -EFAULT; | 1842 | return -EFAULT; |
| 1843 | 1843 | ||
| 1844 | return n * sizeof(u64); | 1844 | return n * sizeof(u64); |
| 1845 | } | 1845 | } |
| 1846 | 1846 | ||
| 1847 | /* | 1847 | /* |
| 1848 | * Read the performance event - simple non-blocking version for now | 1848 | * Read the performance event - simple non-blocking version for now |
| 1849 | */ | 1849 | */ |
| 1850 | static ssize_t | 1850 | static ssize_t |
| 1851 | perf_read_hw(struct perf_event *event, char __user *buf, size_t count) | 1851 | perf_read_hw(struct perf_event *event, char __user *buf, size_t count) |
| 1852 | { | 1852 | { |
| 1853 | u64 read_format = event->attr.read_format; | 1853 | u64 read_format = event->attr.read_format; |
| 1854 | int ret; | 1854 | int ret; |
| 1855 | 1855 | ||
| 1856 | /* | 1856 | /* |
| 1857 | * Return end-of-file for a read on an event that is in | 1857 | * Return end-of-file for a read on an event that is in |
| 1858 | * error state (i.e. because it was pinned but it couldn't be | 1858 | * error state (i.e. because it was pinned but it couldn't be |
| 1859 | * scheduled onto the CPU at some point). | 1859 | * scheduled onto the CPU at some point). |
| 1860 | */ | 1860 | */ |
| 1861 | if (event->state == PERF_EVENT_STATE_ERROR) | 1861 | if (event->state == PERF_EVENT_STATE_ERROR) |
| 1862 | return 0; | 1862 | return 0; |
| 1863 | 1863 | ||
| 1864 | if (count < perf_event_read_size(event)) | 1864 | if (count < perf_event_read_size(event)) |
| 1865 | return -ENOSPC; | 1865 | return -ENOSPC; |
| 1866 | 1866 | ||
| 1867 | WARN_ON_ONCE(event->ctx->parent_ctx); | 1867 | WARN_ON_ONCE(event->ctx->parent_ctx); |
| 1868 | mutex_lock(&event->child_mutex); | 1868 | mutex_lock(&event->child_mutex); |
| 1869 | if (read_format & PERF_FORMAT_GROUP) | 1869 | if (read_format & PERF_FORMAT_GROUP) |
| 1870 | ret = perf_event_read_group(event, read_format, buf); | 1870 | ret = perf_event_read_group(event, read_format, buf); |
| 1871 | else | 1871 | else |
| 1872 | ret = perf_event_read_one(event, read_format, buf); | 1872 | ret = perf_event_read_one(event, read_format, buf); |
| 1873 | mutex_unlock(&event->child_mutex); | 1873 | mutex_unlock(&event->child_mutex); |
| 1874 | 1874 | ||
| 1875 | return ret; | 1875 | return ret; |
| 1876 | } | 1876 | } |
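From user space, a minimal sketch of consuming this read path for a single counter (fd is a hypothetical perf event file descriptor opened without PERF_FORMAT_GROUP but with both TOTAL_TIME flags and PERF_FORMAT_ID):

    #include <stdint.h>
    #include <unistd.h>

    struct read_one {                /* matches perf_event_read_one() order */
            uint64_t value;
            uint64_t time_enabled;
            uint64_t time_running;
            uint64_t id;
    };

    static int read_counter(int fd, struct read_one *r)
    {
            /* The buffer must hold at least perf_event_read_size() bytes,
             * otherwise the kernel returns -ENOSPC. */
            return read(fd, r, sizeof(*r)) == sizeof(*r) ? 0 : -1;
    }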
| 1877 | 1877 | ||
| 1878 | static ssize_t | 1878 | static ssize_t |
| 1879 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 1879 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
| 1880 | { | 1880 | { |
| 1881 | struct perf_event *event = file->private_data; | 1881 | struct perf_event *event = file->private_data; |
| 1882 | 1882 | ||
| 1883 | return perf_read_hw(event, buf, count); | 1883 | return perf_read_hw(event, buf, count); |
| 1884 | } | 1884 | } |
| 1885 | 1885 | ||
| 1886 | static unsigned int perf_poll(struct file *file, poll_table *wait) | 1886 | static unsigned int perf_poll(struct file *file, poll_table *wait) |
| 1887 | { | 1887 | { |
| 1888 | struct perf_event *event = file->private_data; | 1888 | struct perf_event *event = file->private_data; |
| 1889 | struct perf_mmap_data *data; | 1889 | struct perf_mmap_data *data; |
| 1890 | unsigned int events = POLLHUP; | 1890 | unsigned int events = POLLHUP; |
| 1891 | 1891 | ||
| 1892 | rcu_read_lock(); | 1892 | rcu_read_lock(); |
| 1893 | data = rcu_dereference(event->data); | 1893 | data = rcu_dereference(event->data); |
| 1894 | if (data) | 1894 | if (data) |
| 1895 | events = atomic_xchg(&data->poll, 0); | 1895 | events = atomic_xchg(&data->poll, 0); |
| 1896 | rcu_read_unlock(); | 1896 | rcu_read_unlock(); |
| 1897 | 1897 | ||
| 1898 | poll_wait(file, &event->waitq, wait); | 1898 | poll_wait(file, &event->waitq, wait); |
| 1899 | 1899 | ||
| 1900 | return events; | 1900 | return events; |
| 1901 | } | 1901 | } |
| 1902 | 1902 | ||
| 1903 | static void perf_event_reset(struct perf_event *event) | 1903 | static void perf_event_reset(struct perf_event *event) |
| 1904 | { | 1904 | { |
| 1905 | (void)perf_event_read(event); | 1905 | (void)perf_event_read(event); |
| 1906 | atomic64_set(&event->count, 0); | 1906 | atomic64_set(&event->count, 0); |
| 1907 | perf_event_update_userpage(event); | 1907 | perf_event_update_userpage(event); |
| 1908 | } | 1908 | } |
| 1909 | 1909 | ||
| 1910 | /* | 1910 | /* |
| 1911 | * Holding the top-level event's child_mutex means that any | 1911 | * Holding the top-level event's child_mutex means that any |
| 1912 | * descendant process that has inherited this event will block | 1912 | * descendant process that has inherited this event will block |
| 1913 | * in sync_child_event if it goes to exit, thus satisfying the | 1913 | * in sync_child_event if it goes to exit, thus satisfying the |
| 1914 | * task existence requirements of perf_event_enable/disable. | 1914 | * task existence requirements of perf_event_enable/disable. |
| 1915 | */ | 1915 | */ |
| 1916 | static void perf_event_for_each_child(struct perf_event *event, | 1916 | static void perf_event_for_each_child(struct perf_event *event, |
| 1917 | void (*func)(struct perf_event *)) | 1917 | void (*func)(struct perf_event *)) |
| 1918 | { | 1918 | { |
| 1919 | struct perf_event *child; | 1919 | struct perf_event *child; |
| 1920 | 1920 | ||
| 1921 | WARN_ON_ONCE(event->ctx->parent_ctx); | 1921 | WARN_ON_ONCE(event->ctx->parent_ctx); |
| 1922 | mutex_lock(&event->child_mutex); | 1922 | mutex_lock(&event->child_mutex); |
| 1923 | func(event); | 1923 | func(event); |
| 1924 | list_for_each_entry(child, &event->child_list, child_list) | 1924 | list_for_each_entry(child, &event->child_list, child_list) |
| 1925 | func(child); | 1925 | func(child); |
| 1926 | mutex_unlock(&event->child_mutex); | 1926 | mutex_unlock(&event->child_mutex); |
| 1927 | } | 1927 | } |
| 1928 | 1928 | ||
| 1929 | static void perf_event_for_each(struct perf_event *event, | 1929 | static void perf_event_for_each(struct perf_event *event, |
| 1930 | void (*func)(struct perf_event *)) | 1930 | void (*func)(struct perf_event *)) |
| 1931 | { | 1931 | { |
| 1932 | struct perf_event_context *ctx = event->ctx; | 1932 | struct perf_event_context *ctx = event->ctx; |
| 1933 | struct perf_event *sibling; | 1933 | struct perf_event *sibling; |
| 1934 | 1934 | ||
| 1935 | WARN_ON_ONCE(ctx->parent_ctx); | 1935 | WARN_ON_ONCE(ctx->parent_ctx); |
| 1936 | mutex_lock(&ctx->mutex); | 1936 | mutex_lock(&ctx->mutex); |
| 1937 | event = event->group_leader; | 1937 | event = event->group_leader; |
| 1938 | 1938 | ||
| 1939 | perf_event_for_each_child(event, func); | 1939 | perf_event_for_each_child(event, func); |
| 1941 | list_for_each_entry(sibling, &event->sibling_list, group_entry) | 1941 | list_for_each_entry(sibling, &event->sibling_list, group_entry) |
| 1942 | perf_event_for_each_child(sibling, func); | 1942 | perf_event_for_each_child(sibling, func); |
| 1943 | mutex_unlock(&ctx->mutex); | 1943 | mutex_unlock(&ctx->mutex); |
| 1944 | } | 1944 | } |
| 1945 | 1945 | ||
| 1946 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | 1946 | static int perf_event_period(struct perf_event *event, u64 __user *arg) |
| 1947 | { | 1947 | { |
| 1948 | struct perf_event_context *ctx = event->ctx; | 1948 | struct perf_event_context *ctx = event->ctx; |
| 1950 | int ret = 0; | 1950 | int ret = 0; |
| 1951 | u64 value; | 1951 | u64 value; |
| 1952 | 1952 | ||
| 1953 | if (!event->attr.sample_period) | 1953 | if (!event->attr.sample_period) |
| 1954 | return -EINVAL; | 1954 | return -EINVAL; |
| 1955 | 1955 | ||
| 1956 | if (copy_from_user(&value, arg, sizeof(value))) | 1956 | if (copy_from_user(&value, arg, sizeof(value))) |
| 1958 | return -EFAULT; | 1958 | return -EFAULT; |
| 1959 | 1959 | ||
| 1960 | if (!value) | 1960 | if (!value) |
| 1961 | return -EINVAL; | 1961 | return -EINVAL; |
| 1962 | 1962 | ||
| 1963 | spin_lock_irq(&ctx->lock); | 1963 | spin_lock_irq(&ctx->lock); |
| 1964 | if (event->attr.freq) { | 1964 | if (event->attr.freq) { |
| 1965 | if (value > sysctl_perf_event_sample_rate) { | 1965 | if (value > sysctl_perf_event_sample_rate) { |
| 1966 | ret = -EINVAL; | 1966 | ret = -EINVAL; |
| 1967 | goto unlock; | 1967 | goto unlock; |
| 1968 | } | 1968 | } |
| 1969 | 1969 | ||
| 1970 | event->attr.sample_freq = value; | 1970 | event->attr.sample_freq = value; |
| 1971 | } else { | 1971 | } else { |
| 1972 | event->attr.sample_period = value; | 1972 | event->attr.sample_period = value; |
| 1973 | event->hw.sample_period = value; | 1973 | event->hw.sample_period = value; |
| 1974 | } | 1974 | } |
| 1975 | unlock: | 1975 | unlock: |
| 1976 | spin_unlock_irq(&ctx->lock); | 1976 | spin_unlock_irq(&ctx->lock); |
| 1977 | 1977 | ||
| 1978 | return ret; | 1978 | return ret; |
| 1979 | } | 1979 | } |
| 1980 | 1980 | ||
| 1981 | static int perf_event_set_output(struct perf_event *event, int output_fd); | 1981 | static int perf_event_set_output(struct perf_event *event, int output_fd); |
| 1982 | static int perf_event_set_filter(struct perf_event *event, void __user *arg); | 1982 | static int perf_event_set_filter(struct perf_event *event, void __user *arg); |
| 1983 | 1983 | ||
| 1984 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 1984 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 1985 | { | 1985 | { |
| 1986 | struct perf_event *event = file->private_data; | 1986 | struct perf_event *event = file->private_data; |
| 1987 | void (*func)(struct perf_event *); | 1987 | void (*func)(struct perf_event *); |
| 1988 | u32 flags = arg; | 1988 | u32 flags = arg; |
| 1989 | 1989 | ||
| 1990 | switch (cmd) { | 1990 | switch (cmd) { |
| 1991 | case PERF_EVENT_IOC_ENABLE: | 1991 | case PERF_EVENT_IOC_ENABLE: |
| 1992 | func = perf_event_enable; | 1992 | func = perf_event_enable; |
| 1993 | break; | 1993 | break; |
| 1994 | case PERF_EVENT_IOC_DISABLE: | 1994 | case PERF_EVENT_IOC_DISABLE: |
| 1995 | func = perf_event_disable; | 1995 | func = perf_event_disable; |
| 1996 | break; | 1996 | break; |
| 1997 | case PERF_EVENT_IOC_RESET: | 1997 | case PERF_EVENT_IOC_RESET: |
| 1998 | func = perf_event_reset; | 1998 | func = perf_event_reset; |
| 1999 | break; | 1999 | break; |
| 2000 | 2000 | ||
| 2001 | case PERF_EVENT_IOC_REFRESH: | 2001 | case PERF_EVENT_IOC_REFRESH: |
| 2002 | return perf_event_refresh(event, arg); | 2002 | return perf_event_refresh(event, arg); |
| 2003 | 2003 | ||
| 2004 | case PERF_EVENT_IOC_PERIOD: | 2004 | case PERF_EVENT_IOC_PERIOD: |
| 2005 | return perf_event_period(event, (u64 __user *)arg); | 2005 | return perf_event_period(event, (u64 __user *)arg); |
| 2006 | 2006 | ||
| 2007 | case PERF_EVENT_IOC_SET_OUTPUT: | 2007 | case PERF_EVENT_IOC_SET_OUTPUT: |
| 2008 | return perf_event_set_output(event, arg); | 2008 | return perf_event_set_output(event, arg); |
| 2009 | 2009 | ||
| 2010 | case PERF_EVENT_IOC_SET_FILTER: | 2010 | case PERF_EVENT_IOC_SET_FILTER: |
| 2011 | return perf_event_set_filter(event, (void __user *)arg); | 2011 | return perf_event_set_filter(event, (void __user *)arg); |
| 2012 | 2012 | ||
| 2013 | default: | 2013 | default: |
| 2014 | return -ENOTTY; | 2014 | return -ENOTTY; |
| 2015 | } | 2015 | } |
| 2016 | 2016 | ||
| 2017 | if (flags & PERF_IOC_FLAG_GROUP) | 2017 | if (flags & PERF_IOC_FLAG_GROUP) |
| 2018 | perf_event_for_each(event, func); | 2018 | perf_event_for_each(event, func); |
| 2019 | else | 2019 | else |
| 2020 | perf_event_for_each_child(event, func); | 2020 | perf_event_for_each_child(event, func); |
| 2021 | 2021 | ||
| 2022 | return 0; | 2022 | return 0; |
| 2023 | } | 2023 | } |
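A hedged user-space sketch of driving this ioctl interface; retune_period() and fd are hypothetical, the ioctl numbers come from linux/perf_event.h:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* Disable the whole group, change the sample period, then re-enable.
     * PERF_IOC_FLAG_GROUP routes ENABLE/DISABLE through
     * perf_event_for_each() so every sibling is affected. */
    static int retune_period(int fd, uint64_t period)
    {
            if (ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP))
                    return -1;
            if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
                    return -1;
            return ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
    }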
| 2024 | 2024 | ||
| 2025 | int perf_event_task_enable(void) | 2025 | int perf_event_task_enable(void) |
| 2026 | { | 2026 | { |
| 2027 | struct perf_event *event; | 2027 | struct perf_event *event; |
| 2028 | 2028 | ||
| 2029 | mutex_lock(¤t->perf_event_mutex); | 2029 | mutex_lock(¤t->perf_event_mutex); |
| 2030 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) | 2030 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) |
| 2031 | perf_event_for_each_child(event, perf_event_enable); | 2031 | perf_event_for_each_child(event, perf_event_enable); |
| 2032 | mutex_unlock(¤t->perf_event_mutex); | 2032 | mutex_unlock(¤t->perf_event_mutex); |
| 2033 | 2033 | ||
| 2034 | return 0; | 2034 | return 0; |
| 2035 | } | 2035 | } |
| 2036 | 2036 | ||
| 2037 | int perf_event_task_disable(void) | 2037 | int perf_event_task_disable(void) |
| 2038 | { | 2038 | { |
| 2039 | struct perf_event *event; | 2039 | struct perf_event *event; |
| 2040 | 2040 | ||
| 2041 | mutex_lock(¤t->perf_event_mutex); | 2041 | mutex_lock(¤t->perf_event_mutex); |
| 2042 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) | 2042 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) |
| 2043 | perf_event_for_each_child(event, perf_event_disable); | 2043 | perf_event_for_each_child(event, perf_event_disable); |
| 2044 | mutex_unlock(¤t->perf_event_mutex); | 2044 | mutex_unlock(¤t->perf_event_mutex); |
| 2045 | 2045 | ||
| 2046 | return 0; | 2046 | return 0; |
| 2047 | } | 2047 | } |
| 2048 | 2048 | ||
| 2049 | #ifndef PERF_EVENT_INDEX_OFFSET | 2049 | #ifndef PERF_EVENT_INDEX_OFFSET |
| 2050 | # define PERF_EVENT_INDEX_OFFSET 0 | 2050 | # define PERF_EVENT_INDEX_OFFSET 0 |
| 2051 | #endif | 2051 | #endif |
| 2052 | 2052 | ||
| 2053 | static int perf_event_index(struct perf_event *event) | 2053 | static int perf_event_index(struct perf_event *event) |
| 2054 | { | 2054 | { |
| 2055 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 2055 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 2056 | return 0; | 2056 | return 0; |
| 2057 | 2057 | ||
| 2058 | return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; | 2058 | return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; |
| 2059 | } | 2059 | } |
| 2060 | 2060 | ||
| 2061 | /* | 2061 | /* |
| 2062 | * Callers need to ensure there can be no nesting of this function, otherwise | 2062 | * Callers need to ensure there can be no nesting of this function, otherwise |
| 2063 | * the seqlock logic goes bad. We cannot serialize this because the arch | 2063 | * the seqlock logic goes bad. We cannot serialize this because the arch |
| 2064 | * code calls this from NMI context. | 2064 | * code calls this from NMI context. |
| 2065 | */ | 2065 | */ |
| 2066 | void perf_event_update_userpage(struct perf_event *event) | 2066 | void perf_event_update_userpage(struct perf_event *event) |
| 2067 | { | 2067 | { |
| 2068 | struct perf_event_mmap_page *userpg; | 2068 | struct perf_event_mmap_page *userpg; |
| 2069 | struct perf_mmap_data *data; | 2069 | struct perf_mmap_data *data; |
| 2070 | 2070 | ||
| 2071 | rcu_read_lock(); | 2071 | rcu_read_lock(); |
| 2072 | data = rcu_dereference(event->data); | 2072 | data = rcu_dereference(event->data); |
| 2073 | if (!data) | 2073 | if (!data) |
| 2074 | goto unlock; | 2074 | goto unlock; |
| 2075 | 2075 | ||
| 2076 | userpg = data->user_page; | 2076 | userpg = data->user_page; |
| 2077 | 2077 | ||
| 2078 | /* | 2078 | /* |
| 2079 | * Disable preemption so as to not let the corresponding user-space | 2079 | * Disable preemption so as to not let the corresponding user-space |
| 2080 | * spin too long if we get preempted. | 2080 | * spin too long if we get preempted. |
| 2081 | */ | 2081 | */ |
| 2082 | preempt_disable(); | 2082 | preempt_disable(); |
| 2083 | ++userpg->lock; | 2083 | ++userpg->lock; |
| 2084 | barrier(); | 2084 | barrier(); |
| 2085 | userpg->index = perf_event_index(event); | 2085 | userpg->index = perf_event_index(event); |
| 2086 | userpg->offset = atomic64_read(&event->count); | 2086 | userpg->offset = atomic64_read(&event->count); |
| 2087 | if (event->state == PERF_EVENT_STATE_ACTIVE) | 2087 | if (event->state == PERF_EVENT_STATE_ACTIVE) |
| 2088 | userpg->offset -= atomic64_read(&event->hw.prev_count); | 2088 | userpg->offset -= atomic64_read(&event->hw.prev_count); |
| 2089 | 2089 | ||
| 2090 | userpg->time_enabled = event->total_time_enabled + | 2090 | userpg->time_enabled = event->total_time_enabled + |
| 2091 | atomic64_read(&event->child_total_time_enabled); | 2091 | atomic64_read(&event->child_total_time_enabled); |
| 2092 | 2092 | ||
| 2093 | userpg->time_running = event->total_time_running + | 2093 | userpg->time_running = event->total_time_running + |
| 2094 | atomic64_read(&event->child_total_time_running); | 2094 | atomic64_read(&event->child_total_time_running); |
| 2095 | 2095 | ||
| 2096 | barrier(); | 2096 | barrier(); |
| 2097 | ++userpg->lock; | 2097 | ++userpg->lock; |
| 2098 | preempt_enable(); | 2098 | preempt_enable(); |
| 2099 | unlock: | 2099 | unlock: |
| 2100 | rcu_read_unlock(); | 2100 | rcu_read_unlock(); |
| 2101 | } | 2101 | } |
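The ++userpg->lock/barrier() pairs above implement a sequence count; a user-space reader of the mmap'ed page retries until it observes a stable count. A sketch, assuming struct perf_event_mmap_page from linux/perf_event.h mapped at pg:

    #include <stdint.h>
    #include <linux/perf_event.h>

    static uint64_t read_user_offset(volatile struct perf_event_mmap_page *pg)
    {
            uint32_t seq;
            uint64_t offset;

            do {
                    seq = pg->lock;          /* sample the sequence count */
                    __sync_synchronize();    /* pairs with barrier() above */
                    offset = pg->offset;
                    __sync_synchronize();
            } while (pg->lock != seq);       /* writer bumped it: retry */

            return offset;
    }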
| 2102 | 2102 | ||
| 2103 | static unsigned long perf_data_size(struct perf_mmap_data *data) | 2103 | static unsigned long perf_data_size(struct perf_mmap_data *data) |
| 2104 | { | 2104 | { |
| 2105 | return data->nr_pages << (PAGE_SHIFT + data->data_order); | 2105 | return data->nr_pages << (PAGE_SHIFT + data->data_order); |
| 2106 | } | 2106 | } |
| 2107 | 2107 | ||
| 2108 | #ifndef CONFIG_PERF_USE_VMALLOC | 2108 | #ifndef CONFIG_PERF_USE_VMALLOC |
| 2109 | 2109 | ||
| 2110 | /* | 2110 | /* |
| 2111 | * Back perf_mmap() with regular order-0 GFP_KERNEL pages. | 2111 | * Back perf_mmap() with regular order-0 GFP_KERNEL pages. |
| 2112 | */ | 2112 | */ |
| 2113 | 2113 | ||
| 2114 | static struct page * | 2114 | static struct page * |
| 2115 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | 2115 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) |
| 2116 | { | 2116 | { |
| 2117 | if (pgoff > data->nr_pages) | 2117 | if (pgoff > data->nr_pages) |
| 2118 | return NULL; | 2118 | return NULL; |
| 2119 | 2119 | ||
| 2120 | if (pgoff == 0) | 2120 | if (pgoff == 0) |
| 2121 | return virt_to_page(data->user_page); | 2121 | return virt_to_page(data->user_page); |
| 2122 | 2122 | ||
| 2123 | return virt_to_page(data->data_pages[pgoff - 1]); | 2123 | return virt_to_page(data->data_pages[pgoff - 1]); |
| 2124 | } | 2124 | } |
| 2125 | 2125 | ||
| 2126 | static struct perf_mmap_data * | 2126 | static struct perf_mmap_data * |
| 2127 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | 2127 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) |
| 2128 | { | 2128 | { |
| 2129 | struct perf_mmap_data *data; | 2129 | struct perf_mmap_data *data; |
| 2130 | unsigned long size; | 2130 | unsigned long size; |
| 2131 | int i; | 2131 | int i; |
| 2132 | 2132 | ||
| 2133 | WARN_ON(atomic_read(&event->mmap_count)); | 2133 | WARN_ON(atomic_read(&event->mmap_count)); |
| 2134 | 2134 | ||
| 2135 | size = sizeof(struct perf_mmap_data); | 2135 | size = sizeof(struct perf_mmap_data); |
| 2136 | size += nr_pages * sizeof(void *); | 2136 | size += nr_pages * sizeof(void *); |
| 2137 | 2137 | ||
| 2138 | data = kzalloc(size, GFP_KERNEL); | 2138 | data = kzalloc(size, GFP_KERNEL); |
| 2139 | if (!data) | 2139 | if (!data) |
| 2140 | goto fail; | 2140 | goto fail; |
| 2141 | 2141 | ||
| 2142 | data->user_page = (void *)get_zeroed_page(GFP_KERNEL); | 2142 | data->user_page = (void *)get_zeroed_page(GFP_KERNEL); |
| 2143 | if (!data->user_page) | 2143 | if (!data->user_page) |
| 2144 | goto fail_user_page; | 2144 | goto fail_user_page; |
| 2145 | 2145 | ||
| 2146 | for (i = 0; i < nr_pages; i++) { | 2146 | for (i = 0; i < nr_pages; i++) { |
| 2147 | data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); | 2147 | data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); |
| 2148 | if (!data->data_pages[i]) | 2148 | if (!data->data_pages[i]) |
| 2149 | goto fail_data_pages; | 2149 | goto fail_data_pages; |
| 2150 | } | 2150 | } |
| 2151 | 2151 | ||
| 2152 | data->data_order = 0; | 2152 | data->data_order = 0; |
| 2153 | data->nr_pages = nr_pages; | 2153 | data->nr_pages = nr_pages; |
| 2154 | 2154 | ||
| 2155 | return data; | 2155 | return data; |
| 2156 | 2156 | ||
| 2157 | fail_data_pages: | 2157 | fail_data_pages: |
| 2158 | for (i--; i >= 0; i--) | 2158 | for (i--; i >= 0; i--) |
| 2159 | free_page((unsigned long)data->data_pages[i]); | 2159 | free_page((unsigned long)data->data_pages[i]); |
| 2160 | 2160 | ||
| 2161 | free_page((unsigned long)data->user_page); | 2161 | free_page((unsigned long)data->user_page); |
| 2162 | 2162 | ||
| 2163 | fail_user_page: | 2163 | fail_user_page: |
| 2164 | kfree(data); | 2164 | kfree(data); |
| 2165 | 2165 | ||
| 2166 | fail: | 2166 | fail: |
| 2167 | return NULL; | 2167 | return NULL; |
| 2168 | } | 2168 | } |
| 2169 | 2169 | ||
| 2170 | static void perf_mmap_free_page(unsigned long addr) | 2170 | static void perf_mmap_free_page(unsigned long addr) |
| 2171 | { | 2171 | { |
| 2172 | struct page *page = virt_to_page((void *)addr); | 2172 | struct page *page = virt_to_page((void *)addr); |
| 2173 | 2173 | ||
| 2174 | page->mapping = NULL; | 2174 | page->mapping = NULL; |
| 2175 | __free_page(page); | 2175 | __free_page(page); |
| 2176 | } | 2176 | } |
| 2177 | 2177 | ||
| 2178 | static void perf_mmap_data_free(struct perf_mmap_data *data) | 2178 | static void perf_mmap_data_free(struct perf_mmap_data *data) |
| 2179 | { | 2179 | { |
| 2180 | int i; | 2180 | int i; |
| 2181 | 2181 | ||
| 2182 | perf_mmap_free_page((unsigned long)data->user_page); | 2182 | perf_mmap_free_page((unsigned long)data->user_page); |
| 2183 | for (i = 0; i < data->nr_pages; i++) | 2183 | for (i = 0; i < data->nr_pages; i++) |
| 2184 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | 2184 | perf_mmap_free_page((unsigned long)data->data_pages[i]); |
| 2185 | } | 2185 | } |
| 2186 | 2186 | ||
| 2187 | #else | 2187 | #else |
| 2188 | 2188 | ||
| 2189 | /* | 2189 | /* |
| 2190 | * Back perf_mmap() with vmalloc memory. | 2190 | * Back perf_mmap() with vmalloc memory. |
| 2191 | * | 2191 | * |
| 2192 | * Required for architectures that have d-cache aliasing issues. | 2192 | * Required for architectures that have d-cache aliasing issues. |
| 2193 | */ | 2193 | */ |
| 2194 | 2194 | ||
| 2195 | static struct page * | 2195 | static struct page * |
| 2196 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | 2196 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) |
| 2197 | { | 2197 | { |
| 2198 | if (pgoff > (1UL << data->data_order)) | 2198 | if (pgoff > (1UL << data->data_order)) |
| 2199 | return NULL; | 2199 | return NULL; |
| 2200 | 2200 | ||
| 2201 | return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); | 2201 | return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); |
| 2202 | } | 2202 | } |
| 2203 | 2203 | ||
| 2204 | static void perf_mmap_unmark_page(void *addr) | 2204 | static void perf_mmap_unmark_page(void *addr) |
| 2205 | { | 2205 | { |
| 2206 | struct page *page = vmalloc_to_page(addr); | 2206 | struct page *page = vmalloc_to_page(addr); |
| 2207 | 2207 | ||
| 2208 | page->mapping = NULL; | 2208 | page->mapping = NULL; |
| 2209 | } | 2209 | } |
| 2210 | 2210 | ||
| 2211 | static void perf_mmap_data_free_work(struct work_struct *work) | 2211 | static void perf_mmap_data_free_work(struct work_struct *work) |
| 2212 | { | 2212 | { |
| 2213 | struct perf_mmap_data *data; | 2213 | struct perf_mmap_data *data; |
| 2214 | void *base; | 2214 | void *base; |
| 2215 | int i, nr; | 2215 | int i, nr; |
| 2216 | 2216 | ||
| 2217 | data = container_of(work, struct perf_mmap_data, work); | 2217 | data = container_of(work, struct perf_mmap_data, work); |
| 2218 | nr = 1 << data->data_order; | 2218 | nr = 1 << data->data_order; |
| 2219 | 2219 | ||
| 2220 | base = data->user_page; | 2220 | base = data->user_page; |
| 2221 | for (i = 0; i < nr + 1; i++) | 2221 | for (i = 0; i < nr + 1; i++) |
| 2222 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | 2222 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); |
| 2223 | 2223 | ||
| 2224 | vfree(base); | 2224 | vfree(base); |
| 2225 | } | 2225 | } |
| 2226 | 2226 | ||
| 2227 | static void perf_mmap_data_free(struct perf_mmap_data *data) | 2227 | static void perf_mmap_data_free(struct perf_mmap_data *data) |
| 2228 | { | 2228 | { |
| 2229 | schedule_work(&data->work); | 2229 | schedule_work(&data->work); |
| 2230 | } | 2230 | } |
| 2231 | 2231 | ||
| 2232 | static struct perf_mmap_data * | 2232 | static struct perf_mmap_data * |
| 2233 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | 2233 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) |
| 2234 | { | 2234 | { |
| 2235 | struct perf_mmap_data *data; | 2235 | struct perf_mmap_data *data; |
| 2236 | unsigned long size; | 2236 | unsigned long size; |
| 2237 | void *all_buf; | 2237 | void *all_buf; |
| 2238 | 2238 | ||
| 2239 | WARN_ON(atomic_read(&event->mmap_count)); | 2239 | WARN_ON(atomic_read(&event->mmap_count)); |
| 2240 | 2240 | ||
| 2241 | size = sizeof(struct perf_mmap_data); | 2241 | size = sizeof(struct perf_mmap_data); |
| 2242 | size += sizeof(void *); | 2242 | size += sizeof(void *); |
| 2243 | 2243 | ||
| 2244 | data = kzalloc(size, GFP_KERNEL); | 2244 | data = kzalloc(size, GFP_KERNEL); |
| 2245 | if (!data) | 2245 | if (!data) |
| 2246 | goto fail; | 2246 | goto fail; |
| 2247 | 2247 | ||
| 2248 | INIT_WORK(&data->work, perf_mmap_data_free_work); | 2248 | INIT_WORK(&data->work, perf_mmap_data_free_work); |
| 2249 | 2249 | ||
| 2250 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); | 2250 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); |
| 2251 | if (!all_buf) | 2251 | if (!all_buf) |
| 2252 | goto fail_all_buf; | 2252 | goto fail_all_buf; |
| 2253 | 2253 | ||
| 2254 | data->user_page = all_buf; | 2254 | data->user_page = all_buf; |
| 2255 | data->data_pages[0] = all_buf + PAGE_SIZE; | 2255 | data->data_pages[0] = all_buf + PAGE_SIZE; |
| 2256 | data->data_order = ilog2(nr_pages); | 2256 | data->data_order = ilog2(nr_pages); |
| 2257 | data->nr_pages = 1; | 2257 | data->nr_pages = 1; |
| 2258 | 2258 | ||
| 2259 | return data; | 2259 | return data; |
| 2260 | 2260 | ||
| 2261 | fail_all_buf: | 2261 | fail_all_buf: |
| 2262 | kfree(data); | 2262 | kfree(data); |
| 2263 | 2263 | ||
| 2264 | fail: | 2264 | fail: |
| 2265 | return NULL; | 2265 | return NULL; |
| 2266 | } | 2266 | } |
| 2267 | 2267 | ||
| 2268 | #endif | 2268 | #endif |
| 2269 | 2269 | ||
| 2270 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 2270 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
| 2271 | { | 2271 | { |
| 2272 | struct perf_event *event = vma->vm_file->private_data; | 2272 | struct perf_event *event = vma->vm_file->private_data; |
| 2273 | struct perf_mmap_data *data; | 2273 | struct perf_mmap_data *data; |
| 2274 | int ret = VM_FAULT_SIGBUS; | 2274 | int ret = VM_FAULT_SIGBUS; |
| 2275 | 2275 | ||
| 2276 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | 2276 | if (vmf->flags & FAULT_FLAG_MKWRITE) { |
| 2277 | if (vmf->pgoff == 0) | 2277 | if (vmf->pgoff == 0) |
| 2278 | ret = 0; | 2278 | ret = 0; |
| 2279 | return ret; | 2279 | return ret; |
| 2280 | } | 2280 | } |
| 2281 | 2281 | ||
| 2282 | rcu_read_lock(); | 2282 | rcu_read_lock(); |
| 2283 | data = rcu_dereference(event->data); | 2283 | data = rcu_dereference(event->data); |
| 2284 | if (!data) | 2284 | if (!data) |
| 2285 | goto unlock; | 2285 | goto unlock; |
| 2286 | 2286 | ||
| 2287 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) | 2287 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) |
| 2288 | goto unlock; | 2288 | goto unlock; |
| 2289 | 2289 | ||
| 2290 | vmf->page = perf_mmap_to_page(data, vmf->pgoff); | 2290 | vmf->page = perf_mmap_to_page(data, vmf->pgoff); |
| 2291 | if (!vmf->page) | 2291 | if (!vmf->page) |
| 2292 | goto unlock; | 2292 | goto unlock; |
| 2293 | 2293 | ||
| 2294 | get_page(vmf->page); | 2294 | get_page(vmf->page); |
| 2295 | vmf->page->mapping = vma->vm_file->f_mapping; | 2295 | vmf->page->mapping = vma->vm_file->f_mapping; |
| 2296 | vmf->page->index = vmf->pgoff; | 2296 | vmf->page->index = vmf->pgoff; |
| 2297 | 2297 | ||
| 2298 | ret = 0; | 2298 | ret = 0; |
| 2299 | unlock: | 2299 | unlock: |
| 2300 | rcu_read_unlock(); | 2300 | rcu_read_unlock(); |
| 2301 | 2301 | ||
| 2302 | return ret; | 2302 | return ret; |
| 2303 | } | 2303 | } |
| 2304 | 2304 | ||
| 2305 | static void | 2305 | static void |
| 2306 | perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) | 2306 | perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) |
| 2307 | { | 2307 | { |
| 2308 | long max_size = perf_data_size(data); | 2308 | long max_size = perf_data_size(data); |
| 2309 | 2309 | ||
| 2310 | atomic_set(&data->lock, -1); | 2310 | atomic_set(&data->lock, -1); |
| 2311 | 2311 | ||
| 2312 | if (event->attr.watermark) { | 2312 | if (event->attr.watermark) { |
| 2313 | data->watermark = min_t(long, max_size, | 2313 | data->watermark = min_t(long, max_size, |
| 2314 | event->attr.wakeup_watermark); | 2314 | event->attr.wakeup_watermark); |
| 2315 | } | 2315 | } |
| 2316 | 2316 | ||
| 2317 | if (!data->watermark) | 2317 | if (!data->watermark) |
| 2318 | data->watermark = max_t(long, PAGE_SIZE, max_size / 2); | 2318 | data->watermark = max_t(long, PAGE_SIZE, max_size / 2); |
| 2319 | 2319 | ||
| 2320 | 2320 | ||
| 2321 | rcu_assign_pointer(event->data, data); | 2321 | rcu_assign_pointer(event->data, data); |
| 2322 | } | 2322 | } |
| 2323 | 2323 | ||
| 2324 | static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) | 2324 | static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) |
| 2325 | { | 2325 | { |
| 2326 | struct perf_mmap_data *data; | 2326 | struct perf_mmap_data *data; |
| 2327 | 2327 | ||
| 2328 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | 2328 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); |
| 2329 | perf_mmap_data_free(data); | 2329 | perf_mmap_data_free(data); |
| 2330 | kfree(data); | 2330 | kfree(data); |
| 2331 | } | 2331 | } |
| 2332 | 2332 | ||
| 2333 | static void perf_mmap_data_release(struct perf_event *event) | 2333 | static void perf_mmap_data_release(struct perf_event *event) |
| 2334 | { | 2334 | { |
| 2335 | struct perf_mmap_data *data = event->data; | 2335 | struct perf_mmap_data *data = event->data; |
| 2336 | 2336 | ||
| 2337 | WARN_ON(atomic_read(&event->mmap_count)); | 2337 | WARN_ON(atomic_read(&event->mmap_count)); |
| 2338 | 2338 | ||
| 2339 | rcu_assign_pointer(event->data, NULL); | 2339 | rcu_assign_pointer(event->data, NULL); |
| 2340 | call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); | 2340 | call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); |
| 2341 | } | 2341 | } |
| 2342 | 2342 | ||
| 2343 | static void perf_mmap_open(struct vm_area_struct *vma) | 2343 | static void perf_mmap_open(struct vm_area_struct *vma) |
| 2344 | { | 2344 | { |
| 2345 | struct perf_event *event = vma->vm_file->private_data; | 2345 | struct perf_event *event = vma->vm_file->private_data; |
| 2346 | 2346 | ||
| 2347 | atomic_inc(&event->mmap_count); | 2347 | atomic_inc(&event->mmap_count); |
| 2348 | } | 2348 | } |
| 2349 | 2349 | ||
| 2350 | static void perf_mmap_close(struct vm_area_struct *vma) | 2350 | static void perf_mmap_close(struct vm_area_struct *vma) |
| 2351 | { | 2351 | { |
| 2352 | struct perf_event *event = vma->vm_file->private_data; | 2352 | struct perf_event *event = vma->vm_file->private_data; |
| 2353 | 2353 | ||
| 2354 | WARN_ON_ONCE(event->ctx->parent_ctx); | 2354 | WARN_ON_ONCE(event->ctx->parent_ctx); |
| 2355 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { | 2355 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { |
| 2356 | unsigned long size = perf_data_size(event->data); | 2356 | unsigned long size = perf_data_size(event->data); |
| 2357 | struct user_struct *user = current_user(); | 2357 | struct user_struct *user = current_user(); |
| 2358 | 2358 | ||
| 2359 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); | 2359 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); |
| 2360 | vma->vm_mm->locked_vm -= event->data->nr_locked; | 2360 | vma->vm_mm->locked_vm -= event->data->nr_locked; |
| 2361 | perf_mmap_data_release(event); | 2361 | perf_mmap_data_release(event); |
| 2362 | mutex_unlock(&event->mmap_mutex); | 2362 | mutex_unlock(&event->mmap_mutex); |
| 2363 | } | 2363 | } |
| 2364 | } | 2364 | } |
| 2365 | 2365 | ||
| 2366 | static const struct vm_operations_struct perf_mmap_vmops = { | 2366 | static const struct vm_operations_struct perf_mmap_vmops = { |
| 2367 | .open = perf_mmap_open, | 2367 | .open = perf_mmap_open, |
| 2368 | .close = perf_mmap_close, | 2368 | .close = perf_mmap_close, |
| 2369 | .fault = perf_mmap_fault, | 2369 | .fault = perf_mmap_fault, |
| 2370 | .page_mkwrite = perf_mmap_fault, | 2370 | .page_mkwrite = perf_mmap_fault, |
| 2371 | }; | 2371 | }; |
| 2372 | 2372 | ||
| 2373 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) | 2373 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) |
| 2374 | { | 2374 | { |
| 2375 | struct perf_event *event = file->private_data; | 2375 | struct perf_event *event = file->private_data; |
| 2376 | unsigned long user_locked, user_lock_limit; | 2376 | unsigned long user_locked, user_lock_limit; |
| 2377 | struct user_struct *user = current_user(); | 2377 | struct user_struct *user = current_user(); |
| 2378 | unsigned long locked, lock_limit; | 2378 | unsigned long locked, lock_limit; |
| 2379 | struct perf_mmap_data *data; | 2379 | struct perf_mmap_data *data; |
| 2380 | unsigned long vma_size; | 2380 | unsigned long vma_size; |
| 2381 | unsigned long nr_pages; | 2381 | unsigned long nr_pages; |
| 2382 | long user_extra, extra; | 2382 | long user_extra, extra; |
| 2383 | int ret = 0; | 2383 | int ret = 0; |
| 2384 | 2384 | ||
| 2385 | if (!(vma->vm_flags & VM_SHARED)) | 2385 | if (!(vma->vm_flags & VM_SHARED)) |
| 2386 | return -EINVAL; | 2386 | return -EINVAL; |
| 2387 | 2387 | ||
| 2388 | vma_size = vma->vm_end - vma->vm_start; | 2388 | vma_size = vma->vm_end - vma->vm_start; |
| 2389 | nr_pages = (vma_size / PAGE_SIZE) - 1; | 2389 | nr_pages = (vma_size / PAGE_SIZE) - 1; |
| 2390 | 2390 | ||
| 2391 | /* | 2391 | /* |
| 2392 | * If we have data pages, ensure their count is a power of two so we | 2392 | * If we have data pages, ensure their count is a power of two so we |
| 2393 | * can use bitmasks instead of modulo arithmetic. | 2393 | * can use bitmasks instead of modulo arithmetic. |
| 2394 | */ | 2394 | */ |
| 2395 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) | 2395 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) |
| 2396 | return -EINVAL; | 2396 | return -EINVAL; |
| 2397 | 2397 | ||
| 2398 | if (vma_size != PAGE_SIZE * (1 + nr_pages)) | 2398 | if (vma_size != PAGE_SIZE * (1 + nr_pages)) |
| 2399 | return -EINVAL; | 2399 | return -EINVAL; |
| 2400 | 2400 | ||
| 2401 | if (vma->vm_pgoff != 0) | 2401 | if (vma->vm_pgoff != 0) |
| 2402 | return -EINVAL; | 2402 | return -EINVAL; |
| 2403 | 2403 | ||
| 2404 | WARN_ON_ONCE(event->ctx->parent_ctx); | 2404 | WARN_ON_ONCE(event->ctx->parent_ctx); |
| 2405 | mutex_lock(&event->mmap_mutex); | 2405 | mutex_lock(&event->mmap_mutex); |
| 2406 | if (event->output) { | 2406 | if (event->output) { |
| 2407 | ret = -EINVAL; | 2407 | ret = -EINVAL; |
| 2408 | goto unlock; | 2408 | goto unlock; |
| 2409 | } | 2409 | } |
| 2410 | 2410 | ||
| 2411 | if (atomic_inc_not_zero(&event->mmap_count)) { | 2411 | if (atomic_inc_not_zero(&event->mmap_count)) { |
| 2412 | if (nr_pages != event->data->nr_pages) | 2412 | if (nr_pages != event->data->nr_pages) |
| 2413 | ret = -EINVAL; | 2413 | ret = -EINVAL; |
| 2414 | goto unlock; | 2414 | goto unlock; |
| 2415 | } | 2415 | } |
| 2416 | 2416 | ||
| 2417 | user_extra = nr_pages + 1; | 2417 | user_extra = nr_pages + 1; |
| 2418 | user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); | 2418 | user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); |
| 2419 | 2419 | ||
| 2420 | /* | 2420 | /* |
| 2421 | * Increase the limit linearly with more CPUs: | 2421 | * Increase the limit linearly with more CPUs: |
| 2422 | */ | 2422 | */ |
| 2423 | user_lock_limit *= num_online_cpus(); | 2423 | user_lock_limit *= num_online_cpus(); |
| 2424 | 2424 | ||
| 2425 | user_locked = atomic_long_read(&user->locked_vm) + user_extra; | 2425 | user_locked = atomic_long_read(&user->locked_vm) + user_extra; |
| 2426 | 2426 | ||
| 2427 | extra = 0; | 2427 | extra = 0; |
| 2428 | if (user_locked > user_lock_limit) | 2428 | if (user_locked > user_lock_limit) |
| 2429 | extra = user_locked - user_lock_limit; | 2429 | extra = user_locked - user_lock_limit; |
| 2430 | 2430 | ||
| 2431 | lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; | 2431 | lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; |
| 2432 | lock_limit >>= PAGE_SHIFT; | 2432 | lock_limit >>= PAGE_SHIFT; |
| 2433 | locked = vma->vm_mm->locked_vm + extra; | 2433 | locked = vma->vm_mm->locked_vm + extra; |
| 2434 | 2434 | ||
| 2435 | if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && | 2435 | if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && |
| 2436 | !capable(CAP_IPC_LOCK)) { | 2436 | !capable(CAP_IPC_LOCK)) { |
| 2437 | ret = -EPERM; | 2437 | ret = -EPERM; |
| 2438 | goto unlock; | 2438 | goto unlock; |
| 2439 | } | 2439 | } |
| 2440 | 2440 | ||
| 2441 | WARN_ON(event->data); | 2441 | WARN_ON(event->data); |
| 2442 | 2442 | ||
| 2443 | data = perf_mmap_data_alloc(event, nr_pages); | 2443 | data = perf_mmap_data_alloc(event, nr_pages); |
| 2444 | ret = -ENOMEM; | 2444 | ret = -ENOMEM; |
| 2445 | if (!data) | 2445 | if (!data) |
| 2446 | goto unlock; | 2446 | goto unlock; |
| 2447 | 2447 | ||
| 2448 | ret = 0; | 2448 | ret = 0; |
| 2449 | perf_mmap_data_init(event, data); | 2449 | perf_mmap_data_init(event, data); |
| 2450 | 2450 | ||
| 2451 | atomic_set(&event->mmap_count, 1); | 2451 | atomic_set(&event->mmap_count, 1); |
| 2452 | atomic_long_add(user_extra, &user->locked_vm); | 2452 | atomic_long_add(user_extra, &user->locked_vm); |
| 2453 | vma->vm_mm->locked_vm += extra; | 2453 | vma->vm_mm->locked_vm += extra; |
| 2454 | event->data->nr_locked = extra; | 2454 | event->data->nr_locked = extra; |
| 2455 | if (vma->vm_flags & VM_WRITE) | 2455 | if (vma->vm_flags & VM_WRITE) |
| 2456 | event->data->writable = 1; | 2456 | event->data->writable = 1; |
| 2457 | 2457 | ||
| 2458 | unlock: | 2458 | unlock: |
| 2459 | mutex_unlock(&event->mmap_mutex); | 2459 | mutex_unlock(&event->mmap_mutex); |
| 2460 | 2460 | ||
| 2461 | vma->vm_flags |= VM_RESERVED; | 2461 | vma->vm_flags |= VM_RESERVED; |
| 2462 | vma->vm_ops = &perf_mmap_vmops; | 2462 | vma->vm_ops = &perf_mmap_vmops; |
| 2463 | 2463 | ||
| 2464 | return ret; | 2464 | return ret; |
| 2465 | } | 2465 | } |
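Translated to user space, the checks above mean: map at offset 0, MAP_SHARED, with 1 + 2^n pages. A sketch (map_ring() and fd are hypothetical):

    #include <sys/mman.h>
    #include <unistd.h>

    static void *map_ring(int fd, unsigned long n_data_pages)
    {
            /* n_data_pages must be a power of two; the extra leading page
             * holds struct perf_event_mmap_page. Anything else, a non-zero
             * offset, or a private mapping gets -EINVAL. */
            size_t len = (n_data_pages + 1) * sysconf(_SC_PAGESIZE);

            return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }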
| 2466 | 2466 | ||
| 2467 | static int perf_fasync(int fd, struct file *filp, int on) | 2467 | static int perf_fasync(int fd, struct file *filp, int on) |
| 2468 | { | 2468 | { |
| 2469 | struct inode *inode = filp->f_path.dentry->d_inode; | 2469 | struct inode *inode = filp->f_path.dentry->d_inode; |
| 2470 | struct perf_event *event = filp->private_data; | 2470 | struct perf_event *event = filp->private_data; |
| 2471 | int retval; | 2471 | int retval; |
| 2472 | 2472 | ||
| 2473 | mutex_lock(&inode->i_mutex); | 2473 | mutex_lock(&inode->i_mutex); |
| 2474 | retval = fasync_helper(fd, filp, on, &event->fasync); | 2474 | retval = fasync_helper(fd, filp, on, &event->fasync); |
| 2475 | mutex_unlock(&inode->i_mutex); | 2475 | mutex_unlock(&inode->i_mutex); |
| 2476 | 2476 | ||
| 2477 | if (retval < 0) | 2477 | if (retval < 0) |
| 2478 | return retval; | 2478 | return retval; |
| 2479 | 2479 | ||
| 2480 | return 0; | 2480 | return 0; |
| 2481 | } | 2481 | } |
| 2482 | 2482 | ||
| 2483 | static const struct file_operations perf_fops = { | 2483 | static const struct file_operations perf_fops = { |
| 2484 | .release = perf_release, | 2484 | .release = perf_release, |
| 2485 | .read = perf_read, | 2485 | .read = perf_read, |
| 2486 | .poll = perf_poll, | 2486 | .poll = perf_poll, |
| 2487 | .unlocked_ioctl = perf_ioctl, | 2487 | .unlocked_ioctl = perf_ioctl, |
| 2488 | .compat_ioctl = perf_ioctl, | 2488 | .compat_ioctl = perf_ioctl, |
| 2489 | .mmap = perf_mmap, | 2489 | .mmap = perf_mmap, |
| 2490 | .fasync = perf_fasync, | 2490 | .fasync = perf_fasync, |
| 2491 | }; | 2491 | }; |
| 2492 | 2492 | ||
| 2493 | /* | 2493 | /* |
| 2494 | * Perf event wakeup | 2494 | * Perf event wakeup |
| 2495 | * | 2495 | * |
| 2496 | * If there's data, ensure we set the poll() state and publish everything | 2496 | * If there's data, ensure we set the poll() state and publish everything |
| 2497 | * to user-space before waking everybody up. | 2497 | * to user-space before waking everybody up. |
| 2498 | */ | 2498 | */ |
| 2499 | 2499 | ||
| 2500 | void perf_event_wakeup(struct perf_event *event) | 2500 | void perf_event_wakeup(struct perf_event *event) |
| 2501 | { | 2501 | { |
| 2502 | wake_up_all(&event->waitq); | 2502 | wake_up_all(&event->waitq); |
| 2503 | 2503 | ||
| 2504 | if (event->pending_kill) { | 2504 | if (event->pending_kill) { |
| 2505 | kill_fasync(&event->fasync, SIGIO, event->pending_kill); | 2505 | kill_fasync(&event->fasync, SIGIO, event->pending_kill); |
| 2506 | event->pending_kill = 0; | 2506 | event->pending_kill = 0; |
| 2507 | } | 2507 | } |
| 2508 | } | 2508 | } |
| 2509 | 2509 | ||
| 2510 | /* | 2510 | /* |
| 2511 | * Pending wakeups | 2511 | * Pending wakeups |
| 2512 | * | 2512 | * |
| 2513 | * Handle the case where we need to wake up from NMI (or rq->lock) context. | 2513 | * Handle the case where we need to wake up from NMI (or rq->lock) context. |
| 2514 | * | 2514 | * |
| 2515 | * The NMI bit means we cannot possibly take locks. Therefore, maintain a | 2515 | * The NMI bit means we cannot possibly take locks. Therefore, maintain a |
| 2516 | * single linked list and use cmpxchg() to add entries lockless. | 2516 | * single linked list and use cmpxchg() to add entries lockless. |
| 2517 | */ | 2517 | */ |
| 2518 | 2518 | ||
| 2519 | static void perf_pending_event(struct perf_pending_entry *entry) | 2519 | static void perf_pending_event(struct perf_pending_entry *entry) |
| 2520 | { | 2520 | { |
| 2521 | struct perf_event *event = container_of(entry, | 2521 | struct perf_event *event = container_of(entry, |
| 2522 | struct perf_event, pending); | 2522 | struct perf_event, pending); |
| 2523 | 2523 | ||
| 2524 | if (event->pending_disable) { | 2524 | if (event->pending_disable) { |
| 2525 | event->pending_disable = 0; | 2525 | event->pending_disable = 0; |
| 2526 | __perf_event_disable(event); | 2526 | __perf_event_disable(event); |
| 2527 | } | 2527 | } |
| 2528 | 2528 | ||
| 2529 | if (event->pending_wakeup) { | 2529 | if (event->pending_wakeup) { |
| 2530 | event->pending_wakeup = 0; | 2530 | event->pending_wakeup = 0; |
| 2531 | perf_event_wakeup(event); | 2531 | perf_event_wakeup(event); |
| 2532 | } | 2532 | } |
| 2533 | } | 2533 | } |
| 2534 | 2534 | ||
| 2535 | #define PENDING_TAIL ((struct perf_pending_entry *)-1UL) | 2535 | #define PENDING_TAIL ((struct perf_pending_entry *)-1UL) |
| 2536 | 2536 | ||
| 2537 | static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { | 2537 | static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { |
| 2538 | PENDING_TAIL, | 2538 | PENDING_TAIL, |
| 2539 | }; | 2539 | }; |
| 2540 | 2540 | ||
| 2541 | static void perf_pending_queue(struct perf_pending_entry *entry, | 2541 | static void perf_pending_queue(struct perf_pending_entry *entry, |
| 2542 | void (*func)(struct perf_pending_entry *)) | 2542 | void (*func)(struct perf_pending_entry *)) |
| 2543 | { | 2543 | { |
| 2544 | struct perf_pending_entry **head; | 2544 | struct perf_pending_entry **head; |
| 2545 | 2545 | ||
| 2546 | if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) | 2546 | if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) |
| 2547 | return; | 2547 | return; |
| 2548 | 2548 | ||
| 2549 | entry->func = func; | 2549 | entry->func = func; |
| 2550 | 2550 | ||
| 2551 | head = &get_cpu_var(perf_pending_head); | 2551 | head = &get_cpu_var(perf_pending_head); |
| 2552 | 2552 | ||
| 2553 | do { | 2553 | do { |
| 2554 | entry->next = *head; | 2554 | entry->next = *head; |
| 2555 | } while (cmpxchg(head, entry->next, entry) != entry->next); | 2555 | } while (cmpxchg(head, entry->next, entry) != entry->next); |
| 2556 | 2556 | ||
| 2557 | set_perf_event_pending(); | 2557 | set_perf_event_pending(); |
| 2558 | 2558 | ||
| 2559 | put_cpu_var(perf_pending_head); | 2559 | put_cpu_var(perf_pending_head); |
| 2560 | } | 2560 | } |
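Stripped of the perf specifics, perf_pending_queue() is the classic lock-free LIFO push. A sketch of the same pattern using the GCC __sync builtin rather than the kernel's cmpxchg():

    struct node { struct node *next; };

    /* Publish n at the head only if nobody raced us; otherwise reload
     * and retry. No locks are taken, so this is safe from NMI context. */
    static void lockless_push(struct node **head, struct node *n)
    {
            struct node *old;

            do {
                    old = *head;
                    n->next = old;
            } while (__sync_val_compare_and_swap(head, old, n) != old);
    }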
| 2561 | 2561 | ||
| 2562 | static int __perf_pending_run(void) | 2562 | static int __perf_pending_run(void) |
| 2563 | { | 2563 | { |
| 2564 | struct perf_pending_entry *list; | 2564 | struct perf_pending_entry *list; |
| 2565 | int nr = 0; | 2565 | int nr = 0; |
| 2566 | 2566 | ||
| 2567 | list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); | 2567 | list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); |
| 2568 | while (list != PENDING_TAIL) { | 2568 | while (list != PENDING_TAIL) { |
| 2569 | void (*func)(struct perf_pending_entry *); | 2569 | void (*func)(struct perf_pending_entry *); |
| 2570 | struct perf_pending_entry *entry = list; | 2570 | struct perf_pending_entry *entry = list; |
| 2571 | 2571 | ||
| 2572 | list = list->next; | 2572 | list = list->next; |
| 2573 | 2573 | ||
| 2574 | func = entry->func; | 2574 | func = entry->func; |
| 2575 | entry->next = NULL; | 2575 | entry->next = NULL; |
| 2576 | /* | 2576 | /* |
| 2577 | * Ensure we observe the unqueue before we issue the wakeup, | 2577 | * Ensure we observe the unqueue before we issue the wakeup, |
| 2578 | * so that we won't be waiting forever. | 2578 | * so that we won't be waiting forever. |
| 2579 | * -- see perf_not_pending(). | 2579 | * -- see perf_not_pending(). |
| 2580 | */ | 2580 | */ |
| 2581 | smp_wmb(); | 2581 | smp_wmb(); |
| 2582 | 2582 | ||
| 2583 | func(entry); | 2583 | func(entry); |
| 2584 | nr++; | 2584 | nr++; |
| 2585 | } | 2585 | } |
| 2586 | 2586 | ||
| 2587 | return nr; | 2587 | return nr; |
| 2588 | } | 2588 | } |
| 2589 | 2589 | ||
| 2590 | static inline int perf_not_pending(struct perf_event *event) | 2590 | static inline int perf_not_pending(struct perf_event *event) |
| 2591 | { | 2591 | { |
| 2592 | /* | 2592 | /* |
| 2593 | * If we flush on whichever CPU we happen to run on, there is a chance we don't | 2593 | * If we flush on whichever CPU we happen to run on, there is a chance we don't |
| 2594 | * need to wait. | 2594 | * need to wait. |
| 2595 | */ | 2595 | */ |
| 2596 | get_cpu(); | 2596 | get_cpu(); |
| 2597 | __perf_pending_run(); | 2597 | __perf_pending_run(); |
| 2598 | put_cpu(); | 2598 | put_cpu(); |
| 2599 | 2599 | ||
| 2600 | /* | 2600 | /* |
| 2601 | * Ensure we see the proper queue state before going to sleep | 2601 | * Ensure we see the proper queue state before going to sleep |
| 2602 | * so that we do not miss the wakeup. -- see perf_pending_handle() | 2602 | * so that we do not miss the wakeup. -- see perf_pending_handle() |
| 2603 | */ | 2603 | */ |
| 2604 | smp_rmb(); | 2604 | smp_rmb(); |
| 2605 | return event->pending.next == NULL; | 2605 | return event->pending.next == NULL; |
| 2606 | } | 2606 | } |
| 2607 | 2607 | ||
| 2608 | static void perf_pending_sync(struct perf_event *event) | 2608 | static void perf_pending_sync(struct perf_event *event) |
| 2609 | { | 2609 | { |
| 2610 | wait_event(event->waitq, perf_not_pending(event)); | 2610 | wait_event(event->waitq, perf_not_pending(event)); |
| 2611 | } | 2611 | } |
| 2612 | 2612 | ||
| 2613 | void perf_event_do_pending(void) | 2613 | void perf_event_do_pending(void) |
| 2614 | { | 2614 | { |
| 2615 | __perf_pending_run(); | 2615 | __perf_pending_run(); |
| 2616 | } | 2616 | } |
| 2617 | 2617 | ||
| 2618 | /* | 2618 | /* |
| 2619 | * Callchain support -- arch specific | 2619 | * Callchain support -- arch specific |
| 2620 | */ | 2620 | */ |
| 2621 | 2621 | ||
| 2622 | __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | 2622 | __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) |
| 2623 | { | 2623 | { |
| 2624 | return NULL; | 2624 | return NULL; |
| 2625 | } | 2625 | } |
| 2626 | 2626 | ||
| 2627 | /* | 2627 | /* |
| 2628 | * Output | 2628 | * Output |
| 2629 | */ | 2629 | */ |
| 2630 | static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, | 2630 | static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, |
| 2631 | unsigned long offset, unsigned long head) | 2631 | unsigned long offset, unsigned long head) |
| 2632 | { | 2632 | { |
| 2633 | unsigned long mask; | 2633 | unsigned long mask; |
| 2634 | 2634 | ||
| 2635 | if (!data->writable) | 2635 | if (!data->writable) |
| 2636 | return true; | 2636 | return true; |
| 2637 | 2637 | ||
| 2638 | mask = perf_data_size(data) - 1; | 2638 | mask = perf_data_size(data) - 1; |
| 2639 | 2639 | ||
| 2640 | offset = (offset - tail) & mask; | 2640 | offset = (offset - tail) & mask; |
| 2641 | head = (head - tail) & mask; | 2641 | head = (head - tail) & mask; |
| 2642 | 2642 | ||
| 2643 | if ((int)(head - offset) < 0) | 2643 | if ((int)(head - offset) < 0) |
| 2644 | return false; | 2644 | return false; |
| 2645 | 2645 | ||
| 2646 | return true; | 2646 | return true; |
| 2647 | } | 2647 | } |
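A worked example of the masked arithmetic above: with four data pages (16 KiB) the mask is 0x3fff. For tail = 0x1000, offset = 0x3f00, and a proposed head = 0x4100, the tail-normalized positions are offset - tail = 0x2f00 and head - tail = 0x3100, so head - offset = 0x200 >= 0 and the write fits. Had head advanced past tail, the masked difference would go negative when cast to int and the function would return false, refusing to overwrite data user space has not consumed yet.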
| 2648 | 2648 | ||
| 2649 | static void perf_output_wakeup(struct perf_output_handle *handle) | 2649 | static void perf_output_wakeup(struct perf_output_handle *handle) |
| 2650 | { | 2650 | { |
| 2651 | atomic_set(&handle->data->poll, POLL_IN); | 2651 | atomic_set(&handle->data->poll, POLL_IN); |
| 2652 | 2652 | ||
| 2653 | if (handle->nmi) { | 2653 | if (handle->nmi) { |
| 2654 | handle->event->pending_wakeup = 1; | 2654 | handle->event->pending_wakeup = 1; |
| 2655 | perf_pending_queue(&handle->event->pending, | 2655 | perf_pending_queue(&handle->event->pending, |
| 2656 | perf_pending_event); | 2656 | perf_pending_event); |
| 2657 | } else | 2657 | } else |
| 2658 | perf_event_wakeup(handle->event); | 2658 | perf_event_wakeup(handle->event); |
| 2659 | } | 2659 | } |
| 2660 | 2660 | ||
| 2661 | /* | 2661 | /* |
| 2662 | * Curious locking construct. | 2662 | * Curious locking construct. |
| 2663 | * | 2663 | * |
| 2664 | * We need to ensure a later event_id doesn't publish a head when a former | 2664 | * We need to ensure a later event_id doesn't publish a head when a former |
| 2665 | * event_id isn't done writing. However since we need to deal with NMIs we | 2665 | * event_id isn't done writing. However since we need to deal with NMIs we |
| 2666 | * cannot fully serialize things. | 2666 | * cannot fully serialize things. |
| 2667 | * | 2667 | * |
| 2668 | * What we do is serialize between CPUs so we only have to deal with NMI | 2668 | * What we do is serialize between CPUs so we only have to deal with NMI |
| 2669 | * nesting on a single CPU. | 2669 | * nesting on a single CPU. |
| 2670 | * | 2670 | * |
| 2671 | * We only publish the head (and generate a wakeup) when the outer-most | 2671 | * We only publish the head (and generate a wakeup) when the outer-most |
| 2672 | * event_id completes. | 2672 | * event_id completes. |
| 2673 | */ | 2673 | */ |
| 2674 | static void perf_output_lock(struct perf_output_handle *handle) | 2674 | static void perf_output_lock(struct perf_output_handle *handle) |
| 2675 | { | 2675 | { |
| 2676 | struct perf_mmap_data *data = handle->data; | 2676 | struct perf_mmap_data *data = handle->data; |
| 2677 | int cpu; | 2677 | int cpu; |
| 2678 | 2678 | ||
| 2679 | handle->locked = 0; | 2679 | handle->locked = 0; |
| 2680 | 2680 | ||
| 2681 | local_irq_save(handle->flags); | 2681 | local_irq_save(handle->flags); |
| 2682 | cpu = smp_processor_id(); | 2682 | cpu = smp_processor_id(); |
| 2683 | 2683 | ||
| 2684 | if (in_nmi() && atomic_read(&data->lock) == cpu) | 2684 | if (in_nmi() && atomic_read(&data->lock) == cpu) |
| 2685 | return; | 2685 | return; |
| 2686 | 2686 | ||
| 2687 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | 2687 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) |
| 2688 | cpu_relax(); | 2688 | cpu_relax(); |
| 2689 | 2689 | ||
| 2690 | handle->locked = 1; | 2690 | handle->locked = 1; |
| 2691 | } | 2691 | } |
| 2692 | 2692 | ||
| 2693 | static void perf_output_unlock(struct perf_output_handle *handle) | 2693 | static void perf_output_unlock(struct perf_output_handle *handle) |
| 2694 | { | 2694 | { |
| 2695 | struct perf_mmap_data *data = handle->data; | 2695 | struct perf_mmap_data *data = handle->data; |
| 2696 | unsigned long head; | 2696 | unsigned long head; |
| 2697 | int cpu; | 2697 | int cpu; |
| 2698 | 2698 | ||
| 2699 | data->done_head = data->head; | 2699 | data->done_head = data->head; |
| 2700 | 2700 | ||
| 2701 | if (!handle->locked) | 2701 | if (!handle->locked) |
| 2702 | goto out; | 2702 | goto out; |
| 2703 | 2703 | ||
| 2704 | again: | 2704 | again: |
| 2705 | /* | 2705 | /* |
| 2706 | * The xchg implies a full barrier that ensures all writes are done | 2706 | * The xchg implies a full barrier that ensures all writes are done |
| 2707 | * before we publish the new head, matched by a rmb() in userspace when | 2707 | * before we publish the new head, matched by a rmb() in userspace when |
| 2708 | * reading this position. | 2708 | * reading this position. |
| 2709 | */ | 2709 | */ |
| 2710 | while ((head = atomic_long_xchg(&data->done_head, 0))) | 2710 | while ((head = atomic_long_xchg(&data->done_head, 0))) |
| 2711 | data->user_page->data_head = head; | 2711 | data->user_page->data_head = head; |
| 2712 | 2712 | ||
| 2713 | /* | 2713 | /* |
| 2714 | * NMI can happen here, which means we can miss a done_head update. | 2714 | * NMI can happen here, which means we can miss a done_head update. |
| 2715 | */ | 2715 | */ |
| 2716 | 2716 | ||
| 2717 | cpu = atomic_xchg(&data->lock, -1); | 2717 | cpu = atomic_xchg(&data->lock, -1); |
| 2718 | WARN_ON_ONCE(cpu != smp_processor_id()); | 2718 | WARN_ON_ONCE(cpu != smp_processor_id()); |
| 2719 | 2719 | ||
| 2720 | /* | 2720 | /* |
| 2721 | * Therefore we have to check that we did not in fact miss one. | 2721 | * Therefore we have to check that we did not in fact miss one. |
| 2722 | */ | 2722 | */ |
| 2723 | if (unlikely(atomic_long_read(&data->done_head))) { | 2723 | if (unlikely(atomic_long_read(&data->done_head))) { |
| 2724 | /* | 2724 | /* |
| 2725 | * Since we had it locked, we can lock it again. | 2725 | * Since we had it locked, we can lock it again. |
| 2726 | */ | 2726 | */ |
| 2727 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | 2727 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) |
| 2728 | cpu_relax(); | 2728 | cpu_relax(); |
| 2729 | 2729 | ||
| 2730 | goto again; | 2730 | goto again; |
| 2731 | } | 2731 | } |
| 2732 | 2732 | ||
| 2733 | if (atomic_xchg(&data->wakeup, 0)) | 2733 | if (atomic_xchg(&data->wakeup, 0)) |
| 2734 | perf_output_wakeup(handle); | 2734 | perf_output_wakeup(handle); |
| 2735 | out: | 2735 | out: |
| 2736 | local_irq_restore(handle->flags); | 2736 | local_irq_restore(handle->flags); |
| 2737 | } | 2737 | } |
| 2738 | 2738 | ||
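The construct above in miniature: a single owner slot claimed by compare-and-swap, where -1 means unlocked, re-entry from an NMI on the owning CPU is detected and skipped, and only the outermost writer releases. A user-space sketch of the idea using GCC __atomic builtins; buf_lock, try_lock and the simulated contexts are illustrative stand-ins, not kernel API:

    #include <stdio.h>

    static int buf_lock = -1;            /* -1 == unlocked, else owner id */

    static int try_lock(int cpu, int in_nmi)
    {
        if (in_nmi && __atomic_load_n(&buf_lock, __ATOMIC_RELAXED) == cpu)
            return 0;                    /* nested on the owning CPU */

        int expected = -1;
        while (!__atomic_compare_exchange_n(&buf_lock, &expected, cpu, 0,
                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            expected = -1;               /* spin until the slot frees up */
        return 1;                        /* outermost owner */
    }

    static void unlock(void)
    {
        __atomic_store_n(&buf_lock, -1, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        int outer = try_lock(0, 0);      /* normal context on CPU 0 */
        int inner = try_lock(0, 1);      /* simulated NMI on the same CPU */

        printf("outer=%d inner=%d\n", outer, inner);   /* outer=1 inner=0 */
        if (outer)
            unlock();                    /* only the outermost path unlocks */
        return 0;
    }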
| 2739 | void perf_output_copy(struct perf_output_handle *handle, | 2739 | void perf_output_copy(struct perf_output_handle *handle, |
| 2740 | const void *buf, unsigned int len) | 2740 | const void *buf, unsigned int len) |
| 2741 | { | 2741 | { |
| 2742 | unsigned int pages_mask; | 2742 | unsigned int pages_mask; |
| 2743 | unsigned long offset; | 2743 | unsigned long offset; |
| 2744 | unsigned int size; | 2744 | unsigned int size; |
| 2745 | void **pages; | 2745 | void **pages; |
| 2746 | 2746 | ||
| 2747 | offset = handle->offset; | 2747 | offset = handle->offset; |
| 2748 | pages_mask = handle->data->nr_pages - 1; | 2748 | pages_mask = handle->data->nr_pages - 1; |
| 2749 | pages = handle->data->data_pages; | 2749 | pages = handle->data->data_pages; |
| 2750 | 2750 | ||
| 2751 | do { | 2751 | do { |
| 2752 | unsigned long page_offset; | 2752 | unsigned long page_offset; |
| 2753 | unsigned long page_size; | 2753 | unsigned long page_size; |
| 2754 | int nr; | 2754 | int nr; |
| 2755 | 2755 | ||
| 2756 | nr = (offset >> PAGE_SHIFT) & pages_mask; | 2756 | nr = (offset >> PAGE_SHIFT) & pages_mask; |
| 2757 | page_size = 1UL << (handle->data->data_order + PAGE_SHIFT); | 2757 | page_size = 1UL << (handle->data->data_order + PAGE_SHIFT); |
| 2758 | page_offset = offset & (page_size - 1); | 2758 | page_offset = offset & (page_size - 1); |
| 2759 | size = min_t(unsigned int, page_size - page_offset, len); | 2759 | size = min_t(unsigned int, page_size - page_offset, len); |
| 2760 | 2760 | ||
| 2761 | memcpy(pages[nr] + page_offset, buf, size); | 2761 | memcpy(pages[nr] + page_offset, buf, size); |
| 2762 | 2762 | ||
| 2763 | len -= size; | 2763 | len -= size; |
| 2764 | buf += size; | 2764 | buf += size; |
| 2765 | offset += size; | 2765 | offset += size; |
| 2766 | } while (len); | 2766 | } while (len); |
| 2767 | 2767 | ||
| 2768 | handle->offset = offset; | 2768 | handle->offset = offset; |
| 2769 | 2769 | ||
| 2770 | /* | 2770 | /* |
| 2771 | * Check we didn't copy past our reservation window, taking the | 2771 | * Check we didn't copy past our reservation window, taking the |
| 2772 | * possible unsigned int wrap into account. | 2772 | * possible unsigned int wrap into account. |
| 2773 | */ | 2773 | */ |
| 2774 | WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); | 2774 | WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); |
| 2775 | } | 2775 | } |
| 2776 | 2776 | ||
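The per-iteration arithmetic above (page index, in-page offset, chunk size) is easiest to see with toy numbers. A minimal sketch, assuming a 4-page buffer with 16-byte pages and data_order taken as 0; PAGE_SZ, NR_PAGES and ring_copy are made-up names:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SZ   16u                /* toy page size */
    #define NR_PAGES  4u                 /* must be a power of two */

    static char pages[NR_PAGES][PAGE_SZ];

    static void ring_copy(unsigned long *offset, const void *buf,
                          unsigned int len)
    {
        const unsigned int pages_mask = NR_PAGES - 1;
        const char *src = buf;

        do {
            unsigned int nr = (*offset / PAGE_SZ) & pages_mask;
            unsigned int page_off = *offset % PAGE_SZ;
            unsigned int size = PAGE_SZ - page_off;

            if (size > len)
                size = len;
            memcpy(&pages[nr][page_off], src, size);
            len -= size;
            src += size;
            *offset += size;
        } while (len);
    }

    int main(void)
    {
        unsigned long off = 60;          /* 4 bytes before the buffer wraps */

        ring_copy(&off, "ABCDEFGH", 8);  /* spills from page 3 into page 0 */
        printf("page 3 tail: %.4s, page 0 head: %.4s\n",
               &pages[3][12], &pages[0][0]);   /* ABCD, EFGH */
        return 0;
    }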
| 2777 | int perf_output_begin(struct perf_output_handle *handle, | 2777 | int perf_output_begin(struct perf_output_handle *handle, |
| 2778 | struct perf_event *event, unsigned int size, | 2778 | struct perf_event *event, unsigned int size, |
| 2779 | int nmi, int sample) | 2779 | int nmi, int sample) |
| 2780 | { | 2780 | { |
| 2781 | struct perf_event *output_event; | 2781 | struct perf_event *output_event; |
| 2782 | struct perf_mmap_data *data; | 2782 | struct perf_mmap_data *data; |
| 2783 | unsigned long tail, offset, head; | 2783 | unsigned long tail, offset, head; |
| 2784 | int have_lost; | 2784 | int have_lost; |
| 2785 | struct { | 2785 | struct { |
| 2786 | struct perf_event_header header; | 2786 | struct perf_event_header header; |
| 2787 | u64 id; | 2787 | u64 id; |
| 2788 | u64 lost; | 2788 | u64 lost; |
| 2789 | } lost_event; | 2789 | } lost_event; |
| 2790 | 2790 | ||
| 2791 | rcu_read_lock(); | 2791 | rcu_read_lock(); |
| 2792 | /* | 2792 | /* |
| 2793 | * For inherited events we send all the output towards the parent. | 2793 | * For inherited events we send all the output towards the parent. |
| 2794 | */ | 2794 | */ |
| 2795 | if (event->parent) | 2795 | if (event->parent) |
| 2796 | event = event->parent; | 2796 | event = event->parent; |
| 2797 | 2797 | ||
| 2798 | output_event = rcu_dereference(event->output); | 2798 | output_event = rcu_dereference(event->output); |
| 2799 | if (output_event) | 2799 | if (output_event) |
| 2800 | event = output_event; | 2800 | event = output_event; |
| 2801 | 2801 | ||
| 2802 | data = rcu_dereference(event->data); | 2802 | data = rcu_dereference(event->data); |
| 2803 | if (!data) | 2803 | if (!data) |
| 2804 | goto out; | 2804 | goto out; |
| 2805 | 2805 | ||
| 2806 | handle->data = data; | 2806 | handle->data = data; |
| 2807 | handle->event = event; | 2807 | handle->event = event; |
| 2808 | handle->nmi = nmi; | 2808 | handle->nmi = nmi; |
| 2809 | handle->sample = sample; | 2809 | handle->sample = sample; |
| 2810 | 2810 | ||
| 2811 | if (!data->nr_pages) | 2811 | if (!data->nr_pages) |
| 2812 | goto fail; | 2812 | goto fail; |
| 2813 | 2813 | ||
| 2814 | have_lost = atomic_read(&data->lost); | 2814 | have_lost = atomic_read(&data->lost); |
| 2815 | if (have_lost) | 2815 | if (have_lost) |
| 2816 | size += sizeof(lost_event); | 2816 | size += sizeof(lost_event); |
| 2817 | 2817 | ||
| 2818 | perf_output_lock(handle); | 2818 | perf_output_lock(handle); |
| 2819 | 2819 | ||
| 2820 | do { | 2820 | do { |
| 2821 | /* | 2821 | /* |
| 2822 | * Userspace could choose to issue a mb() before updating the | 2822 | * Userspace could choose to issue a mb() before updating the |
| 2823 | * tail pointer, so that all reads are completed before the | 2823 | * tail pointer, so that all reads are completed before the |
| 2824 | * write is issued. | 2824 | * write is issued. |
| 2825 | */ | 2825 | */ |
| 2826 | tail = ACCESS_ONCE(data->user_page->data_tail); | 2826 | tail = ACCESS_ONCE(data->user_page->data_tail); |
| 2827 | smp_rmb(); | 2827 | smp_rmb(); |
| 2828 | offset = head = atomic_long_read(&data->head); | 2828 | offset = head = atomic_long_read(&data->head); |
| 2829 | head += size; | 2829 | head += size; |
| 2830 | if (unlikely(!perf_output_space(data, tail, offset, head))) | 2830 | if (unlikely(!perf_output_space(data, tail, offset, head))) |
| 2831 | goto fail; | 2831 | goto fail; |
| 2832 | } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); | 2832 | } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); |
| 2833 | 2833 | ||
| 2834 | handle->offset = offset; | 2834 | handle->offset = offset; |
| 2835 | handle->head = head; | 2835 | handle->head = head; |
| 2836 | 2836 | ||
| 2837 | if (head - tail > data->watermark) | 2837 | if (head - tail > data->watermark) |
| 2838 | atomic_set(&data->wakeup, 1); | 2838 | atomic_set(&data->wakeup, 1); |
| 2839 | 2839 | ||
| 2840 | if (have_lost) { | 2840 | if (have_lost) { |
| 2841 | lost_event.header.type = PERF_RECORD_LOST; | 2841 | lost_event.header.type = PERF_RECORD_LOST; |
| 2842 | lost_event.header.misc = 0; | 2842 | lost_event.header.misc = 0; |
| 2843 | lost_event.header.size = sizeof(lost_event); | 2843 | lost_event.header.size = sizeof(lost_event); |
| 2844 | lost_event.id = event->id; | 2844 | lost_event.id = event->id; |
| 2845 | lost_event.lost = atomic_xchg(&data->lost, 0); | 2845 | lost_event.lost = atomic_xchg(&data->lost, 0); |
| 2846 | 2846 | ||
| 2847 | perf_output_put(handle, lost_event); | 2847 | perf_output_put(handle, lost_event); |
| 2848 | } | 2848 | } |
| 2849 | 2849 | ||
| 2850 | return 0; | 2850 | return 0; |
| 2851 | 2851 | ||
| 2852 | fail: | 2852 | fail: |
| 2853 | atomic_inc(&data->lost); | 2853 | atomic_inc(&data->lost); |
| 2854 | perf_output_unlock(handle); | 2854 | perf_output_unlock(handle); |
| 2855 | out: | 2855 | out: |
| 2856 | rcu_read_unlock(); | 2856 | rcu_read_unlock(); |
| 2857 | 2857 | ||
| 2858 | return -ENOSPC; | 2858 | return -ENOSPC; |
| 2859 | } | 2859 | } |
| 2860 | 2860 | ||
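The reservation loop is a classic lock-free claim: compare-and-swap the shared head forward by size and treat the old value as this writer's private window. A user-space sketch with a simplified space check (the real perf_output_space() also handles the no-overwrite cases); head and reserve() are stand-ins, not kernel symbols:

    #include <stdio.h>

    static unsigned long head;                   /* shared write cursor */

    static unsigned long reserve(unsigned long size, unsigned long tail,
                                 unsigned long buf_size)
    {
        unsigned long old, new_head;

        do {
            old = __atomic_load_n(&head, __ATOMIC_RELAXED);
            new_head = old + size;
            if (new_head - tail > buf_size)      /* would clobber unread data */
                return (unsigned long)-1;        /* caller counts a lost record */
        } while (!__atomic_compare_exchange_n(&head, &old, new_head, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
        return old;                              /* private offset to write at */
    }

    int main(void)
    {
        unsigned long off1 = reserve(64, 0, 4096);
        unsigned long off2 = reserve(32, 0, 4096);

        printf("off1=%lu off2=%lu head=%lu\n", off1, off2, head); /* 0 64 96 */
        return 0;
    }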
| 2861 | void perf_output_end(struct perf_output_handle *handle) | 2861 | void perf_output_end(struct perf_output_handle *handle) |
| 2862 | { | 2862 | { |
| 2863 | struct perf_event *event = handle->event; | 2863 | struct perf_event *event = handle->event; |
| 2864 | struct perf_mmap_data *data = handle->data; | 2864 | struct perf_mmap_data *data = handle->data; |
| 2865 | 2865 | ||
| 2866 | int wakeup_events = event->attr.wakeup_events; | 2866 | int wakeup_events = event->attr.wakeup_events; |
| 2867 | 2867 | ||
| 2868 | if (handle->sample && wakeup_events) { | 2868 | if (handle->sample && wakeup_events) { |
| 2869 | int events = atomic_inc_return(&data->events); | 2869 | int events = atomic_inc_return(&data->events); |
| 2870 | if (events >= wakeup_events) { | 2870 | if (events >= wakeup_events) { |
| 2871 | atomic_sub(wakeup_events, &data->events); | 2871 | atomic_sub(wakeup_events, &data->events); |
| 2872 | atomic_set(&data->wakeup, 1); | 2872 | atomic_set(&data->wakeup, 1); |
| 2873 | } | 2873 | } |
| 2874 | } | 2874 | } |
| 2875 | 2875 | ||
| 2876 | perf_output_unlock(handle); | 2876 | perf_output_unlock(handle); |
| 2877 | rcu_read_unlock(); | 2877 | rcu_read_unlock(); |
| 2878 | } | 2878 | } |
| 2879 | 2879 | ||
| 2880 | static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) | 2880 | static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) |
| 2881 | { | 2881 | { |
| 2882 | /* | 2882 | /* |
| 2883 | * only top level events have the pid namespace they were created in | 2883 | * only top level events have the pid namespace they were created in |
| 2884 | */ | 2884 | */ |
| 2885 | if (event->parent) | 2885 | if (event->parent) |
| 2886 | event = event->parent; | 2886 | event = event->parent; |
| 2887 | 2887 | ||
| 2888 | return task_tgid_nr_ns(p, event->ns); | 2888 | return task_tgid_nr_ns(p, event->ns); |
| 2889 | } | 2889 | } |
| 2890 | 2890 | ||
| 2891 | static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) | 2891 | static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) |
| 2892 | { | 2892 | { |
| 2893 | /* | 2893 | /* |
| 2894 | * only top level events have the pid namespace they were created in | 2894 | * only top level events have the pid namespace they were created in |
| 2895 | */ | 2895 | */ |
| 2896 | if (event->parent) | 2896 | if (event->parent) |
| 2897 | event = event->parent; | 2897 | event = event->parent; |
| 2898 | 2898 | ||
| 2899 | return task_pid_nr_ns(p, event->ns); | 2899 | return task_pid_nr_ns(p, event->ns); |
| 2900 | } | 2900 | } |
| 2901 | 2901 | ||
| 2902 | static void perf_output_read_one(struct perf_output_handle *handle, | 2902 | static void perf_output_read_one(struct perf_output_handle *handle, |
| 2903 | struct perf_event *event) | 2903 | struct perf_event *event) |
| 2904 | { | 2904 | { |
| 2905 | u64 read_format = event->attr.read_format; | 2905 | u64 read_format = event->attr.read_format; |
| 2906 | u64 values[4]; | 2906 | u64 values[4]; |
| 2907 | int n = 0; | 2907 | int n = 0; |
| 2908 | 2908 | ||
| 2909 | values[n++] = atomic64_read(&event->count); | 2909 | values[n++] = atomic64_read(&event->count); |
| 2910 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 2910 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
| 2911 | values[n++] = event->total_time_enabled + | 2911 | values[n++] = event->total_time_enabled + |
| 2912 | atomic64_read(&event->child_total_time_enabled); | 2912 | atomic64_read(&event->child_total_time_enabled); |
| 2913 | } | 2913 | } |
| 2914 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 2914 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { |
| 2915 | values[n++] = event->total_time_running + | 2915 | values[n++] = event->total_time_running + |
| 2916 | atomic64_read(&event->child_total_time_running); | 2916 | atomic64_read(&event->child_total_time_running); |
| 2917 | } | 2917 | } |
| 2918 | if (read_format & PERF_FORMAT_ID) | 2918 | if (read_format & PERF_FORMAT_ID) |
| 2919 | values[n++] = primary_event_id(event); | 2919 | values[n++] = primary_event_id(event); |
| 2920 | 2920 | ||
| 2921 | perf_output_copy(handle, values, n * sizeof(u64)); | 2921 | perf_output_copy(handle, values, n * sizeof(u64)); |
| 2922 | } | 2922 | } |
| 2923 | 2923 | ||
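On the reader side, this u64 block has to be walked in the same bit order it was emitted. A hypothetical decoder; the FMT_* constants mirror the PERF_FORMAT_* bit positions tested above but are defined locally so the sketch stays self-contained:

    #include <stdint.h>
    #include <stdio.h>

    #define FMT_TOTAL_TIME_ENABLED (1u << 0)
    #define FMT_TOTAL_TIME_RUNNING (1u << 1)
    #define FMT_ID                 (1u << 2)

    static void decode_read_one(const uint64_t *v, uint64_t read_format)
    {
        printf("count        = %llu\n", (unsigned long long)*v++);
        if (read_format & FMT_TOTAL_TIME_ENABLED)
            printf("time_enabled = %llu\n", (unsigned long long)*v++);
        if (read_format & FMT_TOTAL_TIME_RUNNING)
            printf("time_running = %llu\n", (unsigned long long)*v++);
        if (read_format & FMT_ID)
            printf("id           = %llu\n", (unsigned long long)*v++);
    }

    int main(void)
    {
        uint64_t rec[] = { 1234, 5000, 4000, 42 };

        decode_read_one(rec, FMT_TOTAL_TIME_ENABLED |
                             FMT_TOTAL_TIME_RUNNING | FMT_ID);
        return 0;
    }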
| 2924 | /* | 2924 | /* |
| 2925 | * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. | 2925 | * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. |
| 2926 | */ | 2926 | */ |
| 2927 | static void perf_output_read_group(struct perf_output_handle *handle, | 2927 | static void perf_output_read_group(struct perf_output_handle *handle, |
| 2928 | struct perf_event *event) | 2928 | struct perf_event *event) |
| 2929 | { | 2929 | { |
| 2930 | struct perf_event *leader = event->group_leader, *sub; | 2930 | struct perf_event *leader = event->group_leader, *sub; |
| 2931 | u64 read_format = event->attr.read_format; | 2931 | u64 read_format = event->attr.read_format; |
| 2932 | u64 values[5]; | 2932 | u64 values[5]; |
| 2933 | int n = 0; | 2933 | int n = 0; |
| 2934 | 2934 | ||
| 2935 | values[n++] = 1 + leader->nr_siblings; | 2935 | values[n++] = 1 + leader->nr_siblings; |
| 2936 | 2936 | ||
| 2937 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 2937 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
| 2938 | values[n++] = leader->total_time_enabled; | 2938 | values[n++] = leader->total_time_enabled; |
| 2939 | 2939 | ||
| 2940 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | 2940 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
| 2941 | values[n++] = leader->total_time_running; | 2941 | values[n++] = leader->total_time_running; |
| 2942 | 2942 | ||
| 2943 | if (leader != event) | 2943 | if (leader != event) |
| 2944 | leader->pmu->read(leader); | 2944 | leader->pmu->read(leader); |
| 2945 | 2945 | ||
| 2946 | values[n++] = atomic64_read(&leader->count); | 2946 | values[n++] = atomic64_read(&leader->count); |
| 2947 | if (read_format & PERF_FORMAT_ID) | 2947 | if (read_format & PERF_FORMAT_ID) |
| 2948 | values[n++] = primary_event_id(leader); | 2948 | values[n++] = primary_event_id(leader); |
| 2949 | 2949 | ||
| 2950 | perf_output_copy(handle, values, n * sizeof(u64)); | 2950 | perf_output_copy(handle, values, n * sizeof(u64)); |
| 2951 | 2951 | ||
| 2952 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { | 2952 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { |
| 2953 | n = 0; | 2953 | n = 0; |
| 2954 | 2954 | ||
| 2955 | if (sub != event) | 2955 | if (sub != event) |
| 2956 | sub->pmu->read(sub); | 2956 | sub->pmu->read(sub); |
| 2957 | 2957 | ||
| 2958 | values[n++] = atomic64_read(&sub->count); | 2958 | values[n++] = atomic64_read(&sub->count); |
| 2959 | if (read_format & PERF_FORMAT_ID) | 2959 | if (read_format & PERF_FORMAT_ID) |
| 2960 | values[n++] = primary_event_id(sub); | 2960 | values[n++] = primary_event_id(sub); |
| 2961 | 2961 | ||
| 2962 | perf_output_copy(handle, values, n * sizeof(u64)); | 2962 | perf_output_copy(handle, values, n * sizeof(u64)); |
| 2963 | } | 2963 | } |
| 2964 | } | 2964 | } |
| 2965 | 2965 | ||
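The group layout differs: one leading member count, the optional times once (taken from the leader), then a {value[, id]} pair per member. A companion sketch, again with locally defined stand-in bits (time_running would be handled the same way as time_enabled):

    #include <stdint.h>
    #include <stdio.h>

    #define FMT_TOTAL_TIME_ENABLED (1u << 0)
    #define FMT_ID                 (1u << 2)

    static void decode_read_group(const uint64_t *v, uint64_t read_format)
    {
        uint64_t nr = *v++;

        if (read_format & FMT_TOTAL_TIME_ENABLED)
            printf("time_enabled = %llu\n", (unsigned long long)*v++);
        for (uint64_t i = 0; i < nr; i++) {
            printf("member %llu: count=%llu", (unsigned long long)i,
                   (unsigned long long)*v++);
            if (read_format & FMT_ID)
                printf(" id=%llu", (unsigned long long)*v++);
            printf("\n");
        }
    }

    int main(void)
    {
        /* nr=2, time_enabled, leader {count,id}, sibling {count,id} */
        uint64_t rec[] = { 2, 5000, 1234, 7, 567, 8 };

        decode_read_group(rec, FMT_TOTAL_TIME_ENABLED | FMT_ID);
        return 0;
    }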
| 2966 | static void perf_output_read(struct perf_output_handle *handle, | 2966 | static void perf_output_read(struct perf_output_handle *handle, |
| 2967 | struct perf_event *event) | 2967 | struct perf_event *event) |
| 2968 | { | 2968 | { |
| 2969 | if (event->attr.read_format & PERF_FORMAT_GROUP) | 2969 | if (event->attr.read_format & PERF_FORMAT_GROUP) |
| 2970 | perf_output_read_group(handle, event); | 2970 | perf_output_read_group(handle, event); |
| 2971 | else | 2971 | else |
| 2972 | perf_output_read_one(handle, event); | 2972 | perf_output_read_one(handle, event); |
| 2973 | } | 2973 | } |
| 2974 | 2974 | ||
| 2975 | void perf_output_sample(struct perf_output_handle *handle, | 2975 | void perf_output_sample(struct perf_output_handle *handle, |
| 2976 | struct perf_event_header *header, | 2976 | struct perf_event_header *header, |
| 2977 | struct perf_sample_data *data, | 2977 | struct perf_sample_data *data, |
| 2978 | struct perf_event *event) | 2978 | struct perf_event *event) |
| 2979 | { | 2979 | { |
| 2980 | u64 sample_type = data->type; | 2980 | u64 sample_type = data->type; |
| 2981 | 2981 | ||
| 2982 | perf_output_put(handle, *header); | 2982 | perf_output_put(handle, *header); |
| 2983 | 2983 | ||
| 2984 | if (sample_type & PERF_SAMPLE_IP) | 2984 | if (sample_type & PERF_SAMPLE_IP) |
| 2985 | perf_output_put(handle, data->ip); | 2985 | perf_output_put(handle, data->ip); |
| 2986 | 2986 | ||
| 2987 | if (sample_type & PERF_SAMPLE_TID) | 2987 | if (sample_type & PERF_SAMPLE_TID) |
| 2988 | perf_output_put(handle, data->tid_entry); | 2988 | perf_output_put(handle, data->tid_entry); |
| 2989 | 2989 | ||
| 2990 | if (sample_type & PERF_SAMPLE_TIME) | 2990 | if (sample_type & PERF_SAMPLE_TIME) |
| 2991 | perf_output_put(handle, data->time); | 2991 | perf_output_put(handle, data->time); |
| 2992 | 2992 | ||
| 2993 | if (sample_type & PERF_SAMPLE_ADDR) | 2993 | if (sample_type & PERF_SAMPLE_ADDR) |
| 2994 | perf_output_put(handle, data->addr); | 2994 | perf_output_put(handle, data->addr); |
| 2995 | 2995 | ||
| 2996 | if (sample_type & PERF_SAMPLE_ID) | 2996 | if (sample_type & PERF_SAMPLE_ID) |
| 2997 | perf_output_put(handle, data->id); | 2997 | perf_output_put(handle, data->id); |
| 2998 | 2998 | ||
| 2999 | if (sample_type & PERF_SAMPLE_STREAM_ID) | 2999 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 3000 | perf_output_put(handle, data->stream_id); | 3000 | perf_output_put(handle, data->stream_id); |
| 3001 | 3001 | ||
| 3002 | if (sample_type & PERF_SAMPLE_CPU) | 3002 | if (sample_type & PERF_SAMPLE_CPU) |
| 3003 | perf_output_put(handle, data->cpu_entry); | 3003 | perf_output_put(handle, data->cpu_entry); |
| 3004 | 3004 | ||
| 3005 | if (sample_type & PERF_SAMPLE_PERIOD) | 3005 | if (sample_type & PERF_SAMPLE_PERIOD) |
| 3006 | perf_output_put(handle, data->period); | 3006 | perf_output_put(handle, data->period); |
| 3007 | 3007 | ||
| 3008 | if (sample_type & PERF_SAMPLE_READ) | 3008 | if (sample_type & PERF_SAMPLE_READ) |
| 3009 | perf_output_read(handle, event); | 3009 | perf_output_read(handle, event); |
| 3010 | 3010 | ||
| 3011 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 3011 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
| 3012 | if (data->callchain) { | 3012 | if (data->callchain) { |
| 3013 | int size = 1; | 3013 | int size = 1; |
| 3014 | 3014 | ||
| 3015 | size += data->callchain->nr; | 3015 | size += data->callchain->nr; |
| 3017 | 3017 | ||
| 3018 | size *= sizeof(u64); | 3018 | size *= sizeof(u64); |
| 3019 | 3019 | ||
| 3020 | perf_output_copy(handle, data->callchain, size); | 3020 | perf_output_copy(handle, data->callchain, size); |
| 3021 | } else { | 3021 | } else { |
| 3022 | u64 nr = 0; | 3022 | u64 nr = 0; |
| 3023 | perf_output_put(handle, nr); | 3023 | perf_output_put(handle, nr); |
| 3024 | } | 3024 | } |
| 3025 | } | 3025 | } |
| 3026 | 3026 | ||
| 3027 | if (sample_type & PERF_SAMPLE_RAW) { | 3027 | if (sample_type & PERF_SAMPLE_RAW) { |
| 3028 | if (data->raw) { | 3028 | if (data->raw) { |
| 3029 | perf_output_put(handle, data->raw->size); | 3029 | perf_output_put(handle, data->raw->size); |
| 3030 | perf_output_copy(handle, data->raw->data, | 3030 | perf_output_copy(handle, data->raw->data, |
| 3031 | data->raw->size); | 3031 | data->raw->size); |
| 3032 | } else { | 3032 | } else { |
| 3033 | struct { | 3033 | struct { |
| 3034 | u32 size; | 3034 | u32 size; |
| 3035 | u32 data; | 3035 | u32 data; |
| 3036 | } raw = { | 3036 | } raw = { |
| 3037 | .size = sizeof(u32), | 3037 | .size = sizeof(u32), |
| 3038 | .data = 0, | 3038 | .data = 0, |
| 3039 | }; | 3039 | }; |
| 3040 | perf_output_put(handle, raw); | 3040 | perf_output_put(handle, raw); |
| 3041 | } | 3041 | } |
| 3042 | } | 3042 | } |
| 3043 | } | 3043 | } |
| 3044 | 3044 | ||
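A consumer walks the record body in exactly this order, consuming only the fields its sample_type requested. A sketch for the first few fields; the S_* constants are local stand-ins for the PERF_SAMPLE_IP/TID/TIME/ADDR bits, and the tid entry is the {u32 pid; u32 tid} pair packed into one u64:

    #include <stdint.h>
    #include <stdio.h>

    #define S_IP   (1u << 0)
    #define S_TID  (1u << 1)
    #define S_TIME (1u << 2)
    #define S_ADDR (1u << 3)

    static void decode_sample(const uint64_t *p, uint64_t sample_type)
    {
        if (sample_type & S_IP)
            printf("ip      = %#llx\n", (unsigned long long)*p++);
        if (sample_type & S_TID) {
            uint64_t v = *p++;           /* field order per the running kernel */
            printf("pid/tid = %u/%u\n", (uint32_t)v, (uint32_t)(v >> 32));
        }
        if (sample_type & S_TIME)
            printf("time    = %llu\n", (unsigned long long)*p++);
        if (sample_type & S_ADDR)
            printf("addr    = %#llx\n", (unsigned long long)*p++);
    }

    int main(void)
    {
        uint64_t rec[] = { 0xc0100000, (1001ull << 32) | 1000, 123456789 };

        decode_sample(rec, S_IP | S_TID | S_TIME);
        return 0;
    }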
| 3045 | void perf_prepare_sample(struct perf_event_header *header, | 3045 | void perf_prepare_sample(struct perf_event_header *header, |
| 3046 | struct perf_sample_data *data, | 3046 | struct perf_sample_data *data, |
| 3047 | struct perf_event *event, | 3047 | struct perf_event *event, |
| 3048 | struct pt_regs *regs) | 3048 | struct pt_regs *regs) |
| 3049 | { | 3049 | { |
| 3050 | u64 sample_type = event->attr.sample_type; | 3050 | u64 sample_type = event->attr.sample_type; |
| 3051 | 3051 | ||
| 3052 | data->type = sample_type; | 3052 | data->type = sample_type; |
| 3053 | 3053 | ||
| 3054 | header->type = PERF_RECORD_SAMPLE; | 3054 | header->type = PERF_RECORD_SAMPLE; |
| 3055 | header->size = sizeof(*header); | 3055 | header->size = sizeof(*header); |
| 3056 | 3056 | ||
| 3057 | header->misc = 0; | 3057 | header->misc = 0; |
| 3058 | header->misc |= perf_misc_flags(regs); | 3058 | header->misc |= perf_misc_flags(regs); |
| 3059 | 3059 | ||
| 3060 | if (sample_type & PERF_SAMPLE_IP) { | 3060 | if (sample_type & PERF_SAMPLE_IP) { |
| 3061 | data->ip = perf_instruction_pointer(regs); | 3061 | data->ip = perf_instruction_pointer(regs); |
| 3062 | 3062 | ||
| 3063 | header->size += sizeof(data->ip); | 3063 | header->size += sizeof(data->ip); |
| 3064 | } | 3064 | } |
| 3065 | 3065 | ||
| 3066 | if (sample_type & PERF_SAMPLE_TID) { | 3066 | if (sample_type & PERF_SAMPLE_TID) { |
| 3067 | /* namespace issues */ | 3067 | /* namespace issues */ |
| 3068 | data->tid_entry.pid = perf_event_pid(event, current); | 3068 | data->tid_entry.pid = perf_event_pid(event, current); |
| 3069 | data->tid_entry.tid = perf_event_tid(event, current); | 3069 | data->tid_entry.tid = perf_event_tid(event, current); |
| 3070 | 3070 | ||
| 3071 | header->size += sizeof(data->tid_entry); | 3071 | header->size += sizeof(data->tid_entry); |
| 3072 | } | 3072 | } |
| 3073 | 3073 | ||
| 3074 | if (sample_type & PERF_SAMPLE_TIME) { | 3074 | if (sample_type & PERF_SAMPLE_TIME) { |
| 3075 | data->time = perf_clock(); | 3075 | data->time = perf_clock(); |
| 3076 | 3076 | ||
| 3077 | header->size += sizeof(data->time); | 3077 | header->size += sizeof(data->time); |
| 3078 | } | 3078 | } |
| 3079 | 3079 | ||
| 3080 | if (sample_type & PERF_SAMPLE_ADDR) | 3080 | if (sample_type & PERF_SAMPLE_ADDR) |
| 3081 | header->size += sizeof(data->addr); | 3081 | header->size += sizeof(data->addr); |
| 3082 | 3082 | ||
| 3083 | if (sample_type & PERF_SAMPLE_ID) { | 3083 | if (sample_type & PERF_SAMPLE_ID) { |
| 3084 | data->id = primary_event_id(event); | 3084 | data->id = primary_event_id(event); |
| 3085 | 3085 | ||
| 3086 | header->size += sizeof(data->id); | 3086 | header->size += sizeof(data->id); |
| 3087 | } | 3087 | } |
| 3088 | 3088 | ||
| 3089 | if (sample_type & PERF_SAMPLE_STREAM_ID) { | 3089 | if (sample_type & PERF_SAMPLE_STREAM_ID) { |
| 3090 | data->stream_id = event->id; | 3090 | data->stream_id = event->id; |
| 3091 | 3091 | ||
| 3092 | header->size += sizeof(data->stream_id); | 3092 | header->size += sizeof(data->stream_id); |
| 3093 | } | 3093 | } |
| 3094 | 3094 | ||
| 3095 | if (sample_type & PERF_SAMPLE_CPU) { | 3095 | if (sample_type & PERF_SAMPLE_CPU) { |
| 3096 | data->cpu_entry.cpu = raw_smp_processor_id(); | 3096 | data->cpu_entry.cpu = raw_smp_processor_id(); |
| 3097 | data->cpu_entry.reserved = 0; | 3097 | data->cpu_entry.reserved = 0; |
| 3098 | 3098 | ||
| 3099 | header->size += sizeof(data->cpu_entry); | 3099 | header->size += sizeof(data->cpu_entry); |
| 3100 | } | 3100 | } |
| 3101 | 3101 | ||
| 3102 | if (sample_type & PERF_SAMPLE_PERIOD) | 3102 | if (sample_type & PERF_SAMPLE_PERIOD) |
| 3103 | header->size += sizeof(data->period); | 3103 | header->size += sizeof(data->period); |
| 3104 | 3104 | ||
| 3105 | if (sample_type & PERF_SAMPLE_READ) | 3105 | if (sample_type & PERF_SAMPLE_READ) |
| 3106 | header->size += perf_event_read_size(event); | 3106 | header->size += perf_event_read_size(event); |
| 3107 | 3107 | ||
| 3108 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 3108 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
| 3109 | int size = 1; | 3109 | int size = 1; |
| 3110 | 3110 | ||
| 3111 | data->callchain = perf_callchain(regs); | 3111 | data->callchain = perf_callchain(regs); |
| 3112 | 3112 | ||
| 3113 | if (data->callchain) | 3113 | if (data->callchain) |
| 3114 | size += data->callchain->nr; | 3114 | size += data->callchain->nr; |
| 3115 | 3115 | ||
| 3116 | header->size += size * sizeof(u64); | 3116 | header->size += size * sizeof(u64); |
| 3117 | } | 3117 | } |
| 3118 | 3118 | ||
| 3119 | if (sample_type & PERF_SAMPLE_RAW) { | 3119 | if (sample_type & PERF_SAMPLE_RAW) { |
| 3120 | int size = sizeof(u32); | 3120 | int size = sizeof(u32); |
| 3121 | 3121 | ||
| 3122 | if (data->raw) | 3122 | if (data->raw) |
| 3123 | size += data->raw->size; | 3123 | size += data->raw->size; |
| 3124 | else | 3124 | else |
| 3125 | size += sizeof(u32); | 3125 | size += sizeof(u32); |
| 3126 | 3126 | ||
| 3127 | WARN_ON_ONCE(size & (sizeof(u64)-1)); | 3127 | WARN_ON_ONCE(size & (sizeof(u64)-1)); |
| 3128 | header->size += size; | 3128 | header->size += size; |
| 3129 | } | 3129 | } |
| 3130 | } | 3130 | } |
| 3131 | 3131 | ||
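Because each optional field is u64-sized, the precomputed size is just the 8-byte header plus 8 bytes per selected bit, with callchain, read and raw being the variable-length exceptions handled above. The fixed-field arithmetic in isolation, with the same local stand-in bits:

    #include <stdint.h>
    #include <stdio.h>

    #define S_IP   (1u << 0)
    #define S_TID  (1u << 1)
    #define S_TIME (1u << 2)
    #define S_ADDR (1u << 3)

    static unsigned int sample_size(uint64_t sample_type)
    {
        unsigned int size = 8;       /* sizeof(struct perf_event_header) */

        if (sample_type & S_IP)
            size += 8;
        if (sample_type & S_TID)
            size += 8;
        if (sample_type & S_TIME)
            size += 8;
        if (sample_type & S_ADDR)
            size += 8;
        return size;
    }

    int main(void)
    {
        printf("%u bytes\n", sample_size(S_IP | S_TID | S_TIME));  /* 32 */
        return 0;
    }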
| 3132 | static void perf_event_output(struct perf_event *event, int nmi, | 3132 | static void perf_event_output(struct perf_event *event, int nmi, |
| 3133 | struct perf_sample_data *data, | 3133 | struct perf_sample_data *data, |
| 3134 | struct pt_regs *regs) | 3134 | struct pt_regs *regs) |
| 3135 | { | 3135 | { |
| 3136 | struct perf_output_handle handle; | 3136 | struct perf_output_handle handle; |
| 3137 | struct perf_event_header header; | 3137 | struct perf_event_header header; |
| 3138 | 3138 | ||
| 3139 | perf_prepare_sample(&header, data, event, regs); | 3139 | perf_prepare_sample(&header, data, event, regs); |
| 3140 | 3140 | ||
| 3141 | if (perf_output_begin(&handle, event, header.size, nmi, 1)) | 3141 | if (perf_output_begin(&handle, event, header.size, nmi, 1)) |
| 3142 | return; | 3142 | return; |
| 3143 | 3143 | ||
| 3144 | perf_output_sample(&handle, &header, data, event); | 3144 | perf_output_sample(&handle, &header, data, event); |
| 3145 | 3145 | ||
| 3146 | perf_output_end(&handle); | 3146 | perf_output_end(&handle); |
| 3147 | } | 3147 | } |
| 3148 | 3148 | ||
| 3149 | /* | 3149 | /* |
| 3150 | * read event_id | 3150 | * read event_id |
| 3151 | */ | 3151 | */ |
| 3152 | 3152 | ||
| 3153 | struct perf_read_event { | 3153 | struct perf_read_event { |
| 3154 | struct perf_event_header header; | 3154 | struct perf_event_header header; |
| 3155 | 3155 | ||
| 3156 | u32 pid; | 3156 | u32 pid; |
| 3157 | u32 tid; | 3157 | u32 tid; |
| 3158 | }; | 3158 | }; |
| 3159 | 3159 | ||
| 3160 | static void | 3160 | static void |
| 3161 | perf_event_read_event(struct perf_event *event, | 3161 | perf_event_read_event(struct perf_event *event, |
| 3162 | struct task_struct *task) | 3162 | struct task_struct *task) |
| 3163 | { | 3163 | { |
| 3164 | struct perf_output_handle handle; | 3164 | struct perf_output_handle handle; |
| 3165 | struct perf_read_event read_event = { | 3165 | struct perf_read_event read_event = { |
| 3166 | .header = { | 3166 | .header = { |
| 3167 | .type = PERF_RECORD_READ, | 3167 | .type = PERF_RECORD_READ, |
| 3168 | .misc = 0, | 3168 | .misc = 0, |
| 3169 | .size = sizeof(read_event) + perf_event_read_size(event), | 3169 | .size = sizeof(read_event) + perf_event_read_size(event), |
| 3170 | }, | 3170 | }, |
| 3171 | .pid = perf_event_pid(event, task), | 3171 | .pid = perf_event_pid(event, task), |
| 3172 | .tid = perf_event_tid(event, task), | 3172 | .tid = perf_event_tid(event, task), |
| 3173 | }; | 3173 | }; |
| 3174 | int ret; | 3174 | int ret; |
| 3175 | 3175 | ||
| 3176 | ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); | 3176 | ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); |
| 3177 | if (ret) | 3177 | if (ret) |
| 3178 | return; | 3178 | return; |
| 3179 | 3179 | ||
| 3180 | perf_output_put(&handle, read_event); | 3180 | perf_output_put(&handle, read_event); |
| 3181 | perf_output_read(&handle, event); | 3181 | perf_output_read(&handle, event); |
| 3182 | 3182 | ||
| 3183 | perf_output_end(&handle); | 3183 | perf_output_end(&handle); |
| 3184 | } | 3184 | } |
| 3185 | 3185 | ||
| 3186 | /* | 3186 | /* |
| 3187 | * task tracking -- fork/exit | 3187 | * task tracking -- fork/exit |
| 3188 | * | 3188 | * |
| 3189 | * enabled by: attr.comm | attr.mmap | attr.task | 3189 | * enabled by: attr.comm | attr.mmap | attr.task |
| 3190 | */ | 3190 | */ |
| 3191 | 3191 | ||
| 3192 | struct perf_task_event { | 3192 | struct perf_task_event { |
| 3193 | struct task_struct *task; | 3193 | struct task_struct *task; |
| 3194 | struct perf_event_context *task_ctx; | 3194 | struct perf_event_context *task_ctx; |
| 3195 | 3195 | ||
| 3196 | struct { | 3196 | struct { |
| 3197 | struct perf_event_header header; | 3197 | struct perf_event_header header; |
| 3198 | 3198 | ||
| 3199 | u32 pid; | 3199 | u32 pid; |
| 3200 | u32 ppid; | 3200 | u32 ppid; |
| 3201 | u32 tid; | 3201 | u32 tid; |
| 3202 | u32 ptid; | 3202 | u32 ptid; |
| 3203 | u64 time; | 3203 | u64 time; |
| 3204 | } event_id; | 3204 | } event_id; |
| 3205 | }; | 3205 | }; |
| 3206 | 3206 | ||
| 3207 | static void perf_event_task_output(struct perf_event *event, | 3207 | static void perf_event_task_output(struct perf_event *event, |
| 3208 | struct perf_task_event *task_event) | 3208 | struct perf_task_event *task_event) |
| 3209 | { | 3209 | { |
| 3210 | struct perf_output_handle handle; | 3210 | struct perf_output_handle handle; |
| 3211 | int size; | 3211 | int size; |
| 3212 | struct task_struct *task = task_event->task; | 3212 | struct task_struct *task = task_event->task; |
| 3213 | int ret; | 3213 | int ret; |
| 3214 | 3214 | ||
| 3215 | size = task_event->event_id.header.size; | 3215 | size = task_event->event_id.header.size; |
| 3216 | ret = perf_output_begin(&handle, event, size, 0, 0); | 3216 | ret = perf_output_begin(&handle, event, size, 0, 0); |
| 3217 | 3217 | ||
| 3218 | if (ret) | 3218 | if (ret) |
| 3219 | return; | 3219 | return; |
| 3220 | 3220 | ||
| 3221 | task_event->event_id.pid = perf_event_pid(event, task); | 3221 | task_event->event_id.pid = perf_event_pid(event, task); |
| 3222 | task_event->event_id.ppid = perf_event_pid(event, current); | 3222 | task_event->event_id.ppid = perf_event_pid(event, current); |
| 3223 | 3223 | ||
| 3224 | task_event->event_id.tid = perf_event_tid(event, task); | 3224 | task_event->event_id.tid = perf_event_tid(event, task); |
| 3225 | task_event->event_id.ptid = perf_event_tid(event, current); | 3225 | task_event->event_id.ptid = perf_event_tid(event, current); |
| 3226 | 3226 | ||
| 3227 | task_event->event_id.time = perf_clock(); | 3227 | task_event->event_id.time = perf_clock(); |
| 3228 | 3228 | ||
| 3229 | perf_output_put(&handle, task_event->event_id); | 3229 | perf_output_put(&handle, task_event->event_id); |
| 3230 | 3230 | ||
| 3231 | perf_output_end(&handle); | 3231 | perf_output_end(&handle); |
| 3232 | } | 3232 | } |
| 3233 | 3233 | ||
| 3234 | static int perf_event_task_match(struct perf_event *event) | 3234 | static int perf_event_task_match(struct perf_event *event) |
| 3235 | { | 3235 | { |
| 3236 | if (event->attr.comm || event->attr.mmap || event->attr.task) | 3236 | if (event->attr.comm || event->attr.mmap || event->attr.task) |
| 3237 | return 1; | 3237 | return 1; |
| 3238 | 3238 | ||
| 3239 | return 0; | 3239 | return 0; |
| 3240 | } | 3240 | } |
| 3241 | 3241 | ||
| 3242 | static void perf_event_task_ctx(struct perf_event_context *ctx, | 3242 | static void perf_event_task_ctx(struct perf_event_context *ctx, |
| 3243 | struct perf_task_event *task_event) | 3243 | struct perf_task_event *task_event) |
| 3244 | { | 3244 | { |
| 3245 | struct perf_event *event; | 3245 | struct perf_event *event; |
| 3246 | 3246 | ||
| 3247 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | 3247 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) |
| 3248 | return; | 3248 | return; |
| 3249 | 3249 | ||
| 3250 | rcu_read_lock(); | 3250 | rcu_read_lock(); |
| 3251 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3251 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
| 3252 | if (perf_event_task_match(event)) | 3252 | if (perf_event_task_match(event)) |
| 3253 | perf_event_task_output(event, task_event); | 3253 | perf_event_task_output(event, task_event); |
| 3254 | } | 3254 | } |
| 3255 | rcu_read_unlock(); | 3255 | rcu_read_unlock(); |
| 3256 | } | 3256 | } |
| 3257 | 3257 | ||
| 3258 | static void perf_event_task_event(struct perf_task_event *task_event) | 3258 | static void perf_event_task_event(struct perf_task_event *task_event) |
| 3259 | { | 3259 | { |
| 3260 | struct perf_cpu_context *cpuctx; | 3260 | struct perf_cpu_context *cpuctx; |
| 3261 | struct perf_event_context *ctx = task_event->task_ctx; | 3261 | struct perf_event_context *ctx = task_event->task_ctx; |
| 3262 | 3262 | ||
| 3263 | cpuctx = &get_cpu_var(perf_cpu_context); | 3263 | cpuctx = &get_cpu_var(perf_cpu_context); |
| 3264 | perf_event_task_ctx(&cpuctx->ctx, task_event); | 3264 | perf_event_task_ctx(&cpuctx->ctx, task_event); |
| 3265 | put_cpu_var(perf_cpu_context); | 3265 | put_cpu_var(perf_cpu_context); |
| 3266 | 3266 | ||
| 3267 | rcu_read_lock(); | 3267 | rcu_read_lock(); |
| 3268 | if (!ctx) | 3268 | if (!ctx) |
| 3269 | ctx = rcu_dereference(task_event->task->perf_event_ctxp); | 3269 | ctx = rcu_dereference(task_event->task->perf_event_ctxp); |
| 3270 | if (ctx) | 3270 | if (ctx) |
| 3271 | perf_event_task_ctx(ctx, task_event); | 3271 | perf_event_task_ctx(ctx, task_event); |
| 3272 | rcu_read_unlock(); | 3272 | rcu_read_unlock(); |
| 3273 | } | 3273 | } |
| 3274 | 3274 | ||
| 3275 | static void perf_event_task(struct task_struct *task, | 3275 | static void perf_event_task(struct task_struct *task, |
| 3276 | struct perf_event_context *task_ctx, | 3276 | struct perf_event_context *task_ctx, |
| 3277 | int new) | 3277 | int new) |
| 3278 | { | 3278 | { |
| 3279 | struct perf_task_event task_event; | 3279 | struct perf_task_event task_event; |
| 3280 | 3280 | ||
| 3281 | if (!atomic_read(&nr_comm_events) && | 3281 | if (!atomic_read(&nr_comm_events) && |
| 3282 | !atomic_read(&nr_mmap_events) && | 3282 | !atomic_read(&nr_mmap_events) && |
| 3283 | !atomic_read(&nr_task_events)) | 3283 | !atomic_read(&nr_task_events)) |
| 3284 | return; | 3284 | return; |
| 3285 | 3285 | ||
| 3286 | task_event = (struct perf_task_event){ | 3286 | task_event = (struct perf_task_event){ |
| 3287 | .task = task, | 3287 | .task = task, |
| 3288 | .task_ctx = task_ctx, | 3288 | .task_ctx = task_ctx, |
| 3289 | .event_id = { | 3289 | .event_id = { |
| 3290 | .header = { | 3290 | .header = { |
| 3291 | .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, | 3291 | .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, |
| 3292 | .misc = 0, | 3292 | .misc = 0, |
| 3293 | .size = sizeof(task_event.event_id), | 3293 | .size = sizeof(task_event.event_id), |
| 3294 | }, | 3294 | }, |
| 3295 | /* .pid */ | 3295 | /* .pid */ |
| 3296 | /* .ppid */ | 3296 | /* .ppid */ |
| 3297 | /* .tid */ | 3297 | /* .tid */ |
| 3298 | /* .ptid */ | 3298 | /* .ptid */ |
| 3299 | }, | 3299 | }, |
| 3300 | }; | 3300 | }; |
| 3301 | 3301 | ||
| 3302 | perf_event_task_event(&task_event); | 3302 | perf_event_task_event(&task_event); |
| 3303 | } | 3303 | } |
| 3304 | 3304 | ||
| 3305 | void perf_event_fork(struct task_struct *task) | 3305 | void perf_event_fork(struct task_struct *task) |
| 3306 | { | 3306 | { |
| 3307 | perf_event_task(task, NULL, 1); | 3307 | perf_event_task(task, NULL, 1); |
| 3308 | } | 3308 | } |
| 3309 | 3309 | ||
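To receive these records at all, a profiler opens an event with the side-band attribute bits set; the path above then emits PERF_RECORD_FORK/EXIT into that event's buffer. A minimal sketch using the perf_event_open syscall; the mmap of the ring buffer and the read loop are elided, and PERF_COUNT_SW_CPU_CLOCK is just a convenient carrier event:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);
        attr.type   = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        attr.task = 1;               /* PERF_RECORD_FORK / PERF_RECORD_EXIT */
        attr.comm = 1;               /* PERF_RECORD_COMM on exec / prctl    */
        attr.mmap = 1;               /* PERF_RECORD_MMAP on executable maps */

        fd = syscall(__NR_perf_event_open, &attr, 0 /* self */,
                     -1 /* any cpu */, -1 /* no group */, 0);
        if (fd < 0)
            perror("perf_event_open");
        else
            printf("side-band records enabled on fd %d\n", fd);
        return 0;
    }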
| 3310 | /* | 3310 | /* |
| 3311 | * comm tracking | 3311 | * comm tracking |
| 3312 | */ | 3312 | */ |
| 3313 | 3313 | ||
| 3314 | struct perf_comm_event { | 3314 | struct perf_comm_event { |
| 3315 | struct task_struct *task; | 3315 | struct task_struct *task; |
| 3316 | char *comm; | 3316 | char *comm; |
| 3317 | int comm_size; | 3317 | int comm_size; |
| 3318 | 3318 | ||
| 3319 | struct { | 3319 | struct { |
| 3320 | struct perf_event_header header; | 3320 | struct perf_event_header header; |
| 3321 | 3321 | ||
| 3322 | u32 pid; | 3322 | u32 pid; |
| 3323 | u32 tid; | 3323 | u32 tid; |
| 3324 | } event_id; | 3324 | } event_id; |
| 3325 | }; | 3325 | }; |
| 3326 | 3326 | ||
| 3327 | static void perf_event_comm_output(struct perf_event *event, | 3327 | static void perf_event_comm_output(struct perf_event *event, |
| 3328 | struct perf_comm_event *comm_event) | 3328 | struct perf_comm_event *comm_event) |
| 3329 | { | 3329 | { |
| 3330 | struct perf_output_handle handle; | 3330 | struct perf_output_handle handle; |
| 3331 | int size = comm_event->event_id.header.size; | 3331 | int size = comm_event->event_id.header.size; |
| 3332 | int ret = perf_output_begin(&handle, event, size, 0, 0); | 3332 | int ret = perf_output_begin(&handle, event, size, 0, 0); |
| 3333 | 3333 | ||
| 3334 | if (ret) | 3334 | if (ret) |
| 3335 | return; | 3335 | return; |
| 3336 | 3336 | ||
| 3337 | comm_event->event_id.pid = perf_event_pid(event, comm_event->task); | 3337 | comm_event->event_id.pid = perf_event_pid(event, comm_event->task); |
| 3338 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); | 3338 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); |
| 3339 | 3339 | ||
| 3340 | perf_output_put(&handle, comm_event->event_id); | 3340 | perf_output_put(&handle, comm_event->event_id); |
| 3341 | perf_output_copy(&handle, comm_event->comm, | 3341 | perf_output_copy(&handle, comm_event->comm, |
| 3342 | comm_event->comm_size); | 3342 | comm_event->comm_size); |
| 3343 | perf_output_end(&handle); | 3343 | perf_output_end(&handle); |
| 3344 | } | 3344 | } |
| 3345 | 3345 | ||
| 3346 | static int perf_event_comm_match(struct perf_event *event) | 3346 | static int perf_event_comm_match(struct perf_event *event) |
| 3347 | { | 3347 | { |
| 3348 | if (event->attr.comm) | 3348 | if (event->attr.comm) |
| 3349 | return 1; | 3349 | return 1; |
| 3350 | 3350 | ||
| 3351 | return 0; | 3351 | return 0; |
| 3352 | } | 3352 | } |
| 3353 | 3353 | ||
| 3354 | static void perf_event_comm_ctx(struct perf_event_context *ctx, | 3354 | static void perf_event_comm_ctx(struct perf_event_context *ctx, |
| 3355 | struct perf_comm_event *comm_event) | 3355 | struct perf_comm_event *comm_event) |
| 3356 | { | 3356 | { |
| 3357 | struct perf_event *event; | 3357 | struct perf_event *event; |
| 3358 | 3358 | ||
| 3359 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | 3359 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) |
| 3360 | return; | 3360 | return; |
| 3361 | 3361 | ||
| 3362 | rcu_read_lock(); | 3362 | rcu_read_lock(); |
| 3363 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3363 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
| 3364 | if (perf_event_comm_match(event)) | 3364 | if (perf_event_comm_match(event)) |
| 3365 | perf_event_comm_output(event, comm_event); | 3365 | perf_event_comm_output(event, comm_event); |
| 3366 | } | 3366 | } |
| 3367 | rcu_read_unlock(); | 3367 | rcu_read_unlock(); |
| 3368 | } | 3368 | } |
| 3369 | 3369 | ||
| 3370 | static void perf_event_comm_event(struct perf_comm_event *comm_event) | 3370 | static void perf_event_comm_event(struct perf_comm_event *comm_event) |
| 3371 | { | 3371 | { |
| 3372 | struct perf_cpu_context *cpuctx; | 3372 | struct perf_cpu_context *cpuctx; |
| 3373 | struct perf_event_context *ctx; | 3373 | struct perf_event_context *ctx; |
| 3374 | unsigned int size; | 3374 | unsigned int size; |
| 3375 | char comm[TASK_COMM_LEN]; | 3375 | char comm[TASK_COMM_LEN]; |
| 3376 | 3376 | ||
| 3377 | memset(comm, 0, sizeof(comm)); | 3377 | memset(comm, 0, sizeof(comm)); |
| 3378 | strncpy(comm, comm_event->task->comm, sizeof(comm)); | 3378 | strncpy(comm, comm_event->task->comm, sizeof(comm)); |
| 3379 | size = ALIGN(strlen(comm)+1, sizeof(u64)); | 3379 | size = ALIGN(strlen(comm)+1, sizeof(u64)); |
| 3380 | 3380 | ||
| 3381 | comm_event->comm = comm; | 3381 | comm_event->comm = comm; |
| 3382 | comm_event->comm_size = size; | 3382 | comm_event->comm_size = size; |
| 3383 | 3383 | ||
| 3384 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; | 3384 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; |
| 3385 | 3385 | ||
| 3386 | cpuctx = &get_cpu_var(perf_cpu_context); | 3386 | cpuctx = &get_cpu_var(perf_cpu_context); |
| 3387 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); | 3387 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); |
| 3388 | put_cpu_var(perf_cpu_context); | 3388 | put_cpu_var(perf_cpu_context); |
| 3389 | 3389 | ||
| 3390 | rcu_read_lock(); | 3390 | rcu_read_lock(); |
| 3391 | /* | 3391 | /* |
| 3392 | * it doesn't really matter which of the child contexts the | 3392 | * it doesn't really matter which of the child contexts the |
| 3393 | * event ends up in. | 3393 | * event ends up in. |
| 3394 | */ | 3394 | */ |
| 3395 | ctx = rcu_dereference(current->perf_event_ctxp); | 3395 | ctx = rcu_dereference(current->perf_event_ctxp); |
| 3396 | if (ctx) | 3396 | if (ctx) |
| 3397 | perf_event_comm_ctx(ctx, comm_event); | 3397 | perf_event_comm_ctx(ctx, comm_event); |
| 3398 | rcu_read_unlock(); | 3398 | rcu_read_unlock(); |
| 3399 | } | 3399 | } |
| 3400 | 3400 | ||
| 3401 | void perf_event_comm(struct task_struct *task) | 3401 | void perf_event_comm(struct task_struct *task) |
| 3402 | { | 3402 | { |
| 3403 | struct perf_comm_event comm_event; | 3403 | struct perf_comm_event comm_event; |
| 3404 | 3404 | ||
| 3405 | if (task->perf_event_ctxp) | 3405 | if (task->perf_event_ctxp) |
| 3406 | perf_event_enable_on_exec(task); | 3406 | perf_event_enable_on_exec(task); |
| 3407 | 3407 | ||
| 3408 | if (!atomic_read(&nr_comm_events)) | 3408 | if (!atomic_read(&nr_comm_events)) |
| 3409 | return; | 3409 | return; |
| 3410 | 3410 | ||
| 3411 | comm_event = (struct perf_comm_event){ | 3411 | comm_event = (struct perf_comm_event){ |
| 3412 | .task = task, | 3412 | .task = task, |
| 3413 | /* .comm */ | 3413 | /* .comm */ |
| 3414 | /* .comm_size */ | 3414 | /* .comm_size */ |
| 3415 | .event_id = { | 3415 | .event_id = { |
| 3416 | .header = { | 3416 | .header = { |
| 3417 | .type = PERF_RECORD_COMM, | 3417 | .type = PERF_RECORD_COMM, |
| 3418 | .misc = 0, | 3418 | .misc = 0, |
| 3419 | /* .size */ | 3419 | /* .size */ |
| 3420 | }, | 3420 | }, |
| 3421 | /* .pid */ | 3421 | /* .pid */ |
| 3422 | /* .tid */ | 3422 | /* .tid */ |
| 3423 | }, | 3423 | }, |
| 3424 | }; | 3424 | }; |
| 3425 | 3425 | ||
| 3426 | perf_event_comm_event(&comm_event); | 3426 | perf_event_comm_event(&comm_event); |
| 3427 | } | 3427 | } |
| 3428 | 3428 | ||
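The ALIGN(strlen(comm)+1, sizeof(u64)) in perf_event_comm_event() rounds the NUL-terminated name up to an 8-byte multiple so the record stays u64-aligned; because the comm buffer was zeroed first, the padding bytes come out as NULs. The same rounding in isolation (ALIGN_UP is a local macro):

    #include <stdio.h>
    #include <string.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
        const char *comm = "bash";
        size_t raw = strlen(comm) + 1;             /* 5, with the NUL */
        size_t padded = ALIGN_UP(raw, 8);          /* rounded up to 8 */

        printf("\"%s\": %zu bytes padded to %zu\n", comm, raw, padded);
        return 0;
    }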
| 3429 | /* | 3429 | /* |
| 3430 | * mmap tracking | 3430 | * mmap tracking |
| 3431 | */ | 3431 | */ |
| 3432 | 3432 | ||
| 3433 | struct perf_mmap_event { | 3433 | struct perf_mmap_event { |
| 3434 | struct vm_area_struct *vma; | 3434 | struct vm_area_struct *vma; |
| 3435 | 3435 | ||
| 3436 | const char *file_name; | 3436 | const char *file_name; |
| 3437 | int file_size; | 3437 | int file_size; |
| 3438 | 3438 | ||
| 3439 | struct { | 3439 | struct { |
| 3440 | struct perf_event_header header; | 3440 | struct perf_event_header header; |
| 3441 | 3441 | ||
| 3442 | u32 pid; | 3442 | u32 pid; |
| 3443 | u32 tid; | 3443 | u32 tid; |
| 3444 | u64 start; | 3444 | u64 start; |
| 3445 | u64 len; | 3445 | u64 len; |
| 3446 | u64 pgoff; | 3446 | u64 pgoff; |
| 3447 | } event_id; | 3447 | } event_id; |
| 3448 | }; | 3448 | }; |
| 3449 | 3449 | ||
| 3450 | static void perf_event_mmap_output(struct perf_event *event, | 3450 | static void perf_event_mmap_output(struct perf_event *event, |
| 3451 | struct perf_mmap_event *mmap_event) | 3451 | struct perf_mmap_event *mmap_event) |
| 3452 | { | 3452 | { |
| 3453 | struct perf_output_handle handle; | 3453 | struct perf_output_handle handle; |
| 3454 | int size = mmap_event->event_id.header.size; | 3454 | int size = mmap_event->event_id.header.size; |
| 3455 | int ret = perf_output_begin(&handle, event, size, 0, 0); | 3455 | int ret = perf_output_begin(&handle, event, size, 0, 0); |
| 3456 | 3456 | ||
| 3457 | if (ret) | 3457 | if (ret) |
| 3458 | return; | 3458 | return; |
| 3459 | 3459 | ||
| 3460 | mmap_event->event_id.pid = perf_event_pid(event, current); | 3460 | mmap_event->event_id.pid = perf_event_pid(event, current); |
| 3461 | mmap_event->event_id.tid = perf_event_tid(event, current); | 3461 | mmap_event->event_id.tid = perf_event_tid(event, current); |
| 3462 | 3462 | ||
| 3463 | perf_output_put(&handle, mmap_event->event_id); | 3463 | perf_output_put(&handle, mmap_event->event_id); |
| 3464 | perf_output_copy(&handle, mmap_event->file_name, | 3464 | perf_output_copy(&handle, mmap_event->file_name, |
| 3465 | mmap_event->file_size); | 3465 | mmap_event->file_size); |
| 3466 | perf_output_end(&handle); | 3466 | perf_output_end(&handle); |
| 3467 | } | 3467 | } |
| 3468 | 3468 | ||
| 3469 | static int perf_event_mmap_match(struct perf_event *event, | 3469 | static int perf_event_mmap_match(struct perf_event *event, |
| 3470 | struct perf_mmap_event *mmap_event) | 3470 | struct perf_mmap_event *mmap_event) |
| 3471 | { | 3471 | { |
| 3472 | if (event->attr.mmap) | 3472 | if (event->attr.mmap) |
| 3473 | return 1; | 3473 | return 1; |
| 3474 | 3474 | ||
| 3475 | return 0; | 3475 | return 0; |
| 3476 | } | 3476 | } |
| 3477 | 3477 | ||
| 3478 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, | 3478 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, |
| 3479 | struct perf_mmap_event *mmap_event) | 3479 | struct perf_mmap_event *mmap_event) |
| 3480 | { | 3480 | { |
| 3481 | struct perf_event *event; | 3481 | struct perf_event *event; |
| 3482 | 3482 | ||
| 3483 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | 3483 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) |
| 3484 | return; | 3484 | return; |
| 3485 | 3485 | ||
| 3486 | rcu_read_lock(); | 3486 | rcu_read_lock(); |
| 3487 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3487 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
| 3488 | if (perf_event_mmap_match(event, mmap_event)) | 3488 | if (perf_event_mmap_match(event, mmap_event)) |
| 3489 | perf_event_mmap_output(event, mmap_event); | 3489 | perf_event_mmap_output(event, mmap_event); |
| 3490 | } | 3490 | } |
| 3491 | rcu_read_unlock(); | 3491 | rcu_read_unlock(); |
| 3492 | } | 3492 | } |
| 3493 | 3493 | ||
| 3494 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | 3494 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) |
| 3495 | { | 3495 | { |
| 3496 | struct perf_cpu_context *cpuctx; | 3496 | struct perf_cpu_context *cpuctx; |
| 3497 | struct perf_event_context *ctx; | 3497 | struct perf_event_context *ctx; |
| 3498 | struct vm_area_struct *vma = mmap_event->vma; | 3498 | struct vm_area_struct *vma = mmap_event->vma; |
| 3499 | struct file *file = vma->vm_file; | 3499 | struct file *file = vma->vm_file; |
| 3500 | unsigned int size; | 3500 | unsigned int size; |
| 3501 | char tmp[16]; | 3501 | char tmp[16]; |
| 3502 | char *buf = NULL; | 3502 | char *buf = NULL; |
| 3503 | const char *name; | 3503 | const char *name; |
| 3504 | 3504 | ||
| 3505 | memset(tmp, 0, sizeof(tmp)); | 3505 | memset(tmp, 0, sizeof(tmp)); |
| 3506 | 3506 | ||
| 3507 | if (file) { | 3507 | if (file) { |
| 3508 | /* | 3508 | /* |
| 3509 | * d_path works from the end of the buffer backwards, so we | 3509 | * d_path works from the end of the buffer backwards, so we |
| 3510 | * need to add enough zero bytes after the string to handle | 3510 | * need to add enough zero bytes after the string to handle |
| 3511 | * the 64bit alignment we do later. | 3511 | * the 64bit alignment we do later. |
| 3512 | */ | 3512 | */ |
| 3513 | buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); | 3513 | buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); |
| 3514 | if (!buf) { | 3514 | if (!buf) { |
| 3515 | name = strncpy(tmp, "//enomem", sizeof(tmp)); | 3515 | name = strncpy(tmp, "//enomem", sizeof(tmp)); |
| 3516 | goto got_name; | 3516 | goto got_name; |
| 3517 | } | 3517 | } |
| 3518 | name = d_path(&file->f_path, buf, PATH_MAX); | 3518 | name = d_path(&file->f_path, buf, PATH_MAX); |
| 3519 | if (IS_ERR(name)) { | 3519 | if (IS_ERR(name)) { |
| 3520 | name = strncpy(tmp, "//toolong", sizeof(tmp)); | 3520 | name = strncpy(tmp, "//toolong", sizeof(tmp)); |
| 3521 | goto got_name; | 3521 | goto got_name; |
| 3522 | } | 3522 | } |
| 3523 | } else { | 3523 | } else { |
| 3524 | if (arch_vma_name(mmap_event->vma)) { | 3524 | if (arch_vma_name(mmap_event->vma)) { |
| 3525 | name = strncpy(tmp, arch_vma_name(mmap_event->vma), | 3525 | name = strncpy(tmp, arch_vma_name(mmap_event->vma), |
| 3526 | sizeof(tmp)); | 3526 | sizeof(tmp)); |
| 3527 | goto got_name; | 3527 | goto got_name; |
| 3528 | } | 3528 | } |
| 3529 | 3529 | ||
| 3530 | if (!vma->vm_mm) { | 3530 | if (!vma->vm_mm) { |
| 3531 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); | 3531 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); |
| 3532 | goto got_name; | 3532 | goto got_name; |
| 3533 | } | 3533 | } |
| 3534 | 3534 | ||
| 3535 | name = strncpy(tmp, "//anon", sizeof(tmp)); | 3535 | name = strncpy(tmp, "//anon", sizeof(tmp)); |
| 3536 | goto got_name; | 3536 | goto got_name; |
| 3537 | } | 3537 | } |
| 3538 | 3538 | ||
| 3539 | got_name: | 3539 | got_name: |
| 3540 | size = ALIGN(strlen(name)+1, sizeof(u64)); | 3540 | size = ALIGN(strlen(name)+1, sizeof(u64)); |
| 3541 | 3541 | ||
| 3542 | mmap_event->file_name = name; | 3542 | mmap_event->file_name = name; |
| 3543 | mmap_event->file_size = size; | 3543 | mmap_event->file_size = size; |
| 3544 | 3544 | ||
| 3545 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; | 3545 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; |
| 3546 | 3546 | ||
| 3547 | cpuctx = &get_cpu_var(perf_cpu_context); | 3547 | cpuctx = &get_cpu_var(perf_cpu_context); |
| 3548 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); | 3548 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); |
| 3549 | put_cpu_var(perf_cpu_context); | 3549 | put_cpu_var(perf_cpu_context); |
| 3550 | 3550 | ||
| 3551 | rcu_read_lock(); | 3551 | rcu_read_lock(); |
| 3552 | /* | 3552 | /* |
| 3553 | * it doesn't really matter which of the child contexts the | 3553 | * it doesn't really matter which of the child contexts the |
| 3554 | * event ends up in. | 3554 | * event ends up in. |
| 3555 | */ | 3555 | */ |
| 3556 | ctx = rcu_dereference(current->perf_event_ctxp); | 3556 | ctx = rcu_dereference(current->perf_event_ctxp); |
| 3557 | if (ctx) | 3557 | if (ctx) |
| 3558 | perf_event_mmap_ctx(ctx, mmap_event); | 3558 | perf_event_mmap_ctx(ctx, mmap_event); |
| 3559 | rcu_read_unlock(); | 3559 | rcu_read_unlock(); |
| 3560 | 3560 | ||
| 3561 | kfree(buf); | 3561 | kfree(buf); |
| 3562 | } | 3562 | } |
| 3563 | 3563 | ||
| 3564 | void __perf_event_mmap(struct vm_area_struct *vma) | 3564 | void __perf_event_mmap(struct vm_area_struct *vma) |
| 3565 | { | 3565 | { |
| 3566 | struct perf_mmap_event mmap_event; | 3566 | struct perf_mmap_event mmap_event; |
| 3567 | 3567 | ||
| 3568 | if (!atomic_read(&nr_mmap_events)) | 3568 | if (!atomic_read(&nr_mmap_events)) |
| 3569 | return; | 3569 | return; |
| 3570 | 3570 | ||
| 3571 | mmap_event = (struct perf_mmap_event){ | 3571 | mmap_event = (struct perf_mmap_event){ |
| 3572 | .vma = vma, | 3572 | .vma = vma, |
| 3573 | /* .file_name */ | 3573 | /* .file_name */ |
| 3574 | /* .file_size */ | 3574 | /* .file_size */ |
| 3575 | .event_id = { | 3575 | .event_id = { |
| 3576 | .header = { | 3576 | .header = { |
| 3577 | .type = PERF_RECORD_MMAP, | 3577 | .type = PERF_RECORD_MMAP, |
| 3578 | .misc = 0, | 3578 | .misc = 0, |
| 3579 | /* .size */ | 3579 | /* .size */ |
| 3580 | }, | 3580 | }, |
| 3581 | /* .pid */ | 3581 | /* .pid */ |
| 3582 | /* .tid */ | 3582 | /* .tid */ |
| 3583 | .start = vma->vm_start, | 3583 | .start = vma->vm_start, |
| 3584 | .len = vma->vm_end - vma->vm_start, | 3584 | .len = vma->vm_end - vma->vm_start, |
| 3585 | .pgoff = vma->vm_pgoff, | 3585 | .pgoff = vma->vm_pgoff, |
| 3586 | }, | 3586 | }, |
| 3587 | }; | 3587 | }; |
| 3588 | 3588 | ||
| 3589 | perf_event_mmap_event(&mmap_event); | 3589 | perf_event_mmap_event(&mmap_event); |
| 3590 | } | 3590 | } |
| 3591 | 3591 | ||
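A PERF_RECORD_MMAP body is the fixed event_id fields followed by the padded pathname. A hypothetical reader-side mirror of that layout (the perf_event_header is consumed separately, and filename is really variable-length):

    #include <stdint.h>
    #include <stdio.h>

    struct mmap_record {             /* mirrors the event_id struct above */
        uint32_t pid, tid;
        uint64_t start, len, pgoff;
        char     filename[16];       /* really variable, u64-padded */
    };

    int main(void)
    {
        struct mmap_record m = {
            .pid = 1000, .tid = 1000,
            .start = 0x400000, .len = 0x1000, .pgoff = 0,
            .filename = "/bin/true",
        };

        printf("%u/%u: [%#llx, %#llx) @ %#llx %s\n", m.pid, m.tid,
               (unsigned long long)m.start,
               (unsigned long long)(m.start + m.len),
               (unsigned long long)m.pgoff, m.filename);
        return 0;
    }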
| 3592 | /* | 3592 | /* |
| 3593 | * IRQ throttle logging | 3593 | * IRQ throttle logging |
| 3594 | */ | 3594 | */ |
| 3595 | 3595 | ||
| 3596 | static void perf_log_throttle(struct perf_event *event, int enable) | 3596 | static void perf_log_throttle(struct perf_event *event, int enable) |
| 3597 | { | 3597 | { |
| 3598 | struct perf_output_handle handle; | 3598 | struct perf_output_handle handle; |
| 3599 | int ret; | 3599 | int ret; |
| 3600 | 3600 | ||
| 3601 | struct { | 3601 | struct { |
| 3602 | struct perf_event_header header; | 3602 | struct perf_event_header header; |
| 3603 | u64 time; | 3603 | u64 time; |
| 3604 | u64 id; | 3604 | u64 id; |
| 3605 | u64 stream_id; | 3605 | u64 stream_id; |
| 3606 | } throttle_event = { | 3606 | } throttle_event = { |
| 3607 | .header = { | 3607 | .header = { |
| 3608 | .type = PERF_RECORD_THROTTLE, | 3608 | .type = PERF_RECORD_THROTTLE, |
| 3609 | .misc = 0, | 3609 | .misc = 0, |
| 3610 | .size = sizeof(throttle_event), | 3610 | .size = sizeof(throttle_event), |
| 3611 | }, | 3611 | }, |
| 3612 | .time = perf_clock(), | 3612 | .time = perf_clock(), |
| 3613 | .id = primary_event_id(event), | 3613 | .id = primary_event_id(event), |
| 3614 | .stream_id = event->id, | 3614 | .stream_id = event->id, |
| 3615 | }; | 3615 | }; |
| 3616 | 3616 | ||
| 3617 | if (enable) | 3617 | if (enable) |
| 3618 | throttle_event.header.type = PERF_RECORD_UNTHROTTLE; | 3618 | throttle_event.header.type = PERF_RECORD_UNTHROTTLE; |
| 3619 | 3619 | ||
| 3620 | ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0); | 3620 | ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0); |
| 3621 | if (ret) | 3621 | if (ret) |
| 3622 | return; | 3622 | return; |
| 3623 | 3623 | ||
| 3624 | perf_output_put(&handle, throttle_event); | 3624 | perf_output_put(&handle, throttle_event); |
| 3625 | perf_output_end(&handle); | 3625 | perf_output_end(&handle); |
| 3626 | } | 3626 | } |
| 3627 | 3627 | ||
| 3628 | /* | 3628 | /* |
| 3629 | * Generic event overflow handling, sampling. | 3629 | * Generic event overflow handling, sampling. |
| 3630 | */ | 3630 | */ |
| 3631 | 3631 | ||
| 3632 | static int __perf_event_overflow(struct perf_event *event, int nmi, | 3632 | static int __perf_event_overflow(struct perf_event *event, int nmi, |
| 3633 | int throttle, struct perf_sample_data *data, | 3633 | int throttle, struct perf_sample_data *data, |
| 3634 | struct pt_regs *regs) | 3634 | struct pt_regs *regs) |
| 3635 | { | 3635 | { |
| 3636 | int events = atomic_read(&event->event_limit); | 3636 | int events = atomic_read(&event->event_limit); |
| 3637 | struct hw_perf_event *hwc = &event->hw; | 3637 | struct hw_perf_event *hwc = &event->hw; |
| 3638 | int ret = 0; | 3638 | int ret = 0; |
| 3639 | 3639 | ||
| 3640 | throttle = (throttle && event->pmu->unthrottle != NULL); | 3640 | throttle = (throttle && event->pmu->unthrottle != NULL); |
| 3641 | 3641 | ||
| 3642 | if (!throttle) { | 3642 | if (!throttle) { |
| 3643 | hwc->interrupts++; | 3643 | hwc->interrupts++; |
| 3644 | } else { | 3644 | } else { |
| 3645 | if (hwc->interrupts != MAX_INTERRUPTS) { | 3645 | if (hwc->interrupts != MAX_INTERRUPTS) { |
| 3646 | hwc->interrupts++; | 3646 | hwc->interrupts++; |
| 3647 | if (HZ * hwc->interrupts > | 3647 | if (HZ * hwc->interrupts > |
| 3648 | (u64)sysctl_perf_event_sample_rate) { | 3648 | (u64)sysctl_perf_event_sample_rate) { |
| 3649 | hwc->interrupts = MAX_INTERRUPTS; | 3649 | hwc->interrupts = MAX_INTERRUPTS; |
| 3650 | perf_log_throttle(event, 0); | 3650 | perf_log_throttle(event, 0); |
| 3651 | ret = 1; | 3651 | ret = 1; |
| 3652 | } | 3652 | } |
| 3653 | } else { | 3653 | } else { |
| 3654 | /* | 3654 | /* |
| 3655 | * Keep re-disabling events even though on the previous | 3655 | * Keep re-disabling events even though on the previous |
| 3656 | * pass we disabled it - just in case we raced with a | 3656 | * pass we disabled it - just in case we raced with a |
| 3657 | * sched-in and the event got enabled again: | 3657 | * sched-in and the event got enabled again: |
| 3658 | */ | 3658 | */ |
| 3659 | ret = 1; | 3659 | ret = 1; |
| 3660 | } | 3660 | } |
| 3661 | } | 3661 | } |
| 3662 | 3662 | ||
| 3663 | if (event->attr.freq) { | 3663 | if (event->attr.freq) { |
| 3664 | u64 now = perf_clock(); | 3664 | u64 now = perf_clock(); |
| 3665 | s64 delta = now - hwc->freq_stamp; | 3665 | s64 delta = now - hwc->freq_stamp; |
| 3666 | 3666 | ||
| 3667 | hwc->freq_stamp = now; | 3667 | hwc->freq_stamp = now; |
| 3668 | 3668 | ||
| 3669 | if (delta > 0 && delta < TICK_NSEC) | 3669 | if (delta > 0 && delta < TICK_NSEC) |
| 3670 | perf_adjust_period(event, NSEC_PER_SEC / (int)delta); | 3670 | perf_adjust_period(event, NSEC_PER_SEC / (int)delta); |
| 3671 | } | 3671 | } |
| 3672 | 3672 | ||
| 3673 | /* | 3673 | /* |
| 3674 | * XXX event_limit might not quite work as expected on inherited | 3674 | * XXX event_limit might not quite work as expected on inherited |
| 3675 | * events | 3675 | * events |
| 3676 | */ | 3676 | */ |
| 3677 | 3677 | ||
| 3678 | event->pending_kill = POLL_IN; | 3678 | event->pending_kill = POLL_IN; |
| 3679 | if (events && atomic_dec_and_test(&event->event_limit)) { | 3679 | if (events && atomic_dec_and_test(&event->event_limit)) { |
| 3680 | ret = 1; | 3680 | ret = 1; |
| 3681 | event->pending_kill = POLL_HUP; | 3681 | event->pending_kill = POLL_HUP; |
| 3682 | if (nmi) { | 3682 | if (nmi) { |
| 3683 | event->pending_disable = 1; | 3683 | event->pending_disable = 1; |
| 3684 | perf_pending_queue(&event->pending, | 3684 | perf_pending_queue(&event->pending, |
| 3685 | perf_pending_event); | 3685 | perf_pending_event); |
| 3686 | } else | 3686 | } else |
| 3687 | perf_event_disable(event); | 3687 | perf_event_disable(event); |
| 3688 | } | 3688 | } |
| 3689 | 3689 | ||
| 3690 | perf_event_output(event, nmi, data, regs); | 3690 | perf_event_output(event, nmi, data, regs); |
| 3691 | return ret; | 3691 | return ret; |
| 3692 | } | 3692 | } |
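/*
 * Editor's note: a worked example of the throttle test above, assuming
 * the defaults of this era (HZ = 1000, sysctl_perf_event_sample_rate =
 * 100000). The event is throttled once it takes more than
 * sample_rate / HZ = 100 interrupts before the counter is reset:
 *
 *	HZ * hwc->interrupts > sample_rate
 *	1000 * 101           > 100000	->  perf_log_throttle(event, 0)
 *
 * hwc->interrupts is then pinned at MAX_INTERRUPTS until an unthrottle.
 */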
| 3693 | 3693 | ||
| 3694 | int perf_event_overflow(struct perf_event *event, int nmi, | 3694 | int perf_event_overflow(struct perf_event *event, int nmi, |
| 3695 | struct perf_sample_data *data, | 3695 | struct perf_sample_data *data, |
| 3696 | struct pt_regs *regs) | 3696 | struct pt_regs *regs) |
| 3697 | { | 3697 | { |
| 3698 | return __perf_event_overflow(event, nmi, 1, data, regs); | 3698 | return __perf_event_overflow(event, nmi, 1, data, regs); |
| 3699 | } | 3699 | } |
| 3700 | 3700 | ||
| 3701 | /* | 3701 | /* |
| 3702 | * Generic software event infrastructure | 3702 | * Generic software event infrastructure |
| 3703 | */ | 3703 | */ |
| 3704 | 3704 | ||
| 3705 | /* | 3705 | /* |
| 3706 | * We directly increment event->count and keep a second value in | 3706 | * We directly increment event->count and keep a second value in |
| 3707 | * event->hw.period_left to count intervals. This period value | 3707 | * event->hw.period_left to count intervals. This period value |
| 3708 | * is kept in the range [-sample_period, 0] so that we can use its | 3708 | * is kept in the range [-sample_period, 0] so that we can use its |
| 3709 | * sign as the trigger. | 3709 | * sign as the trigger. |
| 3710 | */ | 3710 | */ |
| 3711 | 3711 | ||
| 3712 | static u64 perf_swevent_set_period(struct perf_event *event) | 3712 | static u64 perf_swevent_set_period(struct perf_event *event) |
| 3713 | { | 3713 | { |
| 3714 | struct hw_perf_event *hwc = &event->hw; | 3714 | struct hw_perf_event *hwc = &event->hw; |
| 3715 | u64 period = hwc->last_period; | 3715 | u64 period = hwc->last_period; |
| 3716 | u64 nr, offset; | 3716 | u64 nr, offset; |
| 3717 | s64 old, val; | 3717 | s64 old, val; |
| 3718 | 3718 | ||
| 3719 | hwc->last_period = hwc->sample_period; | 3719 | hwc->last_period = hwc->sample_period; |
| 3720 | 3720 | ||
| 3721 | again: | 3721 | again: |
| 3722 | old = val = atomic64_read(&hwc->period_left); | 3722 | old = val = atomic64_read(&hwc->period_left); |
| 3723 | if (val < 0) | 3723 | if (val < 0) |
| 3724 | return 0; | 3724 | return 0; |
| 3725 | 3725 | ||
| 3726 | nr = div64_u64(period + val, period); | 3726 | nr = div64_u64(period + val, period); |
| 3727 | offset = nr * period; | 3727 | offset = nr * period; |
| 3728 | val -= offset; | 3728 | val -= offset; |
| 3729 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) | 3729 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) |
| 3730 | goto again; | 3730 | goto again; |
| 3731 | 3731 | ||
| 3732 | return nr; | 3732 | return nr; |
| 3733 | } | 3733 | } |
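/*
 * Editor's note: a minimal userspace sketch (not kernel code) of the
 * period_left arithmetic above, with plain integers standing in for the
 * atomic64 ops. It shows why keeping the counter in [-sample_period, 0]
 * lets the sign act as the trigger: adding events drives the value
 * toward zero, and once it is non-negative, (period + val) / period
 * whole overflows are due.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t set_period(int64_t *period_left, uint64_t period)
{
	int64_t val = *period_left;
	uint64_t nr, offset;

	if (val < 0)			/* still negative: no overflow due */
		return 0;

	nr = ((uint64_t)val + period) / period;	/* whole periods consumed */
	offset = nr * period;
	*period_left = val - (int64_t)offset;	/* back into [-period, 0) */
	return nr;
}

int main(void)
{
	int64_t left = -100;	/* 100 events until the next overflow */

	left += 250;		/* mimics atomic64_add_negative(250, ...) */
	if (left >= 0) {	/* sign flipped: sampling is due */
		uint64_t nr = set_period(&left, 100);

		/* prints: overflows: 2, new period_left: -50 */
		printf("overflows: %llu, new period_left: %lld\n",
		       (unsigned long long)nr, (long long)left);
	}
	return 0;
}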
| 3734 | 3734 | ||
| 3735 | static void perf_swevent_overflow(struct perf_event *event, | 3735 | static void perf_swevent_overflow(struct perf_event *event, |
| 3736 | int nmi, struct perf_sample_data *data, | 3736 | int nmi, struct perf_sample_data *data, |
| 3737 | struct pt_regs *regs) | 3737 | struct pt_regs *regs) |
| 3738 | { | 3738 | { |
| 3739 | struct hw_perf_event *hwc = &event->hw; | 3739 | struct hw_perf_event *hwc = &event->hw; |
| 3740 | int throttle = 0; | 3740 | int throttle = 0; |
| 3741 | u64 overflow; | 3741 | u64 overflow; |
| 3742 | 3742 | ||
| 3743 | data->period = event->hw.last_period; | 3743 | data->period = event->hw.last_period; |
| 3744 | overflow = perf_swevent_set_period(event); | 3744 | overflow = perf_swevent_set_period(event); |
| 3745 | 3745 | ||
| 3746 | if (hwc->interrupts == MAX_INTERRUPTS) | 3746 | if (hwc->interrupts == MAX_INTERRUPTS) |
| 3747 | return; | 3747 | return; |
| 3748 | 3748 | ||
| 3749 | for (; overflow; overflow--) { | 3749 | for (; overflow; overflow--) { |
| 3750 | if (__perf_event_overflow(event, nmi, throttle, | 3750 | if (__perf_event_overflow(event, nmi, throttle, |
| 3751 | data, regs)) { | 3751 | data, regs)) { |
| 3752 | /* | 3752 | /* |
| 3753 | * We inhibit the overflow from happening when | 3753 | * We inhibit the overflow from happening when |
| 3754 | * hwc->interrupts == MAX_INTERRUPTS. | 3754 | * hwc->interrupts == MAX_INTERRUPTS. |
| 3755 | */ | 3755 | */ |
| 3756 | break; | 3756 | break; |
| 3757 | } | 3757 | } |
| 3758 | throttle = 1; | 3758 | throttle = 1; |
| 3759 | } | 3759 | } |
| 3760 | } | 3760 | } |
| 3761 | 3761 | ||
| 3762 | static void perf_swevent_unthrottle(struct perf_event *event) | 3762 | static void perf_swevent_unthrottle(struct perf_event *event) |
| 3763 | { | 3763 | { |
| 3764 | /* | 3764 | /* |
| 3765 | * Nothing to do, we already reset hwc->interrupts. | 3765 | * Nothing to do, we already reset hwc->interrupts. |
| 3766 | */ | 3766 | */ |
| 3767 | } | 3767 | } |
| 3768 | 3768 | ||
| 3769 | static void perf_swevent_add(struct perf_event *event, u64 nr, | 3769 | static void perf_swevent_add(struct perf_event *event, u64 nr, |
| 3770 | int nmi, struct perf_sample_data *data, | 3770 | int nmi, struct perf_sample_data *data, |
| 3771 | struct pt_regs *regs) | 3771 | struct pt_regs *regs) |
| 3772 | { | 3772 | { |
| 3773 | struct hw_perf_event *hwc = &event->hw; | 3773 | struct hw_perf_event *hwc = &event->hw; |
| 3774 | 3774 | ||
| 3775 | atomic64_add(nr, &event->count); | 3775 | atomic64_add(nr, &event->count); |
| 3776 | 3776 | ||
| 3777 | if (!hwc->sample_period) | 3777 | if (!hwc->sample_period) |
| 3778 | return; | 3778 | return; |
| 3779 | 3779 | ||
| 3780 | if (!regs) | 3780 | if (!regs) |
| 3781 | return; | 3781 | return; |
| 3782 | 3782 | ||
| 3783 | if (!atomic64_add_negative(nr, &hwc->period_left)) | 3783 | if (!atomic64_add_negative(nr, &hwc->period_left)) |
| 3784 | perf_swevent_overflow(event, nmi, data, regs); | 3784 | perf_swevent_overflow(event, nmi, data, regs); |
| 3785 | } | 3785 | } |
| 3786 | 3786 | ||
| 3787 | static int perf_swevent_is_counting(struct perf_event *event) | 3787 | static int perf_swevent_is_counting(struct perf_event *event) |
| 3788 | { | 3788 | { |
| 3789 | /* | 3789 | /* |
| 3790 | * The event is active, we're good! | 3790 | * The event is active, we're good! |
| 3791 | */ | 3791 | */ |
| 3792 | if (event->state == PERF_EVENT_STATE_ACTIVE) | 3792 | if (event->state == PERF_EVENT_STATE_ACTIVE) |
| 3793 | return 1; | 3793 | return 1; |
| 3794 | 3794 | ||
| 3795 | /* | 3795 | /* |
| 3796 | * The event is off/error, not counting. | 3796 | * The event is off/error, not counting. |
| 3797 | */ | 3797 | */ |
| 3798 | if (event->state != PERF_EVENT_STATE_INACTIVE) | 3798 | if (event->state != PERF_EVENT_STATE_INACTIVE) |
| 3799 | return 0; | 3799 | return 0; |
| 3800 | 3800 | ||
| 3801 | /* | 3801 | /* |
| 3802 | * The event is inactive; if the context is active | 3802 | * The event is inactive; if the context is active |
| 3803 | * we're part of a group that didn't make it onto the 'pmu', | 3803 | * we're part of a group that didn't make it onto the 'pmu', |
| 3804 | * so we're not counting. | 3804 | * so we're not counting. |
| 3805 | */ | 3805 | */ |
| 3806 | if (event->ctx->is_active) | 3806 | if (event->ctx->is_active) |
| 3807 | return 0; | 3807 | return 0; |
| 3808 | 3808 | ||
| 3809 | /* | 3809 | /* |
| 3810 | * We're inactive and the context is too; this means the | 3810 | * We're inactive and the context is too; this means the |
| 3811 | * task is scheduled out and we're counting events that happen | 3811 | * task is scheduled out and we're counting events that happen |
| 3812 | * to us, like migration events. | 3812 | * to us, like migration events. |
| 3813 | */ | 3813 | */ |
| 3814 | return 1; | 3814 | return 1; |
| 3815 | } | 3815 | } |
| 3816 | 3816 | ||
| 3817 | static int perf_tp_event_match(struct perf_event *event, | 3817 | static int perf_tp_event_match(struct perf_event *event, |
| 3818 | struct perf_sample_data *data); | 3818 | struct perf_sample_data *data); |
| 3819 | 3819 | ||
| 3820 | static int perf_swevent_match(struct perf_event *event, | 3820 | static int perf_swevent_match(struct perf_event *event, |
| 3821 | enum perf_type_id type, | 3821 | enum perf_type_id type, |
| 3822 | u32 event_id, | 3822 | u32 event_id, |
| 3823 | struct perf_sample_data *data, | 3823 | struct perf_sample_data *data, |
| 3824 | struct pt_regs *regs) | 3824 | struct pt_regs *regs) |
| 3825 | { | 3825 | { |
| 3826 | if (!perf_swevent_is_counting(event)) | 3826 | if (!perf_swevent_is_counting(event)) |
| 3827 | return 0; | 3827 | return 0; |
| 3828 | 3828 | ||
| 3829 | if (event->attr.type != type) | 3829 | if (event->attr.type != type) |
| 3830 | return 0; | 3830 | return 0; |
| 3831 | if (event->attr.config != event_id) | 3831 | if (event->attr.config != event_id) |
| 3832 | return 0; | 3832 | return 0; |
| 3833 | 3833 | ||
| 3834 | if (regs) { | 3834 | if (regs) { |
| 3835 | if (event->attr.exclude_user && user_mode(regs)) | 3835 | if (event->attr.exclude_user && user_mode(regs)) |
| 3836 | return 0; | 3836 | return 0; |
| 3837 | 3837 | ||
| 3838 | if (event->attr.exclude_kernel && !user_mode(regs)) | 3838 | if (event->attr.exclude_kernel && !user_mode(regs)) |
| 3839 | return 0; | 3839 | return 0; |
| 3840 | } | 3840 | } |
| 3841 | 3841 | ||
| 3842 | if (event->attr.type == PERF_TYPE_TRACEPOINT && | 3842 | if (event->attr.type == PERF_TYPE_TRACEPOINT && |
| 3843 | !perf_tp_event_match(event, data)) | 3843 | !perf_tp_event_match(event, data)) |
| 3844 | return 0; | 3844 | return 0; |
| 3845 | 3845 | ||
| 3846 | return 1; | 3846 | return 1; |
| 3847 | } | 3847 | } |
| 3848 | 3848 | ||
| 3849 | static void perf_swevent_ctx_event(struct perf_event_context *ctx, | 3849 | static void perf_swevent_ctx_event(struct perf_event_context *ctx, |
| 3850 | enum perf_type_id type, | 3850 | enum perf_type_id type, |
| 3851 | u32 event_id, u64 nr, int nmi, | 3851 | u32 event_id, u64 nr, int nmi, |
| 3852 | struct perf_sample_data *data, | 3852 | struct perf_sample_data *data, |
| 3853 | struct pt_regs *regs) | 3853 | struct pt_regs *regs) |
| 3854 | { | 3854 | { |
| 3855 | struct perf_event *event; | 3855 | struct perf_event *event; |
| 3856 | 3856 | ||
| 3857 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | 3857 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) |
| 3858 | return; | 3858 | return; |
| 3859 | 3859 | ||
| 3860 | rcu_read_lock(); | 3860 | rcu_read_lock(); |
| 3861 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3861 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
| 3862 | if (perf_swevent_match(event, type, event_id, data, regs)) | 3862 | if (perf_swevent_match(event, type, event_id, data, regs)) |
| 3863 | perf_swevent_add(event, nr, nmi, data, regs); | 3863 | perf_swevent_add(event, nr, nmi, data, regs); |
| 3864 | } | 3864 | } |
| 3865 | rcu_read_unlock(); | 3865 | rcu_read_unlock(); |
| 3866 | } | 3866 | } |
| 3867 | 3867 | ||
| 3868 | static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) | 3868 | static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) |
| 3869 | { | 3869 | { |
| 3870 | if (in_nmi()) | 3870 | if (in_nmi()) |
| 3871 | return &cpuctx->recursion[3]; | 3871 | return &cpuctx->recursion[3]; |
| 3872 | 3872 | ||
| 3873 | if (in_irq()) | 3873 | if (in_irq()) |
| 3874 | return &cpuctx->recursion[2]; | 3874 | return &cpuctx->recursion[2]; |
| 3875 | 3875 | ||
| 3876 | if (in_softirq()) | 3876 | if (in_softirq()) |
| 3877 | return &cpuctx->recursion[1]; | 3877 | return &cpuctx->recursion[1]; |
| 3878 | 3878 | ||
| 3879 | return &cpuctx->recursion[0]; | 3879 | return &cpuctx->recursion[0]; |
| 3880 | } | 3880 | } |
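/*
 * Editor's note: a standalone sketch (hypothetical names) of the
 * per-context-level recursion guard above. Each execution level gets
 * its own flag, so an event fired from a hardirq that interrupted a
 * softirq-context event is not dropped, while re-entry at the *same*
 * level is.
 */
#include <stdio.h>

enum ctx_level { CTX_TASK, CTX_SOFTIRQ, CTX_HARDIRQ, CTX_NMI };

static int recursion[4];

static int swevent_enter(enum ctx_level level)
{
	if (recursion[level])	/* re-entry at the same level: drop it */
		return 0;
	recursion[level]++;	/* claim this level */
	return 1;
}

static void swevent_exit(enum ctx_level level)
{
	recursion[level]--;	/* release on the way out */
}

int main(void)
{
	swevent_enter(CTX_TASK);	/* a task-level event in flight */
	printf("hardirq nests ok: %d\n", swevent_enter(CTX_HARDIRQ));
	printf("task re-entry dropped: %d\n", swevent_enter(CTX_TASK));
	swevent_exit(CTX_HARDIRQ);
	swevent_exit(CTX_TASK);
	return 0;
}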
| 3881 | 3881 | ||
| 3882 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | 3882 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, |
| 3883 | u64 nr, int nmi, | 3883 | u64 nr, int nmi, |
| 3884 | struct perf_sample_data *data, | 3884 | struct perf_sample_data *data, |
| 3885 | struct pt_regs *regs) | 3885 | struct pt_regs *regs) |
| 3886 | { | 3886 | { |
| 3887 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); | 3887 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); |
| 3888 | int *recursion = perf_swevent_recursion_context(cpuctx); | 3888 | int *recursion = perf_swevent_recursion_context(cpuctx); |
| 3889 | struct perf_event_context *ctx; | 3889 | struct perf_event_context *ctx; |
| 3890 | 3890 | ||
| 3891 | if (*recursion) | 3891 | if (*recursion) |
| 3892 | goto out; | 3892 | goto out; |
| 3893 | 3893 | ||
| 3894 | (*recursion)++; | 3894 | (*recursion)++; |
| 3895 | barrier(); | 3895 | barrier(); |
| 3896 | 3896 | ||
| 3897 | perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, | 3897 | perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, |
| 3898 | nr, nmi, data, regs); | 3898 | nr, nmi, data, regs); |
| 3899 | rcu_read_lock(); | 3899 | rcu_read_lock(); |
| 3900 | /* | 3900 | /* |
| 3901 | * doesn't really matter which of the child contexts the | 3901 | * doesn't really matter which of the child contexts the |
| 3902 | * event ends up in. | 3902 | * event ends up in. |
| 3903 | */ | 3903 | */ |
| 3904 | ctx = rcu_dereference(current->perf_event_ctxp); | 3904 | ctx = rcu_dereference(current->perf_event_ctxp); |
| 3905 | if (ctx) | 3905 | if (ctx) |
| 3906 | perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); | 3906 | perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); |
| 3907 | rcu_read_unlock(); | 3907 | rcu_read_unlock(); |
| 3908 | 3908 | ||
| 3909 | barrier(); | 3909 | barrier(); |
| 3910 | (*recursion)--; | 3910 | (*recursion)--; |
| 3911 | 3911 | ||
| 3912 | out: | 3912 | out: |
| 3913 | put_cpu_var(perf_cpu_context); | 3913 | put_cpu_var(perf_cpu_context); |
| 3914 | } | 3914 | } |
| 3915 | 3915 | ||
| 3916 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | 3916 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, |
| 3917 | struct pt_regs *regs, u64 addr) | 3917 | struct pt_regs *regs, u64 addr) |
| 3918 | { | 3918 | { |
| 3919 | struct perf_sample_data data = { | 3919 | struct perf_sample_data data = { |
| 3920 | .addr = addr, | 3920 | .addr = addr, |
| 3921 | }; | 3921 | }; |
| 3922 | 3922 | ||
| 3923 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, | 3923 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, |
| 3924 | &data, regs); | 3924 | &data, regs); |
| 3925 | } | 3925 | } |
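/*
 * Editor's note: kernel call sites reach this path through the
 * perf_sw_event() wrapper from include/linux/perf_event.h, e.g.
 * (schematically) the scheduler counting migrations:
 *
 *	perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
 *
 * The wrapper checks perf_swevent_enabled[event_id] first, so this is
 * close to free while no such event is open.
 */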
| 3926 | 3926 | ||
| 3927 | static void perf_swevent_read(struct perf_event *event) | 3927 | static void perf_swevent_read(struct perf_event *event) |
| 3928 | { | 3928 | { |
| 3929 | } | 3929 | } |
| 3930 | 3930 | ||
| 3931 | static int perf_swevent_enable(struct perf_event *event) | 3931 | static int perf_swevent_enable(struct perf_event *event) |
| 3932 | { | 3932 | { |
| 3933 | struct hw_perf_event *hwc = &event->hw; | 3933 | struct hw_perf_event *hwc = &event->hw; |
| 3934 | 3934 | ||
| 3935 | if (hwc->sample_period) { | 3935 | if (hwc->sample_period) { |
| 3936 | hwc->last_period = hwc->sample_period; | 3936 | hwc->last_period = hwc->sample_period; |
| 3937 | perf_swevent_set_period(event); | 3937 | perf_swevent_set_period(event); |
| 3938 | } | 3938 | } |
| 3939 | return 0; | 3939 | return 0; |
| 3940 | } | 3940 | } |
| 3941 | 3941 | ||
| 3942 | static void perf_swevent_disable(struct perf_event *event) | 3942 | static void perf_swevent_disable(struct perf_event *event) |
| 3943 | { | 3943 | { |
| 3944 | } | 3944 | } |
| 3945 | 3945 | ||
| 3946 | static const struct pmu perf_ops_generic = { | 3946 | static const struct pmu perf_ops_generic = { |
| 3947 | .enable = perf_swevent_enable, | 3947 | .enable = perf_swevent_enable, |
| 3948 | .disable = perf_swevent_disable, | 3948 | .disable = perf_swevent_disable, |
| 3949 | .read = perf_swevent_read, | 3949 | .read = perf_swevent_read, |
| 3950 | .unthrottle = perf_swevent_unthrottle, | 3950 | .unthrottle = perf_swevent_unthrottle, |
| 3951 | }; | 3951 | }; |
| 3952 | 3952 | ||
| 3953 | /* | 3953 | /* |
| 3954 | * hrtimer based swevent callback | 3954 | * hrtimer based swevent callback |
| 3955 | */ | 3955 | */ |
| 3956 | 3956 | ||
| 3957 | static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | 3957 | static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) |
| 3958 | { | 3958 | { |
| 3959 | enum hrtimer_restart ret = HRTIMER_RESTART; | 3959 | enum hrtimer_restart ret = HRTIMER_RESTART; |
| 3960 | struct perf_sample_data data; | 3960 | struct perf_sample_data data; |
| 3961 | struct pt_regs *regs; | 3961 | struct pt_regs *regs; |
| 3962 | struct perf_event *event; | 3962 | struct perf_event *event; |
| 3963 | u64 period; | 3963 | u64 period; |
| 3964 | 3964 | ||
| 3965 | event = container_of(hrtimer, struct perf_event, hw.hrtimer); | 3965 | event = container_of(hrtimer, struct perf_event, hw.hrtimer); |
| 3966 | event->pmu->read(event); | 3966 | event->pmu->read(event); |
| 3967 | 3967 | ||
| 3968 | data.addr = 0; | 3968 | data.addr = 0; |
| 3969 | regs = get_irq_regs(); | 3969 | regs = get_irq_regs(); |
| 3970 | /* | 3970 | /* |
| 3971 | * In case we exclude kernel IPs or are somehow not in interrupt | 3971 | * In case we exclude kernel IPs or are somehow not in interrupt |
| 3972 | * context, provide the next best thing, the user IP. | 3972 | * context, provide the next best thing, the user IP. |
| 3973 | */ | 3973 | */ |
| 3974 | if ((event->attr.exclude_kernel || !regs) && | 3974 | if ((event->attr.exclude_kernel || !regs) && |
| 3975 | !event->attr.exclude_user) | 3975 | !event->attr.exclude_user) |
| 3976 | regs = task_pt_regs(current); | 3976 | regs = task_pt_regs(current); |
| 3977 | 3977 | ||
| 3978 | if (regs) { | 3978 | if (regs) { |
| 3979 | if (!(event->attr.exclude_idle && current->pid == 0)) | 3979 | if (!(event->attr.exclude_idle && current->pid == 0)) |
| 3980 | if (perf_event_overflow(event, 0, &data, regs)) | 3980 | if (perf_event_overflow(event, 0, &data, regs)) |
| 3981 | ret = HRTIMER_NORESTART; | 3981 | ret = HRTIMER_NORESTART; |
| 3982 | } | 3982 | } |
| 3983 | 3983 | ||
| 3984 | period = max_t(u64, 10000, event->hw.sample_period); | 3984 | period = max_t(u64, 10000, event->hw.sample_period); |
| 3985 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | 3985 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); |
| 3986 | 3986 | ||
| 3987 | return ret; | 3987 | return ret; |
| 3988 | } | 3988 | } |
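/*
 * Editor's note: the max_t(u64, 10000, ...) clamp above bounds the
 * timer to one expiry per 10 usec, i.e. at most ~100,000 interrupts
 * per second regardless of how small a sample_period the user asked
 * for. For example, a requested period of 4000 ns is re-armed at
 * max(10000, 4000) = 10000 ns.
 */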
| 3989 | 3989 | ||
| 3990 | static void perf_swevent_start_hrtimer(struct perf_event *event) | 3990 | static void perf_swevent_start_hrtimer(struct perf_event *event) |
| 3991 | { | 3991 | { |
| 3992 | struct hw_perf_event *hwc = &event->hw; | 3992 | struct hw_perf_event *hwc = &event->hw; |
| 3993 | 3993 | ||
| 3994 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 3994 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 3995 | hwc->hrtimer.function = perf_swevent_hrtimer; | 3995 | hwc->hrtimer.function = perf_swevent_hrtimer; |
| 3996 | if (hwc->sample_period) { | 3996 | if (hwc->sample_period) { |
| 3997 | u64 period; | 3997 | u64 period; |
| 3998 | 3998 | ||
| 3999 | if (hwc->remaining) { | 3999 | if (hwc->remaining) { |
| 4000 | if (hwc->remaining < 0) | 4000 | if (hwc->remaining < 0) |
| 4001 | period = 10000; | 4001 | period = 10000; |
| 4002 | else | 4002 | else |
| 4003 | period = hwc->remaining; | 4003 | period = hwc->remaining; |
| 4004 | hwc->remaining = 0; | 4004 | hwc->remaining = 0; |
| 4005 | } else { | 4005 | } else { |
| 4006 | period = max_t(u64, 10000, hwc->sample_period); | 4006 | period = max_t(u64, 10000, hwc->sample_period); |
| 4007 | } | 4007 | } |
| 4008 | __hrtimer_start_range_ns(&hwc->hrtimer, | 4008 | __hrtimer_start_range_ns(&hwc->hrtimer, |
| 4009 | ns_to_ktime(period), 0, | 4009 | ns_to_ktime(period), 0, |
| 4010 | HRTIMER_MODE_REL, 0); | 4010 | HRTIMER_MODE_REL, 0); |
| 4011 | } | 4011 | } |
| 4012 | } | 4012 | } |
| 4013 | 4013 | ||
| 4014 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) | 4014 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) |
| 4015 | { | 4015 | { |
| 4016 | struct hw_perf_event *hwc = &event->hw; | 4016 | struct hw_perf_event *hwc = &event->hw; |
| 4017 | 4017 | ||
| 4018 | if (hwc->sample_period) { | 4018 | if (hwc->sample_period) { |
| 4019 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | 4019 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); |
| 4020 | hwc->remaining = ktime_to_ns(remaining); | 4020 | hwc->remaining = ktime_to_ns(remaining); |
| 4021 | 4021 | ||
| 4022 | hrtimer_cancel(&hwc->hrtimer); | 4022 | hrtimer_cancel(&hwc->hrtimer); |
| 4023 | } | 4023 | } |
| 4024 | } | 4024 | } |
| 4025 | 4025 | ||
| 4026 | /* | 4026 | /* |
| 4027 | * Software event: cpu wall time clock | 4027 | * Software event: cpu wall time clock |
| 4028 | */ | 4028 | */ |
| 4029 | 4029 | ||
| 4030 | static void cpu_clock_perf_event_update(struct perf_event *event) | 4030 | static void cpu_clock_perf_event_update(struct perf_event *event) |
| 4031 | { | 4031 | { |
| 4032 | int cpu = raw_smp_processor_id(); | 4032 | int cpu = raw_smp_processor_id(); |
| 4033 | s64 prev; | 4033 | s64 prev; |
| 4034 | u64 now; | 4034 | u64 now; |
| 4035 | 4035 | ||
| 4036 | now = cpu_clock(cpu); | 4036 | now = cpu_clock(cpu); |
| 4037 | prev = atomic64_read(&event->hw.prev_count); | 4037 | prev = atomic64_read(&event->hw.prev_count); |
| 4038 | atomic64_set(&event->hw.prev_count, now); | 4038 | atomic64_set(&event->hw.prev_count, now); |
| 4039 | atomic64_add(now - prev, &event->count); | 4039 | atomic64_add(now - prev, &event->count); |
| 4040 | } | 4040 | } |
| 4041 | 4041 | ||
| 4042 | static int cpu_clock_perf_event_enable(struct perf_event *event) | 4042 | static int cpu_clock_perf_event_enable(struct perf_event *event) |
| 4043 | { | 4043 | { |
| 4044 | struct hw_perf_event *hwc = &event->hw; | 4044 | struct hw_perf_event *hwc = &event->hw; |
| 4045 | int cpu = raw_smp_processor_id(); | 4045 | int cpu = raw_smp_processor_id(); |
| 4046 | 4046 | ||
| 4047 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | 4047 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); |
| 4048 | perf_swevent_start_hrtimer(event); | 4048 | perf_swevent_start_hrtimer(event); |
| 4049 | 4049 | ||
| 4050 | return 0; | 4050 | return 0; |
| 4051 | } | 4051 | } |
| 4052 | 4052 | ||
| 4053 | static void cpu_clock_perf_event_disable(struct perf_event *event) | 4053 | static void cpu_clock_perf_event_disable(struct perf_event *event) |
| 4054 | { | 4054 | { |
| 4055 | perf_swevent_cancel_hrtimer(event); | 4055 | perf_swevent_cancel_hrtimer(event); |
| 4056 | cpu_clock_perf_event_update(event); | 4056 | cpu_clock_perf_event_update(event); |
| 4057 | } | 4057 | } |
| 4058 | 4058 | ||
| 4059 | static void cpu_clock_perf_event_read(struct perf_event *event) | 4059 | static void cpu_clock_perf_event_read(struct perf_event *event) |
| 4060 | { | 4060 | { |
| 4061 | cpu_clock_perf_event_update(event); | 4061 | cpu_clock_perf_event_update(event); |
| 4062 | } | 4062 | } |
| 4063 | 4063 | ||
| 4064 | static const struct pmu perf_ops_cpu_clock = { | 4064 | static const struct pmu perf_ops_cpu_clock = { |
| 4065 | .enable = cpu_clock_perf_event_enable, | 4065 | .enable = cpu_clock_perf_event_enable, |
| 4066 | .disable = cpu_clock_perf_event_disable, | 4066 | .disable = cpu_clock_perf_event_disable, |
| 4067 | .read = cpu_clock_perf_event_read, | 4067 | .read = cpu_clock_perf_event_read, |
| 4068 | }; | 4068 | }; |
| 4069 | 4069 | ||
| 4070 | /* | 4070 | /* |
| 4071 | * Software event: task time clock | 4071 | * Software event: task time clock |
| 4072 | */ | 4072 | */ |
| 4073 | 4073 | ||
| 4074 | static void task_clock_perf_event_update(struct perf_event *event, u64 now) | 4074 | static void task_clock_perf_event_update(struct perf_event *event, u64 now) |
| 4075 | { | 4075 | { |
| 4076 | u64 prev; | 4076 | u64 prev; |
| 4077 | s64 delta; | 4077 | s64 delta; |
| 4078 | 4078 | ||
| 4079 | prev = atomic64_xchg(&event->hw.prev_count, now); | 4079 | prev = atomic64_xchg(&event->hw.prev_count, now); |
| 4080 | delta = now - prev; | 4080 | delta = now - prev; |
| 4081 | atomic64_add(delta, &event->count); | 4081 | atomic64_add(delta, &event->count); |
| 4082 | } | 4082 | } |
| 4083 | 4083 | ||
| 4084 | static int task_clock_perf_event_enable(struct perf_event *event) | 4084 | static int task_clock_perf_event_enable(struct perf_event *event) |
| 4085 | { | 4085 | { |
| 4086 | struct hw_perf_event *hwc = &event->hw; | 4086 | struct hw_perf_event *hwc = &event->hw; |
| 4087 | u64 now; | 4087 | u64 now; |
| 4088 | 4088 | ||
| 4089 | now = event->ctx->time; | 4089 | now = event->ctx->time; |
| 4090 | 4090 | ||
| 4091 | atomic64_set(&hwc->prev_count, now); | 4091 | atomic64_set(&hwc->prev_count, now); |
| 4092 | 4092 | ||
| 4093 | perf_swevent_start_hrtimer(event); | 4093 | perf_swevent_start_hrtimer(event); |
| 4094 | 4094 | ||
| 4095 | return 0; | 4095 | return 0; |
| 4096 | } | 4096 | } |
| 4097 | 4097 | ||
| 4098 | static void task_clock_perf_event_disable(struct perf_event *event) | 4098 | static void task_clock_perf_event_disable(struct perf_event *event) |
| 4099 | { | 4099 | { |
| 4100 | perf_swevent_cancel_hrtimer(event); | 4100 | perf_swevent_cancel_hrtimer(event); |
| 4101 | task_clock_perf_event_update(event, event->ctx->time); | 4101 | task_clock_perf_event_update(event, event->ctx->time); |
| 4102 | 4102 | ||
| 4103 | } | 4103 | } |
| 4104 | 4104 | ||
| 4105 | static void task_clock_perf_event_read(struct perf_event *event) | 4105 | static void task_clock_perf_event_read(struct perf_event *event) |
| 4106 | { | 4106 | { |
| 4107 | u64 time; | 4107 | u64 time; |
| 4108 | 4108 | ||
| 4109 | if (!in_nmi()) { | 4109 | if (!in_nmi()) { |
| 4110 | update_context_time(event->ctx); | 4110 | update_context_time(event->ctx); |
| 4111 | time = event->ctx->time; | 4111 | time = event->ctx->time; |
| 4112 | } else { | 4112 | } else { |
| 4113 | u64 now = perf_clock(); | 4113 | u64 now = perf_clock(); |
| 4114 | u64 delta = now - event->ctx->timestamp; | 4114 | u64 delta = now - event->ctx->timestamp; |
| 4115 | time = event->ctx->time + delta; | 4115 | time = event->ctx->time + delta; |
| 4116 | } | 4116 | } |
| 4117 | 4117 | ||
| 4118 | task_clock_perf_event_update(event, time); | 4118 | task_clock_perf_event_update(event, time); |
| 4119 | } | 4119 | } |
| 4120 | 4120 | ||
| 4121 | static const struct pmu perf_ops_task_clock = { | 4121 | static const struct pmu perf_ops_task_clock = { |
| 4122 | .enable = task_clock_perf_event_enable, | 4122 | .enable = task_clock_perf_event_enable, |
| 4123 | .disable = task_clock_perf_event_disable, | 4123 | .disable = task_clock_perf_event_disable, |
| 4124 | .read = task_clock_perf_event_read, | 4124 | .read = task_clock_perf_event_read, |
| 4125 | }; | 4125 | }; |
| 4126 | 4126 | ||
| 4127 | #ifdef CONFIG_EVENT_PROFILE | 4127 | #ifdef CONFIG_EVENT_PROFILE |
| 4128 | 4128 | ||
| 4129 | void perf_tp_event(int event_id, u64 addr, u64 count, void *record, | 4129 | void perf_tp_event(int event_id, u64 addr, u64 count, void *record, |
| 4130 | int entry_size) | 4130 | int entry_size) |
| 4131 | { | 4131 | { |
| 4132 | struct perf_raw_record raw = { | 4132 | struct perf_raw_record raw = { |
| 4133 | .size = entry_size, | 4133 | .size = entry_size, |
| 4134 | .data = record, | 4134 | .data = record, |
| 4135 | }; | 4135 | }; |
| 4136 | 4136 | ||
| 4137 | struct perf_sample_data data = { | 4137 | struct perf_sample_data data = { |
| 4138 | .addr = addr, | 4138 | .addr = addr, |
| 4139 | .raw = &raw, | 4139 | .raw = &raw, |
| 4140 | }; | 4140 | }; |
| 4141 | 4141 | ||
| 4142 | struct pt_regs *regs = get_irq_regs(); | 4142 | struct pt_regs *regs = get_irq_regs(); |
| 4143 | 4143 | ||
| 4144 | if (!regs) | 4144 | if (!regs) |
| 4145 | regs = task_pt_regs(current); | 4145 | regs = task_pt_regs(current); |
| 4146 | 4146 | ||
| 4147 | do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, | 4147 | do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, |
| 4148 | &data, regs); | 4148 | &data, regs); |
| 4149 | } | 4149 | } |
| 4150 | EXPORT_SYMBOL_GPL(perf_tp_event); | 4150 | EXPORT_SYMBOL_GPL(perf_tp_event); |
| 4151 | 4151 | ||
| 4152 | static int perf_tp_event_match(struct perf_event *event, | 4152 | static int perf_tp_event_match(struct perf_event *event, |
| 4153 | struct perf_sample_data *data) | 4153 | struct perf_sample_data *data) |
| 4154 | { | 4154 | { |
| 4155 | void *record = data->raw->data; | 4155 | void *record = data->raw->data; |
| 4156 | 4156 | ||
| 4157 | if (likely(!event->filter) || filter_match_preds(event->filter, record)) | 4157 | if (likely(!event->filter) || filter_match_preds(event->filter, record)) |
| 4158 | return 1; | 4158 | return 1; |
| 4159 | return 0; | 4159 | return 0; |
| 4160 | } | 4160 | } |
| 4161 | 4161 | ||
| 4162 | static void tp_perf_event_destroy(struct perf_event *event) | 4162 | static void tp_perf_event_destroy(struct perf_event *event) |
| 4163 | { | 4163 | { |
| 4164 | ftrace_profile_disable(event->attr.config); | 4164 | ftrace_profile_disable(event->attr.config); |
| 4165 | } | 4165 | } |
| 4166 | 4166 | ||
| 4167 | static const struct pmu *tp_perf_event_init(struct perf_event *event) | 4167 | static const struct pmu *tp_perf_event_init(struct perf_event *event) |
| 4168 | { | 4168 | { |
| 4169 | /* | 4169 | /* |
| 4170 | * Raw tracepoint data is a severe data leak; only allow root to | 4170 | * Raw tracepoint data is a severe data leak; only allow root to |
| 4171 | * have these. | 4171 | * have these. |
| 4172 | */ | 4172 | */ |
| 4173 | if ((event->attr.sample_type & PERF_SAMPLE_RAW) && | 4173 | if ((event->attr.sample_type & PERF_SAMPLE_RAW) && |
| 4174 | perf_paranoid_tracepoint_raw() && | 4174 | perf_paranoid_tracepoint_raw() && |
| 4175 | !capable(CAP_SYS_ADMIN)) | 4175 | !capable(CAP_SYS_ADMIN)) |
| 4176 | return ERR_PTR(-EPERM); | 4176 | return ERR_PTR(-EPERM); |
| 4177 | 4177 | ||
| 4178 | if (ftrace_profile_enable(event->attr.config)) | 4178 | if (ftrace_profile_enable(event->attr.config)) |
| 4179 | return NULL; | 4179 | return NULL; |
| 4180 | 4180 | ||
| 4181 | event->destroy = tp_perf_event_destroy; | 4181 | event->destroy = tp_perf_event_destroy; |
| 4182 | 4182 | ||
| 4183 | return &perf_ops_generic; | 4183 | return &perf_ops_generic; |
| 4184 | } | 4184 | } |
| 4185 | 4185 | ||
| 4186 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | 4186 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) |
| 4187 | { | 4187 | { |
| 4188 | char *filter_str; | 4188 | char *filter_str; |
| 4189 | int ret; | 4189 | int ret; |
| 4190 | 4190 | ||
| 4191 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | 4191 | if (event->attr.type != PERF_TYPE_TRACEPOINT) |
| 4192 | return -EINVAL; | 4192 | return -EINVAL; |
| 4193 | 4193 | ||
| 4194 | filter_str = strndup_user(arg, PAGE_SIZE); | 4194 | filter_str = strndup_user(arg, PAGE_SIZE); |
| 4195 | if (IS_ERR(filter_str)) | 4195 | if (IS_ERR(filter_str)) |
| 4196 | return PTR_ERR(filter_str); | 4196 | return PTR_ERR(filter_str); |
| 4197 | 4197 | ||
| 4198 | ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); | 4198 | ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); |
| 4199 | 4199 | ||
| 4200 | kfree(filter_str); | 4200 | kfree(filter_str); |
| 4201 | return ret; | 4201 | return ret; |
| 4202 | } | 4202 | } |
| 4203 | 4203 | ||
| 4204 | static void perf_event_free_filter(struct perf_event *event) | 4204 | static void perf_event_free_filter(struct perf_event *event) |
| 4205 | { | 4205 | { |
| 4206 | ftrace_profile_free_filter(event); | 4206 | ftrace_profile_free_filter(event); |
| 4207 | } | 4207 | } |
| 4208 | 4208 | ||
| 4209 | #else | 4209 | #else |
| 4210 | 4210 | ||
| 4211 | static int perf_tp_event_match(struct perf_event *event, | 4211 | static int perf_tp_event_match(struct perf_event *event, |
| 4212 | struct perf_sample_data *data) | 4212 | struct perf_sample_data *data) |
| 4213 | { | 4213 | { |
| 4214 | return 1; | 4214 | return 1; |
| 4215 | } | 4215 | } |
| 4216 | 4216 | ||
| 4217 | static const struct pmu *tp_perf_event_init(struct perf_event *event) | 4217 | static const struct pmu *tp_perf_event_init(struct perf_event *event) |
| 4218 | { | 4218 | { |
| 4219 | return NULL; | 4219 | return NULL; |
| 4220 | } | 4220 | } |
| 4221 | 4221 | ||
| 4222 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | 4222 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) |
| 4223 | { | 4223 | { |
| 4224 | return -ENOENT; | 4224 | return -ENOENT; |
| 4225 | } | 4225 | } |
| 4226 | 4226 | ||
| 4227 | static void perf_event_free_filter(struct perf_event *event) | 4227 | static void perf_event_free_filter(struct perf_event *event) |
| 4228 | { | 4228 | { |
| 4229 | } | 4229 | } |
| 4230 | 4230 | ||
| 4231 | #endif /* CONFIG_EVENT_PROFILE */ | 4231 | #endif /* CONFIG_EVENT_PROFILE */ |
| 4232 | 4232 | ||
| 4233 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 4233 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
| 4234 | 4234 | ||
| 4235 | static void sw_perf_event_destroy(struct perf_event *event) | 4235 | static void sw_perf_event_destroy(struct perf_event *event) |
| 4236 | { | 4236 | { |
| 4237 | u64 event_id = event->attr.config; | 4237 | u64 event_id = event->attr.config; |
| 4238 | 4238 | ||
| 4239 | WARN_ON(event->parent); | 4239 | WARN_ON(event->parent); |
| 4240 | 4240 | ||
| 4241 | atomic_dec(&perf_swevent_enabled[event_id]); | 4241 | atomic_dec(&perf_swevent_enabled[event_id]); |
| 4242 | } | 4242 | } |
| 4243 | 4243 | ||
| 4244 | static const struct pmu *sw_perf_event_init(struct perf_event *event) | 4244 | static const struct pmu *sw_perf_event_init(struct perf_event *event) |
| 4245 | { | 4245 | { |
| 4246 | const struct pmu *pmu = NULL; | 4246 | const struct pmu *pmu = NULL; |
| 4247 | u64 event_id = event->attr.config; | 4247 | u64 event_id = event->attr.config; |
| 4248 | 4248 | ||
| 4249 | /* | 4249 | /* |
| 4250 | * Software events (currently) can't in general distinguish | 4250 | * Software events (currently) can't in general distinguish |
| 4251 | * between user, kernel and hypervisor events. | 4251 | * between user, kernel and hypervisor events. |
| 4252 | * However, context switches and cpu migrations are considered | 4252 | * However, context switches and cpu migrations are considered |
| 4253 | * to be kernel events, and page faults are never hypervisor | 4253 | * to be kernel events, and page faults are never hypervisor |
| 4254 | * events. | 4254 | * events. |
| 4255 | */ | 4255 | */ |
| 4256 | switch (event_id) { | 4256 | switch (event_id) { |
| 4257 | case PERF_COUNT_SW_CPU_CLOCK: | 4257 | case PERF_COUNT_SW_CPU_CLOCK: |
| 4258 | pmu = &perf_ops_cpu_clock; | 4258 | pmu = &perf_ops_cpu_clock; |
| 4259 | 4259 | ||
| 4260 | break; | 4260 | break; |
| 4261 | case PERF_COUNT_SW_TASK_CLOCK: | 4261 | case PERF_COUNT_SW_TASK_CLOCK: |
| 4262 | /* | 4262 | /* |
| 4263 | * If the user instantiates this as a per-cpu event, | 4263 | * If the user instantiates this as a per-cpu event, |
| 4264 | * use the cpu_clock event instead. | 4264 | * use the cpu_clock event instead. |
| 4265 | */ | 4265 | */ |
| 4266 | if (event->ctx->task) | 4266 | if (event->ctx->task) |
| 4267 | pmu = &perf_ops_task_clock; | 4267 | pmu = &perf_ops_task_clock; |
| 4268 | else | 4268 | else |
| 4269 | pmu = &perf_ops_cpu_clock; | 4269 | pmu = &perf_ops_cpu_clock; |
| 4270 | 4270 | ||
| 4271 | break; | 4271 | break; |
| 4272 | case PERF_COUNT_SW_PAGE_FAULTS: | 4272 | case PERF_COUNT_SW_PAGE_FAULTS: |
| 4273 | case PERF_COUNT_SW_PAGE_FAULTS_MIN: | 4273 | case PERF_COUNT_SW_PAGE_FAULTS_MIN: |
| 4274 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: | 4274 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: |
| 4275 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | 4275 | case PERF_COUNT_SW_CONTEXT_SWITCHES: |
| 4276 | case PERF_COUNT_SW_CPU_MIGRATIONS: | 4276 | case PERF_COUNT_SW_CPU_MIGRATIONS: |
| | | 4277 | case PERF_COUNT_SW_ALIGNMENT_FAULTS: |
| | | 4278 | case PERF_COUNT_SW_EMULATION_FAULTS: |
| 4277 | if (!event->parent) { | 4279 | if (!event->parent) { |
| 4278 | atomic_inc(&perf_swevent_enabled[event_id]); | 4280 | atomic_inc(&perf_swevent_enabled[event_id]); |
| 4279 | event->destroy = sw_perf_event_destroy; | 4281 | event->destroy = sw_perf_event_destroy; |
| 4280 | } | 4282 | } |
| 4281 | pmu = &perf_ops_generic; | 4283 | pmu = &perf_ops_generic; |
| 4282 | break; | 4284 | break; |
| 4283 | } | 4285 | } |
| 4284 | 4286 | ||
| 4285 | return pmu; | 4287 | return pmu; |
| 4286 | } | 4288 | } |
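/*
 * Editor's note: a hedged userspace sketch (not part of this patch)
 * that exercises the dispatch above. Opening PERF_COUNT_SW_TASK_CLOCK
 * on the current task selects perf_ops_task_clock; passing pid = -1
 * with a real cpu would select perf_ops_cpu_clock instead.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;

	/* pid = 0: this task, cpu = -1: any cpu, no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;		/* burn some task time */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task clock: %llu ns\n", (unsigned long long)count);
	close(fd);
	return 0;
}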
| 4287 | 4289 | ||
| 4288 | /* | 4290 | /* |
| 4289 | * Allocate and initialize an event structure | 4291 | * Allocate and initialize an event structure |
| 4290 | */ | 4292 | */ |
| 4291 | static struct perf_event * | 4293 | static struct perf_event * |
| 4292 | perf_event_alloc(struct perf_event_attr *attr, | 4294 | perf_event_alloc(struct perf_event_attr *attr, |
| 4293 | int cpu, | 4295 | int cpu, |
| 4294 | struct perf_event_context *ctx, | 4296 | struct perf_event_context *ctx, |
| 4295 | struct perf_event *group_leader, | 4297 | struct perf_event *group_leader, |
| 4296 | struct perf_event *parent_event, | 4298 | struct perf_event *parent_event, |
| 4297 | gfp_t gfpflags) | 4299 | gfp_t gfpflags) |
| 4298 | { | 4300 | { |
| 4299 | const struct pmu *pmu; | 4301 | const struct pmu *pmu; |
| 4300 | struct perf_event *event; | 4302 | struct perf_event *event; |
| 4301 | struct hw_perf_event *hwc; | 4303 | struct hw_perf_event *hwc; |
| 4302 | long err; | 4304 | long err; |
| 4303 | 4305 | ||
| 4304 | event = kzalloc(sizeof(*event), gfpflags); | 4306 | event = kzalloc(sizeof(*event), gfpflags); |
| 4305 | if (!event) | 4307 | if (!event) |
| 4306 | return ERR_PTR(-ENOMEM); | 4308 | return ERR_PTR(-ENOMEM); |
| 4307 | 4309 | ||
| 4308 | /* | 4310 | /* |
| 4309 | * Single events are their own group leaders, with an | 4311 | * Single events are their own group leaders, with an |
| 4310 | * empty sibling list: | 4312 | * empty sibling list: |
| 4311 | */ | 4313 | */ |
| 4312 | if (!group_leader) | 4314 | if (!group_leader) |
| 4313 | group_leader = event; | 4315 | group_leader = event; |
| 4314 | 4316 | ||
| 4315 | mutex_init(&event->child_mutex); | 4317 | mutex_init(&event->child_mutex); |
| 4316 | INIT_LIST_HEAD(&event->child_list); | 4318 | INIT_LIST_HEAD(&event->child_list); |
| 4317 | 4319 | ||
| 4318 | INIT_LIST_HEAD(&event->group_entry); | 4320 | INIT_LIST_HEAD(&event->group_entry); |
| 4319 | INIT_LIST_HEAD(&event->event_entry); | 4321 | INIT_LIST_HEAD(&event->event_entry); |
| 4320 | INIT_LIST_HEAD(&event->sibling_list); | 4322 | INIT_LIST_HEAD(&event->sibling_list); |
| 4321 | init_waitqueue_head(&event->waitq); | 4323 | init_waitqueue_head(&event->waitq); |
| 4322 | 4324 | ||
| 4323 | mutex_init(&event->mmap_mutex); | 4325 | mutex_init(&event->mmap_mutex); |
| 4324 | 4326 | ||
| 4325 | event->cpu = cpu; | 4327 | event->cpu = cpu; |
| 4326 | event->attr = *attr; | 4328 | event->attr = *attr; |
| 4327 | event->group_leader = group_leader; | 4329 | event->group_leader = group_leader; |
| 4328 | event->pmu = NULL; | 4330 | event->pmu = NULL; |
| 4329 | event->ctx = ctx; | 4331 | event->ctx = ctx; |
| 4330 | event->oncpu = -1; | 4332 | event->oncpu = -1; |
| 4331 | 4333 | ||
| 4332 | event->parent = parent_event; | 4334 | event->parent = parent_event; |
| 4333 | 4335 | ||
| 4334 | event->ns = get_pid_ns(current->nsproxy->pid_ns); | 4336 | event->ns = get_pid_ns(current->nsproxy->pid_ns); |
| 4335 | event->id = atomic64_inc_return(&perf_event_id); | 4337 | event->id = atomic64_inc_return(&perf_event_id); |
| 4336 | 4338 | ||
| 4337 | event->state = PERF_EVENT_STATE_INACTIVE; | 4339 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 4338 | 4340 | ||
| 4339 | if (attr->disabled) | 4341 | if (attr->disabled) |
| 4340 | event->state = PERF_EVENT_STATE_OFF; | 4342 | event->state = PERF_EVENT_STATE_OFF; |
| 4341 | 4343 | ||
| 4342 | pmu = NULL; | 4344 | pmu = NULL; |
| 4343 | 4345 | ||
| 4344 | hwc = &event->hw; | 4346 | hwc = &event->hw; |
| 4345 | hwc->sample_period = attr->sample_period; | 4347 | hwc->sample_period = attr->sample_period; |
| 4346 | if (attr->freq && attr->sample_freq) | 4348 | if (attr->freq && attr->sample_freq) |
| 4347 | hwc->sample_period = 1; | 4349 | hwc->sample_period = 1; |
| 4348 | hwc->last_period = hwc->sample_period; | 4350 | hwc->last_period = hwc->sample_period; |
| 4349 | 4351 | ||
| 4350 | atomic64_set(&hwc->period_left, hwc->sample_period); | 4352 | atomic64_set(&hwc->period_left, hwc->sample_period); |
| 4351 | 4353 | ||
| 4352 | /* | 4354 | /* |
| 4353 | * we currently do not support PERF_FORMAT_GROUP on inherited events | 4355 | * we currently do not support PERF_FORMAT_GROUP on inherited events |
| 4354 | */ | 4356 | */ |
| 4355 | if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) | 4357 | if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) |
| 4356 | goto done; | 4358 | goto done; |
| 4357 | 4359 | ||
| 4358 | switch (attr->type) { | 4360 | switch (attr->type) { |
| 4359 | case PERF_TYPE_RAW: | 4361 | case PERF_TYPE_RAW: |
| 4360 | case PERF_TYPE_HARDWARE: | 4362 | case PERF_TYPE_HARDWARE: |
| 4361 | case PERF_TYPE_HW_CACHE: | 4363 | case PERF_TYPE_HW_CACHE: |
| 4362 | pmu = hw_perf_event_init(event); | 4364 | pmu = hw_perf_event_init(event); |
| 4363 | break; | 4365 | break; |
| 4364 | 4366 | ||
| 4365 | case PERF_TYPE_SOFTWARE: | 4367 | case PERF_TYPE_SOFTWARE: |
| 4366 | pmu = sw_perf_event_init(event); | 4368 | pmu = sw_perf_event_init(event); |
| 4367 | break; | 4369 | break; |
| 4368 | 4370 | ||
| 4369 | case PERF_TYPE_TRACEPOINT: | 4371 | case PERF_TYPE_TRACEPOINT: |
| 4370 | pmu = tp_perf_event_init(event); | 4372 | pmu = tp_perf_event_init(event); |
| 4371 | break; | 4373 | break; |
| 4372 | 4374 | ||
| 4373 | default: | 4375 | default: |
| 4374 | break; | 4376 | break; |
| 4375 | } | 4377 | } |
| 4376 | done: | 4378 | done: |
| 4377 | err = 0; | 4379 | err = 0; |
| 4378 | if (!pmu) | 4380 | if (!pmu) |
| 4379 | err = -EINVAL; | 4381 | err = -EINVAL; |
| 4380 | else if (IS_ERR(pmu)) | 4382 | else if (IS_ERR(pmu)) |
| 4381 | err = PTR_ERR(pmu); | 4383 | err = PTR_ERR(pmu); |
| 4382 | 4384 | ||
| 4383 | if (err) { | 4385 | if (err) { |
| 4384 | if (event->ns) | 4386 | if (event->ns) |
| 4385 | put_pid_ns(event->ns); | 4387 | put_pid_ns(event->ns); |
| 4386 | kfree(event); | 4388 | kfree(event); |
| 4387 | return ERR_PTR(err); | 4389 | return ERR_PTR(err); |
| 4388 | } | 4390 | } |
| 4389 | 4391 | ||
| 4390 | event->pmu = pmu; | 4392 | event->pmu = pmu; |
| 4391 | 4393 | ||
| 4392 | if (!event->parent) { | 4394 | if (!event->parent) { |
| 4393 | atomic_inc(&nr_events); | 4395 | atomic_inc(&nr_events); |
| 4394 | if (event->attr.mmap) | 4396 | if (event->attr.mmap) |
| 4395 | atomic_inc(&nr_mmap_events); | 4397 | atomic_inc(&nr_mmap_events); |
| 4396 | if (event->attr.comm) | 4398 | if (event->attr.comm) |
| 4397 | atomic_inc(&nr_comm_events); | 4399 | atomic_inc(&nr_comm_events); |
| 4398 | if (event->attr.task) | 4400 | if (event->attr.task) |
| 4399 | atomic_inc(&nr_task_events); | 4401 | atomic_inc(&nr_task_events); |
| 4400 | } | 4402 | } |
| 4401 | 4403 | ||
| 4402 | return event; | 4404 | return event; |
| 4403 | } | 4405 | } |
| 4404 | 4406 | ||
| 4405 | static int perf_copy_attr(struct perf_event_attr __user *uattr, | 4407 | static int perf_copy_attr(struct perf_event_attr __user *uattr, |
| 4406 | struct perf_event_attr *attr) | 4408 | struct perf_event_attr *attr) |
| 4407 | { | 4409 | { |
| 4408 | u32 size; | 4410 | u32 size; |
| 4409 | int ret; | 4411 | int ret; |
| 4410 | 4412 | ||
| 4411 | if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) | 4413 | if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) |
| 4412 | return -EFAULT; | 4414 | return -EFAULT; |
| 4413 | 4415 | ||
| 4414 | /* | 4416 | /* |
| 4415 | * Zero the full structure so that a short copy leaves the tail zeroed. | 4417 | * Zero the full structure so that a short copy leaves the tail zeroed. |
| 4416 | */ | 4418 | */ |
| 4417 | memset(attr, 0, sizeof(*attr)); | 4419 | memset(attr, 0, sizeof(*attr)); |
| 4418 | 4420 | ||
| 4419 | ret = get_user(size, &uattr->size); | 4421 | ret = get_user(size, &uattr->size); |
| 4420 | if (ret) | 4422 | if (ret) |
| 4421 | return ret; | 4423 | return ret; |
| 4422 | 4424 | ||
| 4423 | if (size > PAGE_SIZE) /* silly large */ | 4425 | if (size > PAGE_SIZE) /* silly large */ |
| 4424 | goto err_size; | 4426 | goto err_size; |
| 4425 | 4427 | ||
| 4426 | if (!size) /* abi compat */ | 4428 | if (!size) /* abi compat */ |
| 4427 | size = PERF_ATTR_SIZE_VER0; | 4429 | size = PERF_ATTR_SIZE_VER0; |
| 4428 | 4430 | ||
| 4429 | if (size < PERF_ATTR_SIZE_VER0) | 4431 | if (size < PERF_ATTR_SIZE_VER0) |
| 4430 | goto err_size; | 4432 | goto err_size; |
| 4431 | 4433 | ||
| 4432 | /* | 4434 | /* |
| 4433 | * If we're handed a bigger struct than we know of, | 4435 | * If we're handed a bigger struct than we know of, |
| 4434 | * ensure all the unknown bits are 0 - i.e. new | 4436 | * ensure all the unknown bits are 0 - i.e. new |
| 4435 | * user-space does not rely on any kernel feature | 4437 | * user-space does not rely on any kernel feature |
| 4436 | * extensions we don't know about yet. | 4438 | * extensions we don't know about yet. |
| 4437 | */ | 4439 | */ |
| 4438 | if (size > sizeof(*attr)) { | 4440 | if (size > sizeof(*attr)) { |
| 4439 | unsigned char __user *addr; | 4441 | unsigned char __user *addr; |
| 4440 | unsigned char __user *end; | 4442 | unsigned char __user *end; |
| 4441 | unsigned char val; | 4443 | unsigned char val; |
| 4442 | 4444 | ||
| 4443 | addr = (void __user *)uattr + sizeof(*attr); | 4445 | addr = (void __user *)uattr + sizeof(*attr); |
| 4444 | end = (void __user *)uattr + size; | 4446 | end = (void __user *)uattr + size; |
| 4445 | 4447 | ||
| 4446 | for (; addr < end; addr++) { | 4448 | for (; addr < end; addr++) { |
| 4447 | ret = get_user(val, addr); | 4449 | ret = get_user(val, addr); |
| 4448 | if (ret) | 4450 | if (ret) |
| 4449 | return ret; | 4451 | return ret; |
| 4450 | if (val) | 4452 | if (val) |
| 4451 | goto err_size; | 4453 | goto err_size; |
| 4452 | } | 4454 | } |
| 4453 | size = sizeof(*attr); | 4455 | size = sizeof(*attr); |
| 4454 | } | 4456 | } |
| 4455 | 4457 | ||
| 4456 | ret = copy_from_user(attr, uattr, size); | 4458 | ret = copy_from_user(attr, uattr, size); |
| 4457 | if (ret) | 4459 | if (ret) |
| 4458 | return -EFAULT; | 4460 | return -EFAULT; |
| 4459 | 4461 | ||
| 4460 | /* | 4462 | /* |
| 4461 | * If the type exists, the corresponding creation will verify | 4463 | * If the type exists, the corresponding creation will verify |
| 4462 | * the attr->config. | 4464 | * the attr->config. |
| 4463 | */ | 4465 | */ |
| 4464 | if (attr->type >= PERF_TYPE_MAX) | 4466 | if (attr->type >= PERF_TYPE_MAX) |
| 4465 | return -EINVAL; | 4467 | return -EINVAL; |
| 4466 | 4468 | ||
| 4467 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) | 4469 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) |
| 4468 | return -EINVAL; | 4470 | return -EINVAL; |
| 4469 | 4471 | ||
| 4470 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) | 4472 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) |
| 4471 | return -EINVAL; | 4473 | return -EINVAL; |
| 4472 | 4474 | ||
| 4473 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) | 4475 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) |
| 4474 | return -EINVAL; | 4476 | return -EINVAL; |
| 4475 | 4477 | ||
| 4476 | out: | 4478 | out: |
| 4477 | return ret; | 4479 | return ret; |
| 4478 | 4480 | ||
| 4479 | err_size: | 4481 | err_size: |
| 4480 | put_user(sizeof(*attr), &uattr->size); | 4482 | put_user(sizeof(*attr), &uattr->size); |
| 4481 | ret = -E2BIG; | 4483 | ret = -E2BIG; |
| 4482 | goto out; | 4484 | goto out; |
| 4483 | } | 4485 | } |
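/*
 * Editor's note: a sketch of the userspace side of the size handshake
 * above (hypothetical helper, assuming only the E2BIG convention shown
 * here). On -E2BIG, the err_size path has written the kernel's
 * sizeof(*attr) back into attr->size, so a retry with the same struct
 * is read at the shorter size; fields beyond it are then ignored.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>

static long perf_open_compat(struct perf_event_attr *attr,
			     pid_t pid, int cpu)
{
	long fd = syscall(__NR_perf_event_open, attr, pid, cpu, -1, 0);

	if (fd < 0 && errno == E2BIG)	/* kernel told us its size */
		fd = syscall(__NR_perf_event_open, attr, pid, cpu, -1, 0);

	return fd;
}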
| 4484 | 4486 | ||
| 4485 | static int perf_event_set_output(struct perf_event *event, int output_fd) | 4487 | static int perf_event_set_output(struct perf_event *event, int output_fd) |
| 4486 | { | 4488 | { |
| 4487 | struct perf_event *output_event = NULL; | 4489 | struct perf_event *output_event = NULL; |
| 4488 | struct file *output_file = NULL; | 4490 | struct file *output_file = NULL; |
| 4489 | struct perf_event *old_output; | 4491 | struct perf_event *old_output; |
| 4490 | int fput_needed = 0; | 4492 | int fput_needed = 0; |
| 4491 | int ret = -EINVAL; | 4493 | int ret = -EINVAL; |
| 4492 | 4494 | ||
| 4493 | if (!output_fd) | 4495 | if (!output_fd) |
| 4494 | goto set; | 4496 | goto set; |
| 4495 | 4497 | ||
| 4496 | output_file = fget_light(output_fd, &fput_needed); | 4498 | output_file = fget_light(output_fd, &fput_needed); |
| 4497 | if (!output_file) | 4499 | if (!output_file) |
| 4498 | return -EBADF; | 4500 | return -EBADF; |
| 4499 | 4501 | ||
| 4500 | if (output_file->f_op != &perf_fops) | 4502 | if (output_file->f_op != &perf_fops) |
| 4501 | goto out; | 4503 | goto out; |
| 4502 | 4504 | ||
| 4503 | output_event = output_file->private_data; | 4505 | output_event = output_file->private_data; |
| 4504 | 4506 | ||
| 4505 | /* Don't chain output fds */ | 4507 | /* Don't chain output fds */ |
| 4506 | if (output_event->output) | 4508 | if (output_event->output) |
| 4507 | goto out; | 4509 | goto out; |
| 4508 | 4510 | ||
| 4509 | /* Don't set an output fd when we already have an output channel */ | 4511 | /* Don't set an output fd when we already have an output channel */ |
| 4510 | if (event->data) | 4512 | if (event->data) |
| 4511 | goto out; | 4513 | goto out; |
| 4512 | 4514 | ||
| 4513 | atomic_long_inc(&output_file->f_count); | 4515 | atomic_long_inc(&output_file->f_count); |
| 4514 | 4516 | ||
| 4515 | set: | 4517 | set: |
| 4516 | mutex_lock(&event->mmap_mutex); | 4518 | mutex_lock(&event->mmap_mutex); |
| 4517 | old_output = event->output; | 4519 | old_output = event->output; |
| 4518 | rcu_assign_pointer(event->output, output_event); | 4520 | rcu_assign_pointer(event->output, output_event); |
| 4519 | mutex_unlock(&event->mmap_mutex); | 4521 | mutex_unlock(&event->mmap_mutex); |
| 4520 | 4522 | ||
| 4521 | if (old_output) { | 4523 | if (old_output) { |
| 4522 | /* | 4524 | /* |
| 4523 | * we need to make sure no existing perf_output_*() | 4525 | * we need to make sure no existing perf_output_*() |
| 4524 | * is still referencing this event. | 4526 | * is still referencing this event. |
| 4525 | */ | 4527 | */ |
| 4526 | synchronize_rcu(); | 4528 | synchronize_rcu(); |
| 4527 | fput(old_output->filp); | 4529 | fput(old_output->filp); |
| 4528 | } | 4530 | } |
| 4529 | 4531 | ||
| 4530 | ret = 0; | 4532 | ret = 0; |
| 4531 | out: | 4533 | out: |
| 4532 | fput_light(output_file, fput_needed); | 4534 | fput_light(output_file, fput_needed); |
| 4533 | return ret; | 4535 | return ret; |
| 4534 | } | 4536 | } |
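/*
 * Editor's note: usage sketch for the redirection above (schematic;
 * perf_event_open here stands for the raw syscall). With
 * PERF_FLAG_FD_OUTPUT, the new event's records land in the first
 * event's mmap buffer, so one reader services both:
 *
 *	fd1 = perf_event_open(&attr1, pid, cpu, -1, 0);
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0);
 *	fd2 = perf_event_open(&attr2, pid, cpu, fd1, PERF_FLAG_FD_OUTPUT);
 *
 * The guards above enforce that output fds do not chain and that the
 * redirected event has not already mmap'ed its own buffer.
 */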
| 4535 | 4537 | ||
| 4536 | /** | 4538 | /** |
| 4537 | * sys_perf_event_open - open a performance event, associate it to a task/cpu | 4539 | * sys_perf_event_open - open a performance event, associate it to a task/cpu |
| 4538 | * | 4540 | * |
| 4539 | * @attr_uptr: event_id type attributes for monitoring/sampling | 4541 | * @attr_uptr: event_id type attributes for monitoring/sampling |
| 4540 | * @pid: target pid | 4542 | * @pid: target pid |
| 4541 | * @cpu: target cpu | 4543 | * @cpu: target cpu |
| 4542 | * @group_fd: group leader event fd | 4544 | * @group_fd: group leader event fd |
| 4543 | */ | 4545 | */ |
| 4544 | SYSCALL_DEFINE5(perf_event_open, | 4546 | SYSCALL_DEFINE5(perf_event_open, |
| 4545 | struct perf_event_attr __user *, attr_uptr, | 4547 | struct perf_event_attr __user *, attr_uptr, |
| 4546 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) | 4548 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) |
| 4547 | { | 4549 | { |
| 4548 | struct perf_event *event, *group_leader; | 4550 | struct perf_event *event, *group_leader; |
| 4549 | struct perf_event_attr attr; | 4551 | struct perf_event_attr attr; |
| 4550 | struct perf_event_context *ctx; | 4552 | struct perf_event_context *ctx; |
| 4551 | struct file *event_file = NULL; | 4553 | struct file *event_file = NULL; |
| 4552 | struct file *group_file = NULL; | 4554 | struct file *group_file = NULL; |
| 4553 | int fput_needed = 0; | 4555 | int fput_needed = 0; |
| 4554 | int fput_needed2 = 0; | 4556 | int fput_needed2 = 0; |
| 4555 | int err; | 4557 | int err; |
| 4556 | 4558 | ||
| 4557 | /* for future expandability... */ | 4559 | /* for future expandability... */ |
| 4558 | if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT)) | 4560 | if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT)) |
| 4559 | return -EINVAL; | 4561 | return -EINVAL; |
| 4560 | 4562 | ||
| 4561 | err = perf_copy_attr(attr_uptr, &attr); | 4563 | err = perf_copy_attr(attr_uptr, &attr); |
| 4562 | if (err) | 4564 | if (err) |
| 4563 | return err; | 4565 | return err; |
| 4564 | 4566 | ||
| 4565 | if (!attr.exclude_kernel) { | 4567 | if (!attr.exclude_kernel) { |
| 4566 | if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) | 4568 | if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) |
| 4567 | return -EACCES; | 4569 | return -EACCES; |
| 4568 | } | 4570 | } |
| 4569 | 4571 | ||
| 4570 | if (attr.freq) { | 4572 | if (attr.freq) { |
| 4571 | if (attr.sample_freq > sysctl_perf_event_sample_rate) | 4573 | if (attr.sample_freq > sysctl_perf_event_sample_rate) |
| 4572 | return -EINVAL; | 4574 | return -EINVAL; |
| 4573 | } | 4575 | } |
| 4574 | 4576 | ||
| 4575 | /* | 4577 | /* |
| 4576 | * Get the target context (task or percpu): | 4578 | * Get the target context (task or percpu): |
| 4577 | */ | 4579 | */ |
| 4578 | ctx = find_get_context(pid, cpu); | 4580 | ctx = find_get_context(pid, cpu); |
| 4579 | if (IS_ERR(ctx)) | 4581 | if (IS_ERR(ctx)) |
| 4580 | return PTR_ERR(ctx); | 4582 | return PTR_ERR(ctx); |
| 4581 | 4583 | ||
| 4582 | /* | 4584 | /* |
| 4583 | * Look up the group leader (we will attach this event to it): | 4585 | * Look up the group leader (we will attach this event to it): |
| 4584 | */ | 4586 | */ |
| 4585 | group_leader = NULL; | 4587 | group_leader = NULL; |
| 4586 | if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { | 4588 | if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { |
| 4587 | err = -EINVAL; | 4589 | err = -EINVAL; |
| 4588 | group_file = fget_light(group_fd, &fput_needed); | 4590 | group_file = fget_light(group_fd, &fput_needed); |
| 4589 | if (!group_file) | 4591 | if (!group_file) |
| 4590 | goto err_put_context; | 4592 | goto err_put_context; |
| 4591 | if (group_file->f_op != &perf_fops) | 4593 | if (group_file->f_op != &perf_fops) |
| 4592 | goto err_put_context; | 4594 | goto err_put_context; |
| 4593 | 4595 | ||
| 4594 | group_leader = group_file->private_data; | 4596 | group_leader = group_file->private_data; |
| 4595 | /* | 4597 | /* |
| 4596 | * Do not allow a recursive hierarchy (this new sibling | 4598 | * Do not allow a recursive hierarchy (this new sibling |
| 4597 | * becoming part of another group-sibling): | 4599 | * becoming part of another group-sibling): |
| 4598 | */ | 4600 | */ |
| 4599 | if (group_leader->group_leader != group_leader) | 4601 | if (group_leader->group_leader != group_leader) |
| 4600 | goto err_put_context; | 4602 | goto err_put_context; |
| 4601 | /* | 4603 | /* |
| 4602 | * Do not allow to attach to a group in a different | 4604 | * Do not allow to attach to a group in a different |
| 4603 | * task or CPU context: | 4605 | * task or CPU context: |
| 4604 | */ | 4606 | */ |
| 4605 | if (group_leader->ctx != ctx) | 4607 | if (group_leader->ctx != ctx) |
| 4606 | goto err_put_context; | 4608 | goto err_put_context; |
| 4607 | /* | 4609 | /* |
| 4608 | * Only a group leader can be exclusive or pinned | 4610 | * Only a group leader can be exclusive or pinned |
| 4609 | */ | 4611 | */ |
| 4610 | if (attr.exclusive || attr.pinned) | 4612 | if (attr.exclusive || attr.pinned) |
| 4611 | goto err_put_context; | 4613 | goto err_put_context; |
| 4612 | } | 4614 | } |
| 4613 | 4615 | ||
| 4614 | event = perf_event_alloc(&attr, cpu, ctx, group_leader, | 4616 | event = perf_event_alloc(&attr, cpu, ctx, group_leader, |
| 4615 | NULL, GFP_KERNEL); | 4617 | NULL, GFP_KERNEL); |
| 4616 | err = PTR_ERR(event); | 4618 | err = PTR_ERR(event); |
| 4617 | if (IS_ERR(event)) | 4619 | if (IS_ERR(event)) |
| 4618 | goto err_put_context; | 4620 | goto err_put_context; |
| 4619 | 4621 | ||
| 4620 | err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0); | 4622 | err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0); |
| 4621 | if (err < 0) | 4623 | if (err < 0) |
| 4622 | goto err_free_put_context; | 4624 | goto err_free_put_context; |
| 4623 | 4625 | ||
| 4624 | event_file = fget_light(err, &fput_needed2); | 4626 | event_file = fget_light(err, &fput_needed2); |
| 4625 | if (!event_file) | 4627 | if (!event_file) |
| 4626 | goto err_free_put_context; | 4628 | goto err_free_put_context; |
| 4627 | 4629 | ||
| 4628 | if (flags & PERF_FLAG_FD_OUTPUT) { | 4630 | if (flags & PERF_FLAG_FD_OUTPUT) { |
| 4629 | err = perf_event_set_output(event, group_fd); | 4631 | err = perf_event_set_output(event, group_fd); |
| 4630 | if (err) | 4632 | if (err) |
| 4631 | goto err_fput_free_put_context; | 4633 | goto err_fput_free_put_context; |
| 4632 | } | 4634 | } |
| 4633 | 4635 | ||
| 4634 | event->filp = event_file; | 4636 | event->filp = event_file; |
| 4635 | WARN_ON_ONCE(ctx->parent_ctx); | 4637 | WARN_ON_ONCE(ctx->parent_ctx); |
| 4636 | mutex_lock(&ctx->mutex); | 4638 | mutex_lock(&ctx->mutex); |
| 4637 | perf_install_in_context(ctx, event, cpu); | 4639 | perf_install_in_context(ctx, event, cpu); |
| 4638 | ++ctx->generation; | 4640 | ++ctx->generation; |
| 4639 | mutex_unlock(&ctx->mutex); | 4641 | mutex_unlock(&ctx->mutex); |
| 4640 | 4642 | ||
| 4641 | event->owner = current; | 4643 | event->owner = current; |
| 4642 | get_task_struct(current); | 4644 | get_task_struct(current); |
| 4643 | mutex_lock(¤t->perf_event_mutex); | 4645 | mutex_lock(¤t->perf_event_mutex); |
| 4644 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); | 4646 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); |
| 4645 | mutex_unlock(¤t->perf_event_mutex); | 4647 | mutex_unlock(¤t->perf_event_mutex); |
| 4646 | 4648 | ||
| 4647 | err_fput_free_put_context: | 4649 | err_fput_free_put_context: |
| 4648 | fput_light(event_file, fput_needed2); | 4650 | fput_light(event_file, fput_needed2); |
| 4649 | 4651 | ||
| 4650 | err_free_put_context: | 4652 | err_free_put_context: |
| 4651 | if (err < 0) | 4653 | if (err < 0) |
| 4652 | kfree(event); | 4654 | kfree(event); |
| 4653 | 4655 | ||
| 4654 | err_put_context: | 4656 | err_put_context: |
| 4655 | if (err < 0) | 4657 | if (err < 0) |
| 4656 | put_ctx(ctx); | 4658 | put_ctx(ctx); |
| 4657 | 4659 | ||
| 4658 | fput_light(group_file, fput_needed); | 4660 | fput_light(group_file, fput_needed); |
| 4659 | 4661 | ||
| 4660 | return err; | 4662 | return err; |
| 4661 | } | 4663 | } |
| 4662 | 4664 | ||
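As a rough illustration of the ABI this syscall implements, here is a minimal
userspace sketch (illustrative only, not part of this patch): it counts CPU
cycles for the calling task, passing pid 0 (current task), cpu -1 (any CPU) and
group_fd -1 (a standalone event, so the group-leader checks above are skipped).

---------------------
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;	/* created disabled, enabled explicitly below */

	/* attr, pid = 0, cpu = -1, group_fd = -1, flags = 0 */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}
---------------------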
| 4663 | /* | 4665 | /* |
| 4664 | * inherit an event from parent task to child task: | 4666 | * inherit an event from parent task to child task: |
| 4665 | */ | 4667 | */ |
| 4666 | static struct perf_event * | 4668 | static struct perf_event * |
| 4667 | inherit_event(struct perf_event *parent_event, | 4669 | inherit_event(struct perf_event *parent_event, |
| 4668 | struct task_struct *parent, | 4670 | struct task_struct *parent, |
| 4669 | struct perf_event_context *parent_ctx, | 4671 | struct perf_event_context *parent_ctx, |
| 4670 | struct task_struct *child, | 4672 | struct task_struct *child, |
| 4671 | struct perf_event *group_leader, | 4673 | struct perf_event *group_leader, |
| 4672 | struct perf_event_context *child_ctx) | 4674 | struct perf_event_context *child_ctx) |
| 4673 | { | 4675 | { |
| 4674 | struct perf_event *child_event; | 4676 | struct perf_event *child_event; |
| 4675 | 4677 | ||
| 4676 | /* | 4678 | /* |
| 4677 | * Instead of creating recursive hierarchies of events, | 4679 | * Instead of creating recursive hierarchies of events, |
| 4678 | * we link inherited events back to the original parent, | 4680 | * we link inherited events back to the original parent, |
| 4679 | * which has a filp for sure, which we use as the reference | 4681 | * which has a filp for sure, which we use as the reference |
| 4680 | * count: | 4682 | * count: |
| 4681 | */ | 4683 | */ |
| 4682 | if (parent_event->parent) | 4684 | if (parent_event->parent) |
| 4683 | parent_event = parent_event->parent; | 4685 | parent_event = parent_event->parent; |
| 4684 | 4686 | ||
| 4685 | child_event = perf_event_alloc(&parent_event->attr, | 4687 | child_event = perf_event_alloc(&parent_event->attr, |
| 4686 | parent_event->cpu, child_ctx, | 4688 | parent_event->cpu, child_ctx, |
| 4687 | group_leader, parent_event, | 4689 | group_leader, parent_event, |
| 4688 | GFP_KERNEL); | 4690 | GFP_KERNEL); |
| 4689 | if (IS_ERR(child_event)) | 4691 | if (IS_ERR(child_event)) |
| 4690 | return child_event; | 4692 | return child_event; |
| 4691 | get_ctx(child_ctx); | 4693 | get_ctx(child_ctx); |
| 4692 | 4694 | ||
| 4693 | /* | 4695 | /* |
| 4694 | * Make the child state follow the state of the parent event, | 4696 | * Make the child state follow the state of the parent event, |
| 4695 | * not its attr.disabled bit. We hold the parent's mutex, | 4697 | * not its attr.disabled bit. We hold the parent's mutex, |
| 4696 | * so we won't race with perf_event_{en, dis}able_family. | 4698 | * so we won't race with perf_event_{en, dis}able_family. |
| 4697 | */ | 4699 | */ |
| 4698 | if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) | 4700 | if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) |
| 4699 | child_event->state = PERF_EVENT_STATE_INACTIVE; | 4701 | child_event->state = PERF_EVENT_STATE_INACTIVE; |
| 4700 | else | 4702 | else |
| 4701 | child_event->state = PERF_EVENT_STATE_OFF; | 4703 | child_event->state = PERF_EVENT_STATE_OFF; |
| 4702 | 4704 | ||
| 4703 | if (parent_event->attr.freq) | 4705 | if (parent_event->attr.freq) |
| 4704 | child_event->hw.sample_period = parent_event->hw.sample_period; | 4706 | child_event->hw.sample_period = parent_event->hw.sample_period; |
| 4705 | 4707 | ||
| 4706 | /* | 4708 | /* |
| 4707 | * Link it up in the child's context: | 4709 | * Link it up in the child's context: |
| 4708 | */ | 4710 | */ |
| 4709 | add_event_to_ctx(child_event, child_ctx); | 4711 | add_event_to_ctx(child_event, child_ctx); |
| 4710 | 4712 | ||
| 4711 | /* | 4713 | /* |
| 4712 | * Get a reference to the parent filp - we will fput it | 4714 | * Get a reference to the parent filp - we will fput it |
| 4713 | * when the child event exits. This is safe to do because | 4715 | * when the child event exits. This is safe to do because |
| 4714 | * we are in the parent and we know that the filp still | 4716 | * we are in the parent and we know that the filp still |
| 4715 | * exists and has a nonzero count: | 4717 | * exists and has a nonzero count: |
| 4716 | */ | 4718 | */ |
| 4717 | atomic_long_inc(&parent_event->filp->f_count); | 4719 | atomic_long_inc(&parent_event->filp->f_count); |
| 4718 | 4720 | ||
| 4719 | /* | 4721 | /* |
| 4720 | * Link this into the parent event's child list | 4722 | * Link this into the parent event's child list |
| 4721 | */ | 4723 | */ |
| 4722 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); | 4724 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); |
| 4723 | mutex_lock(&parent_event->child_mutex); | 4725 | mutex_lock(&parent_event->child_mutex); |
| 4724 | list_add_tail(&child_event->child_list, &parent_event->child_list); | 4726 | list_add_tail(&child_event->child_list, &parent_event->child_list); |
| 4725 | mutex_unlock(&parent_event->child_mutex); | 4727 | mutex_unlock(&parent_event->child_mutex); |
| 4726 | 4728 | ||
| 4727 | return child_event; | 4729 | return child_event; |
| 4728 | } | 4730 | } |
| 4729 | 4731 | ||
| 4730 | static int inherit_group(struct perf_event *parent_event, | 4732 | static int inherit_group(struct perf_event *parent_event, |
| 4731 | struct task_struct *parent, | 4733 | struct task_struct *parent, |
| 4732 | struct perf_event_context *parent_ctx, | 4734 | struct perf_event_context *parent_ctx, |
| 4733 | struct task_struct *child, | 4735 | struct task_struct *child, |
| 4734 | struct perf_event_context *child_ctx) | 4736 | struct perf_event_context *child_ctx) |
| 4735 | { | 4737 | { |
| 4736 | struct perf_event *leader; | 4738 | struct perf_event *leader; |
| 4737 | struct perf_event *sub; | 4739 | struct perf_event *sub; |
| 4738 | struct perf_event *child_ctr; | 4740 | struct perf_event *child_ctr; |
| 4739 | 4741 | ||
| 4740 | leader = inherit_event(parent_event, parent, parent_ctx, | 4742 | leader = inherit_event(parent_event, parent, parent_ctx, |
| 4741 | child, NULL, child_ctx); | 4743 | child, NULL, child_ctx); |
| 4742 | if (IS_ERR(leader)) | 4744 | if (IS_ERR(leader)) |
| 4743 | return PTR_ERR(leader); | 4745 | return PTR_ERR(leader); |
| 4744 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { | 4746 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { |
| 4745 | child_ctr = inherit_event(sub, parent, parent_ctx, | 4747 | child_ctr = inherit_event(sub, parent, parent_ctx, |
| 4746 | child, leader, child_ctx); | 4748 | child, leader, child_ctx); |
| 4747 | if (IS_ERR(child_ctr)) | 4749 | if (IS_ERR(child_ctr)) |
| 4748 | return PTR_ERR(child_ctr); | 4750 | return PTR_ERR(child_ctr); |
| 4749 | } | 4751 | } |
| 4750 | return 0; | 4752 | return 0; |
| 4751 | } | 4753 | } |
| 4752 | 4754 | ||
| 4753 | static void sync_child_event(struct perf_event *child_event, | 4755 | static void sync_child_event(struct perf_event *child_event, |
| 4754 | struct task_struct *child) | 4756 | struct task_struct *child) |
| 4755 | { | 4757 | { |
| 4756 | struct perf_event *parent_event = child_event->parent; | 4758 | struct perf_event *parent_event = child_event->parent; |
| 4757 | u64 child_val; | 4759 | u64 child_val; |
| 4758 | 4760 | ||
| 4759 | if (child_event->attr.inherit_stat) | 4761 | if (child_event->attr.inherit_stat) |
| 4760 | perf_event_read_event(child_event, child); | 4762 | perf_event_read_event(child_event, child); |
| 4761 | 4763 | ||
| 4762 | child_val = atomic64_read(&child_event->count); | 4764 | child_val = atomic64_read(&child_event->count); |
| 4763 | 4765 | ||
| 4764 | /* | 4766 | /* |
| 4765 | * Add back the child's count to the parent's count: | 4767 | * Add back the child's count to the parent's count: |
| 4766 | */ | 4768 | */ |
| 4767 | atomic64_add(child_val, &parent_event->count); | 4769 | atomic64_add(child_val, &parent_event->count); |
| 4768 | atomic64_add(child_event->total_time_enabled, | 4770 | atomic64_add(child_event->total_time_enabled, |
| 4769 | &parent_event->child_total_time_enabled); | 4771 | &parent_event->child_total_time_enabled); |
| 4770 | atomic64_add(child_event->total_time_running, | 4772 | atomic64_add(child_event->total_time_running, |
| 4771 | &parent_event->child_total_time_running); | 4773 | &parent_event->child_total_time_running); |
| 4772 | 4774 | ||
| 4773 | /* | 4775 | /* |
| 4774 | * Remove this event from the parent's list | 4776 | * Remove this event from the parent's list |
| 4775 | */ | 4777 | */ |
| 4776 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); | 4778 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); |
| 4777 | mutex_lock(&parent_event->child_mutex); | 4779 | mutex_lock(&parent_event->child_mutex); |
| 4778 | list_del_init(&child_event->child_list); | 4780 | list_del_init(&child_event->child_list); |
| 4779 | mutex_unlock(&parent_event->child_mutex); | 4781 | mutex_unlock(&parent_event->child_mutex); |
| 4780 | 4782 | ||
| 4781 | /* | 4783 | /* |
| 4782 | * Release the parent event, if this was the last | 4784 | * Release the parent event, if this was the last |
| 4783 | * reference to it. | 4785 | * reference to it. |
| 4784 | */ | 4786 | */ |
| 4785 | fput(parent_event->filp); | 4787 | fput(parent_event->filp); |
| 4786 | } | 4788 | } |
| 4787 | 4789 | ||
| 4788 | static void | 4790 | static void |
| 4789 | __perf_event_exit_task(struct perf_event *child_event, | 4791 | __perf_event_exit_task(struct perf_event *child_event, |
| 4790 | struct perf_event_context *child_ctx, | 4792 | struct perf_event_context *child_ctx, |
| 4791 | struct task_struct *child) | 4793 | struct task_struct *child) |
| 4792 | { | 4794 | { |
| 4793 | struct perf_event *parent_event; | 4795 | struct perf_event *parent_event; |
| 4794 | 4796 | ||
| 4795 | update_event_times(child_event); | 4797 | update_event_times(child_event); |
| 4796 | perf_event_remove_from_context(child_event); | 4798 | perf_event_remove_from_context(child_event); |
| 4797 | 4799 | ||
| 4798 | parent_event = child_event->parent; | 4800 | parent_event = child_event->parent; |
| 4799 | /* | 4801 | /* |
| 4800 | * It can happen that the parent exits first, and has events | 4802 | * It can happen that the parent exits first, and has events |
| 4801 | * that are still around due to the child reference. These | 4803 | * that are still around due to the child reference. These |
| 4802 | * events need to be zapped - but otherwise linger. | 4804 | * events need to be zapped - but otherwise linger. |
| 4803 | */ | 4805 | */ |
| 4804 | if (parent_event) { | 4806 | if (parent_event) { |
| 4805 | sync_child_event(child_event, child); | 4807 | sync_child_event(child_event, child); |
| 4806 | free_event(child_event); | 4808 | free_event(child_event); |
| 4807 | } | 4809 | } |
| 4808 | } | 4810 | } |
| 4809 | 4811 | ||
| 4810 | /* | 4812 | /* |
| 4811 | * When a child task exits, feed back event values to parent events. | 4813 | * When a child task exits, feed back event values to parent events. |
| 4812 | */ | 4814 | */ |
| 4813 | void perf_event_exit_task(struct task_struct *child) | 4815 | void perf_event_exit_task(struct task_struct *child) |
| 4814 | { | 4816 | { |
| 4815 | struct perf_event *child_event, *tmp; | 4817 | struct perf_event *child_event, *tmp; |
| 4816 | struct perf_event_context *child_ctx; | 4818 | struct perf_event_context *child_ctx; |
| 4817 | unsigned long flags; | 4819 | unsigned long flags; |
| 4818 | 4820 | ||
| 4819 | if (likely(!child->perf_event_ctxp)) { | 4821 | if (likely(!child->perf_event_ctxp)) { |
| 4820 | perf_event_task(child, NULL, 0); | 4822 | perf_event_task(child, NULL, 0); |
| 4821 | return; | 4823 | return; |
| 4822 | } | 4824 | } |
| 4823 | 4825 | ||
| 4824 | local_irq_save(flags); | 4826 | local_irq_save(flags); |
| 4825 | /* | 4827 | /* |
| 4826 | * We can't reschedule here because interrupts are disabled, | 4828 | * We can't reschedule here because interrupts are disabled, |
| 4827 | * and either the child is current or it is a task that can't be | 4829 | * and either the child is current or it is a task that can't be |
| 4828 | * scheduled, so we are now safe from rescheduling changing | 4830 | * scheduled, so we are now safe from rescheduling changing |
| 4829 | * our context. | 4831 | * our context. |
| 4830 | */ | 4832 | */ |
| 4831 | child_ctx = child->perf_event_ctxp; | 4833 | child_ctx = child->perf_event_ctxp; |
| 4832 | __perf_event_task_sched_out(child_ctx); | 4834 | __perf_event_task_sched_out(child_ctx); |
| 4833 | 4835 | ||
| 4834 | /* | 4836 | /* |
| 4835 | * Take the context lock here so that if find_get_context is | 4837 | * Take the context lock here so that if find_get_context is |
| 4836 | * reading child->perf_event_ctxp, we wait until it has | 4838 | * reading child->perf_event_ctxp, we wait until it has |
| 4837 | * incremented the context's refcount before we do put_ctx below. | 4839 | * incremented the context's refcount before we do put_ctx below. |
| 4838 | */ | 4840 | */ |
| 4839 | spin_lock(&child_ctx->lock); | 4841 | spin_lock(&child_ctx->lock); |
| 4840 | child->perf_event_ctxp = NULL; | 4842 | child->perf_event_ctxp = NULL; |
| 4841 | /* | 4843 | /* |
| 4842 | * If this context is a clone, unclone it so it can't get | 4844 | * If this context is a clone, unclone it so it can't get |
| 4843 | * swapped to another process while we're removing all | 4845 | * swapped to another process while we're removing all |
| 4844 | * the events from it. | 4846 | * the events from it. |
| 4845 | */ | 4847 | */ |
| 4846 | unclone_ctx(child_ctx); | 4848 | unclone_ctx(child_ctx); |
| 4847 | spin_unlock_irqrestore(&child_ctx->lock, flags); | 4849 | spin_unlock_irqrestore(&child_ctx->lock, flags); |
| 4848 | 4850 | ||
| 4849 | /* | 4851 | /* |
| 4850 | * Report the task dead after unscheduling the events so that we | 4852 | * Report the task dead after unscheduling the events so that we |
| 4851 | * won't get any samples after PERF_RECORD_EXIT. We can however still | 4853 | * won't get any samples after PERF_RECORD_EXIT. We can however still |
| 4852 | * get a few PERF_RECORD_READ events. | 4854 | * get a few PERF_RECORD_READ events. |
| 4853 | */ | 4855 | */ |
| 4854 | perf_event_task(child, child_ctx, 0); | 4856 | perf_event_task(child, child_ctx, 0); |
| 4855 | 4857 | ||
| 4856 | /* | 4858 | /* |
| 4857 | * We can recurse on the same lock type through: | 4859 | * We can recurse on the same lock type through: |
| 4858 | * | 4860 | * |
| 4859 | * __perf_event_exit_task() | 4861 | * __perf_event_exit_task() |
| 4860 | * sync_child_event() | 4862 | * sync_child_event() |
| 4861 | * fput(parent_event->filp) | 4863 | * fput(parent_event->filp) |
| 4862 | * perf_release() | 4864 | * perf_release() |
| 4863 | * mutex_lock(&ctx->mutex) | 4865 | * mutex_lock(&ctx->mutex) |
| 4864 | * | 4866 | * |
| 4865 | * But since it's the parent context, it won't be the same instance. | 4867 | * But since it's the parent context, it won't be the same instance. |
| 4866 | */ | 4868 | */ |
| 4867 | mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); | 4869 | mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); |
| 4868 | 4870 | ||
| 4869 | again: | 4871 | again: |
| 4870 | list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list, | 4872 | list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list, |
| 4871 | group_entry) | 4873 | group_entry) |
| 4872 | __perf_event_exit_task(child_event, child_ctx, child); | 4874 | __perf_event_exit_task(child_event, child_ctx, child); |
| 4873 | 4875 | ||
| 4874 | /* | 4876 | /* |
| 4875 | * If the last event was a group event, it will have appended all | 4877 | * If the last event was a group event, it will have appended all |
| 4876 | * its siblings to the list, but we obtained 'tmp' before that which | 4878 | * its siblings to the list, but we obtained 'tmp' before that which |
| 4877 | * will still point to the list head terminating the iteration. | 4879 | * will still point to the list head terminating the iteration. |
| 4878 | */ | 4880 | */ |
| 4879 | if (!list_empty(&child_ctx->group_list)) | 4881 | if (!list_empty(&child_ctx->group_list)) |
| 4880 | goto again; | 4882 | goto again; |
| 4881 | 4883 | ||
| 4882 | mutex_unlock(&child_ctx->mutex); | 4884 | mutex_unlock(&child_ctx->mutex); |
| 4883 | 4885 | ||
| 4884 | put_ctx(child_ctx); | 4886 | put_ctx(child_ctx); |
| 4885 | } | 4887 | } |
| 4886 | 4888 | ||
| 4887 | /* | 4889 | /* |
| 4888 | * free an unexposed, unused context as created by inheritance by | 4890 | * free an unexposed, unused context as created by inheritance by |
| 4889 | * init_task below, used by fork() in case of failure. | 4891 | * init_task below, used by fork() in case of failure. |
| 4890 | */ | 4892 | */ |
| 4891 | void perf_event_free_task(struct task_struct *task) | 4893 | void perf_event_free_task(struct task_struct *task) |
| 4892 | { | 4894 | { |
| 4893 | struct perf_event_context *ctx = task->perf_event_ctxp; | 4895 | struct perf_event_context *ctx = task->perf_event_ctxp; |
| 4894 | struct perf_event *event, *tmp; | 4896 | struct perf_event *event, *tmp; |
| 4895 | 4897 | ||
| 4896 | if (!ctx) | 4898 | if (!ctx) |
| 4897 | return; | 4899 | return; |
| 4898 | 4900 | ||
| 4899 | mutex_lock(&ctx->mutex); | 4901 | mutex_lock(&ctx->mutex); |
| 4900 | again: | 4902 | again: |
| 4901 | list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) { | 4903 | list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) { |
| 4902 | struct perf_event *parent = event->parent; | 4904 | struct perf_event *parent = event->parent; |
| 4903 | 4905 | ||
| 4904 | if (WARN_ON_ONCE(!parent)) | 4906 | if (WARN_ON_ONCE(!parent)) |
| 4905 | continue; | 4907 | continue; |
| 4906 | 4908 | ||
| 4907 | mutex_lock(&parent->child_mutex); | 4909 | mutex_lock(&parent->child_mutex); |
| 4908 | list_del_init(&event->child_list); | 4910 | list_del_init(&event->child_list); |
| 4909 | mutex_unlock(&parent->child_mutex); | 4911 | mutex_unlock(&parent->child_mutex); |
| 4910 | 4912 | ||
| 4911 | fput(parent->filp); | 4913 | fput(parent->filp); |
| 4912 | 4914 | ||
| 4913 | list_del_event(event, ctx); | 4915 | list_del_event(event, ctx); |
| 4914 | free_event(event); | 4916 | free_event(event); |
| 4915 | } | 4917 | } |
| 4916 | 4918 | ||
| 4917 | if (!list_empty(&ctx->group_list)) | 4919 | if (!list_empty(&ctx->group_list)) |
| 4918 | goto again; | 4920 | goto again; |
| 4919 | 4921 | ||
| 4920 | mutex_unlock(&ctx->mutex); | 4922 | mutex_unlock(&ctx->mutex); |
| 4921 | 4923 | ||
| 4922 | put_ctx(ctx); | 4924 | put_ctx(ctx); |
| 4923 | } | 4925 | } |
| 4924 | 4926 | ||
| 4925 | /* | 4927 | /* |
| 4926 | * Initialize the perf_event context in task_struct | 4928 | * Initialize the perf_event context in task_struct |
| 4927 | */ | 4929 | */ |
| 4928 | int perf_event_init_task(struct task_struct *child) | 4930 | int perf_event_init_task(struct task_struct *child) |
| 4929 | { | 4931 | { |
| 4930 | struct perf_event_context *child_ctx, *parent_ctx; | 4932 | struct perf_event_context *child_ctx, *parent_ctx; |
| 4931 | struct perf_event_context *cloned_ctx; | 4933 | struct perf_event_context *cloned_ctx; |
| 4932 | struct perf_event *event; | 4934 | struct perf_event *event; |
| 4933 | struct task_struct *parent = current; | 4935 | struct task_struct *parent = current; |
| 4934 | int inherited_all = 1; | 4936 | int inherited_all = 1; |
| 4935 | int ret = 0; | 4937 | int ret = 0; |
| 4936 | 4938 | ||
| 4937 | child->perf_event_ctxp = NULL; | 4939 | child->perf_event_ctxp = NULL; |
| 4938 | 4940 | ||
| 4939 | mutex_init(&child->perf_event_mutex); | 4941 | mutex_init(&child->perf_event_mutex); |
| 4940 | INIT_LIST_HEAD(&child->perf_event_list); | 4942 | INIT_LIST_HEAD(&child->perf_event_list); |
| 4941 | 4943 | ||
| 4942 | if (likely(!parent->perf_event_ctxp)) | 4944 | if (likely(!parent->perf_event_ctxp)) |
| 4943 | return 0; | 4945 | return 0; |
| 4944 | 4946 | ||
| 4945 | /* | 4947 | /* |
| 4946 | * This is executed from the parent task context, so inherit | 4948 | * This is executed from the parent task context, so inherit |
| 4947 | * events that have been marked for cloning. | 4949 | * events that have been marked for cloning. |
| 4948 | * First allocate and initialize a context for the child. | 4950 | * First allocate and initialize a context for the child. |
| 4949 | */ | 4951 | */ |
| 4950 | 4952 | ||
| 4951 | child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); | 4953 | child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); |
| 4952 | if (!child_ctx) | 4954 | if (!child_ctx) |
| 4953 | return -ENOMEM; | 4955 | return -ENOMEM; |
| 4954 | 4956 | ||
| 4955 | __perf_event_init_context(child_ctx, child); | 4957 | __perf_event_init_context(child_ctx, child); |
| 4956 | child->perf_event_ctxp = child_ctx; | 4958 | child->perf_event_ctxp = child_ctx; |
| 4957 | get_task_struct(child); | 4959 | get_task_struct(child); |
| 4958 | 4960 | ||
| 4959 | /* | 4961 | /* |
| 4960 | * If the parent's context is a clone, pin it so it won't get | 4962 | * If the parent's context is a clone, pin it so it won't get |
| 4961 | * swapped under us. | 4963 | * swapped under us. |
| 4962 | */ | 4964 | */ |
| 4963 | parent_ctx = perf_pin_task_context(parent); | 4965 | parent_ctx = perf_pin_task_context(parent); |
| 4964 | 4966 | ||
| 4965 | /* | 4967 | /* |
| 4966 | * No need to check if parent_ctx != NULL here; since we saw | 4968 | * No need to check if parent_ctx != NULL here; since we saw |
| 4967 | * it non-NULL earlier, the only reason for it to become NULL | 4969 | * it non-NULL earlier, the only reason for it to become NULL |
| 4968 | * is if we exit, and since we're currently in the middle of | 4970 | * is if we exit, and since we're currently in the middle of |
| 4969 | * a fork we can't be exiting at the same time. | 4971 | * a fork we can't be exiting at the same time. |
| 4970 | */ | 4972 | */ |
| 4971 | 4973 | ||
| 4972 | /* | 4974 | /* |
| 4973 | * Lock the parent list. No need to lock the child - not PID | 4975 | * Lock the parent list. No need to lock the child - not PID |
| 4974 | * hashed yet and not running, so nobody can access it. | 4976 | * hashed yet and not running, so nobody can access it. |
| 4975 | */ | 4977 | */ |
| 4976 | mutex_lock(&parent_ctx->mutex); | 4978 | mutex_lock(&parent_ctx->mutex); |
| 4977 | 4979 | ||
| 4978 | /* | 4980 | /* |
| 4979 | * We don't have to disable NMIs - we are only looking at | 4981 | * We don't have to disable NMIs - we are only looking at |
| 4980 | * the list, not manipulating it: | 4982 | * the list, not manipulating it: |
| 4981 | */ | 4983 | */ |
| 4982 | list_for_each_entry(event, &parent_ctx->group_list, group_entry) { | 4984 | list_for_each_entry(event, &parent_ctx->group_list, group_entry) { |
| 4983 | 4985 | ||
| 4984 | if (!event->attr.inherit) { | 4986 | if (!event->attr.inherit) { |
| 4985 | inherited_all = 0; | 4987 | inherited_all = 0; |
| 4986 | continue; | 4988 | continue; |
| 4987 | } | 4989 | } |
| 4988 | 4990 | ||
| 4989 | ret = inherit_group(event, parent, parent_ctx, | 4991 | ret = inherit_group(event, parent, parent_ctx, |
| 4990 | child, child_ctx); | 4992 | child, child_ctx); |
| 4991 | if (ret) { | 4993 | if (ret) { |
| 4992 | inherited_all = 0; | 4994 | inherited_all = 0; |
| 4993 | break; | 4995 | break; |
| 4994 | } | 4996 | } |
| 4995 | } | 4997 | } |
| 4996 | 4998 | ||
| 4997 | if (inherited_all) { | 4999 | if (inherited_all) { |
| 4998 | /* | 5000 | /* |
| 4999 | * Mark the child context as a clone of the parent | 5001 | * Mark the child context as a clone of the parent |
| 5000 | * context, or of whatever the parent is a clone of. | 5002 | * context, or of whatever the parent is a clone of. |
| 5001 | * Note that if the parent is a clone, it could get | 5003 | * Note that if the parent is a clone, it could get |
| 5002 | * uncloned at any point, but that doesn't matter | 5004 | * uncloned at any point, but that doesn't matter |
| 5003 | * because the list of events and the generation | 5005 | * because the list of events and the generation |
| 5004 | * count can't have changed since we took the mutex. | 5006 | * count can't have changed since we took the mutex. |
| 5005 | */ | 5007 | */ |
| 5006 | cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); | 5008 | cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); |
| 5007 | if (cloned_ctx) { | 5009 | if (cloned_ctx) { |
| 5008 | child_ctx->parent_ctx = cloned_ctx; | 5010 | child_ctx->parent_ctx = cloned_ctx; |
| 5009 | child_ctx->parent_gen = parent_ctx->parent_gen; | 5011 | child_ctx->parent_gen = parent_ctx->parent_gen; |
| 5010 | } else { | 5012 | } else { |
| 5011 | child_ctx->parent_ctx = parent_ctx; | 5013 | child_ctx->parent_ctx = parent_ctx; |
| 5012 | child_ctx->parent_gen = parent_ctx->generation; | 5014 | child_ctx->parent_gen = parent_ctx->generation; |
| 5013 | } | 5015 | } |
| 5014 | get_ctx(child_ctx->parent_ctx); | 5016 | get_ctx(child_ctx->parent_ctx); |
| 5015 | } | 5017 | } |
| 5016 | 5018 | ||
| 5017 | mutex_unlock(&parent_ctx->mutex); | 5019 | mutex_unlock(&parent_ctx->mutex); |
| 5018 | 5020 | ||
| 5019 | perf_unpin_context(parent_ctx); | 5021 | perf_unpin_context(parent_ctx); |
| 5020 | 5022 | ||
| 5021 | return ret; | 5023 | return ret; |
| 5022 | } | 5024 | } |
| 5023 | 5025 | ||
| 5024 | static void __cpuinit perf_event_init_cpu(int cpu) | 5026 | static void __cpuinit perf_event_init_cpu(int cpu) |
| 5025 | { | 5027 | { |
| 5026 | struct perf_cpu_context *cpuctx; | 5028 | struct perf_cpu_context *cpuctx; |
| 5027 | 5029 | ||
| 5028 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 5030 | cpuctx = &per_cpu(perf_cpu_context, cpu); |
| 5029 | __perf_event_init_context(&cpuctx->ctx, NULL); | 5031 | __perf_event_init_context(&cpuctx->ctx, NULL); |
| 5030 | 5032 | ||
| 5031 | spin_lock(&perf_resource_lock); | 5033 | spin_lock(&perf_resource_lock); |
| 5032 | cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; | 5034 | cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; |
| 5033 | spin_unlock(&perf_resource_lock); | 5035 | spin_unlock(&perf_resource_lock); |
| 5034 | 5036 | ||
| 5035 | hw_perf_event_setup(cpu); | 5037 | hw_perf_event_setup(cpu); |
| 5036 | } | 5038 | } |
| 5037 | 5039 | ||
| 5038 | #ifdef CONFIG_HOTPLUG_CPU | 5040 | #ifdef CONFIG_HOTPLUG_CPU |
| 5039 | static void __perf_event_exit_cpu(void *info) | 5041 | static void __perf_event_exit_cpu(void *info) |
| 5040 | { | 5042 | { |
| 5041 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 5043 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); |
| 5042 | struct perf_event_context *ctx = &cpuctx->ctx; | 5044 | struct perf_event_context *ctx = &cpuctx->ctx; |
| 5043 | struct perf_event *event, *tmp; | 5045 | struct perf_event *event, *tmp; |
| 5044 | 5046 | ||
| 5045 | list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) | 5047 | list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) |
| 5046 | __perf_event_remove_from_context(event); | 5048 | __perf_event_remove_from_context(event); |
| 5047 | } | 5049 | } |
| 5048 | static void perf_event_exit_cpu(int cpu) | 5050 | static void perf_event_exit_cpu(int cpu) |
| 5049 | { | 5051 | { |
| 5050 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | 5052 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); |
| 5051 | struct perf_event_context *ctx = &cpuctx->ctx; | 5053 | struct perf_event_context *ctx = &cpuctx->ctx; |
| 5052 | 5054 | ||
| 5053 | mutex_lock(&ctx->mutex); | 5055 | mutex_lock(&ctx->mutex); |
| 5054 | smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1); | 5056 | smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1); |
| 5055 | mutex_unlock(&ctx->mutex); | 5057 | mutex_unlock(&ctx->mutex); |
| 5056 | } | 5058 | } |
| 5057 | #else | 5059 | #else |
| 5058 | static inline void perf_event_exit_cpu(int cpu) { } | 5060 | static inline void perf_event_exit_cpu(int cpu) { } |
| 5059 | #endif | 5061 | #endif |
| 5060 | 5062 | ||
| 5061 | static int __cpuinit | 5063 | static int __cpuinit |
| 5062 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | 5064 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) |
| 5063 | { | 5065 | { |
| 5064 | unsigned int cpu = (long)hcpu; | 5066 | unsigned int cpu = (long)hcpu; |
| 5065 | 5067 | ||
| 5066 | switch (action) { | 5068 | switch (action) { |
| 5067 | 5069 | ||
| 5068 | case CPU_UP_PREPARE: | 5070 | case CPU_UP_PREPARE: |
| 5069 | case CPU_UP_PREPARE_FROZEN: | 5071 | case CPU_UP_PREPARE_FROZEN: |
| 5070 | perf_event_init_cpu(cpu); | 5072 | perf_event_init_cpu(cpu); |
| 5071 | break; | 5073 | break; |
| 5072 | 5074 | ||
| 5073 | case CPU_ONLINE: | 5075 | case CPU_ONLINE: |
| 5074 | case CPU_ONLINE_FROZEN: | 5076 | case CPU_ONLINE_FROZEN: |
| 5075 | hw_perf_event_setup_online(cpu); | 5077 | hw_perf_event_setup_online(cpu); |
| 5076 | break; | 5078 | break; |
| 5077 | 5079 | ||
| 5078 | case CPU_DOWN_PREPARE: | 5080 | case CPU_DOWN_PREPARE: |
| 5079 | case CPU_DOWN_PREPARE_FROZEN: | 5081 | case CPU_DOWN_PREPARE_FROZEN: |
| 5080 | perf_event_exit_cpu(cpu); | 5082 | perf_event_exit_cpu(cpu); |
| 5081 | break; | 5083 | break; |
| 5082 | 5084 | ||
| 5083 | default: | 5085 | default: |
| 5084 | break; | 5086 | break; |
| 5085 | } | 5087 | } |
| 5086 | 5088 | ||
| 5087 | return NOTIFY_OK; | 5089 | return NOTIFY_OK; |
| 5088 | } | 5090 | } |
| 5089 | 5091 | ||
| 5090 | /* | 5092 | /* |
| 5091 | * This has to have a higher priority than migration_notifier in sched.c. | 5093 | * This has to have a higher priority than migration_notifier in sched.c. |
| 5092 | */ | 5094 | */ |
| 5093 | static struct notifier_block __cpuinitdata perf_cpu_nb = { | 5095 | static struct notifier_block __cpuinitdata perf_cpu_nb = { |
| 5094 | .notifier_call = perf_cpu_notify, | 5096 | .notifier_call = perf_cpu_notify, |
| 5095 | .priority = 20, | 5097 | .priority = 20, |
| 5096 | }; | 5098 | }; |
| 5097 | 5099 | ||
| 5098 | void __init perf_event_init(void) | 5100 | void __init perf_event_init(void) |
| 5099 | { | 5101 | { |
| 5100 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | 5102 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, |
| 5101 | (void *)(long)smp_processor_id()); | 5103 | (void *)(long)smp_processor_id()); |
| 5102 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, | 5104 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, |
| 5103 | (void *)(long)smp_processor_id()); | 5105 | (void *)(long)smp_processor_id()); |
| 5104 | register_cpu_notifier(&perf_cpu_nb); | 5106 | register_cpu_notifier(&perf_cpu_nb); |
| 5105 | } | 5107 | } |
| 5106 | 5108 | ||
| 5107 | static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) | 5109 | static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) |
| 5108 | { | 5110 | { |
| 5109 | return sprintf(buf, "%d\n", perf_reserved_percpu); | 5111 | return sprintf(buf, "%d\n", perf_reserved_percpu); |
| 5110 | } | 5112 | } |
| 5111 | 5113 | ||
| 5112 | static ssize_t | 5114 | static ssize_t |
| 5113 | perf_set_reserve_percpu(struct sysdev_class *class, | 5115 | perf_set_reserve_percpu(struct sysdev_class *class, |
| 5114 | const char *buf, | 5116 | const char *buf, |
| 5115 | size_t count) | 5117 | size_t count) |
| 5116 | { | 5118 | { |
| 5117 | struct perf_cpu_context *cpuctx; | 5119 | struct perf_cpu_context *cpuctx; |
| 5118 | unsigned long val; | 5120 | unsigned long val; |
| 5119 | int err, cpu, mpt; | 5121 | int err, cpu, mpt; |
| 5120 | 5122 | ||
| 5121 | err = strict_strtoul(buf, 10, &val); | 5123 | err = strict_strtoul(buf, 10, &val); |
| 5122 | if (err) | 5124 | if (err) |
| 5123 | return err; | 5125 | return err; |
| 5124 | if (val > perf_max_events) | 5126 | if (val > perf_max_events) |
| 5125 | return -EINVAL; | 5127 | return -EINVAL; |
| 5126 | 5128 | ||
| 5127 | spin_lock(&perf_resource_lock); | 5129 | spin_lock(&perf_resource_lock); |
| 5128 | perf_reserved_percpu = val; | 5130 | perf_reserved_percpu = val; |
| 5129 | for_each_online_cpu(cpu) { | 5131 | for_each_online_cpu(cpu) { |
| 5130 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 5132 | cpuctx = &per_cpu(perf_cpu_context, cpu); |
| 5131 | spin_lock_irq(&cpuctx->ctx.lock); | 5133 | spin_lock_irq(&cpuctx->ctx.lock); |
| 5132 | mpt = min(perf_max_events - cpuctx->ctx.nr_events, | 5134 | mpt = min(perf_max_events - cpuctx->ctx.nr_events, |
| 5133 | perf_max_events - perf_reserved_percpu); | 5135 | perf_max_events - perf_reserved_percpu); |
| 5134 | cpuctx->max_pertask = mpt; | 5136 | cpuctx->max_pertask = mpt; |
| 5135 | spin_unlock_irq(&cpuctx->ctx.lock); | 5137 | spin_unlock_irq(&cpuctx->ctx.lock); |
| 5136 | } | 5138 | } |
| 5137 | spin_unlock(&perf_resource_lock); | 5139 | spin_unlock(&perf_resource_lock); |
| 5138 | 5140 | ||
| 5139 | return count; | 5141 | return count; |
| 5140 | } | 5142 | } |
| 5141 | 5143 | ||
| 5142 | static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) | 5144 | static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) |
| 5143 | { | 5145 | { |
| 5144 | return sprintf(buf, "%d\n", perf_overcommit); | 5146 | return sprintf(buf, "%d\n", perf_overcommit); |
| 5145 | } | 5147 | } |
| 5146 | 5148 | ||
| 5147 | static ssize_t | 5149 | static ssize_t |
| 5148 | perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) | 5150 | perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) |
| 5149 | { | 5151 | { |
| 5150 | unsigned long val; | 5152 | unsigned long val; |
| 5151 | int err; | 5153 | int err; |
| 5152 | 5154 | ||
| 5153 | err = strict_strtoul(buf, 10, &val); | 5155 | err = strict_strtoul(buf, 10, &val); |
| 5154 | if (err) | 5156 | if (err) |
| 5155 | return err; | 5157 | return err; |
| 5156 | if (val > 1) | 5158 | if (val > 1) |
| 5157 | return -EINVAL; | 5159 | return -EINVAL; |
| 5158 | 5160 | ||
| 5159 | spin_lock(&perf_resource_lock); | 5161 | spin_lock(&perf_resource_lock); |
| 5160 | perf_overcommit = val; | 5162 | perf_overcommit = val; |
| 5161 | spin_unlock(&perf_resource_lock); | 5163 | spin_unlock(&perf_resource_lock); |
| 5162 | 5164 | ||
| 5163 | return count; | 5165 | return count; |
| 5164 | } | 5166 | } |
| 5165 | 5167 | ||
| 5166 | static SYSDEV_CLASS_ATTR( | 5168 | static SYSDEV_CLASS_ATTR( |
| 5167 | reserve_percpu, | 5169 | reserve_percpu, |
| 5168 | 0644, | 5170 | 0644, |
| 5169 | perf_show_reserve_percpu, | 5171 | perf_show_reserve_percpu, |
| 5170 | perf_set_reserve_percpu | 5172 | perf_set_reserve_percpu |
| 5171 | ); | 5173 | ); |
| 5172 | 5174 | ||
| 5173 | static SYSDEV_CLASS_ATTR( | 5175 | static SYSDEV_CLASS_ATTR( |
| 5174 | overcommit, | 5176 | overcommit, |
| 5175 | 0644, | 5177 | 0644, |
| 5176 | perf_show_overcommit, | 5178 | perf_show_overcommit, |
| 5177 | perf_set_overcommit | 5179 | perf_set_overcommit |
| 5178 | ); | 5180 | ); |
| 5179 | 5181 | ||
| 5180 | static struct attribute *perfclass_attrs[] = { | 5182 | static struct attribute *perfclass_attrs[] = { |
| 5181 | &attr_reserve_percpu.attr, | 5183 | &attr_reserve_percpu.attr, |
| 5182 | &attr_overcommit.attr, | 5184 | &attr_overcommit.attr, |
| 5183 | NULL | 5185 | NULL |
| 5184 | }; | 5186 | }; |
| 5185 | 5187 | ||
| 5186 | static struct attribute_group perfclass_attr_group = { | 5188 | static struct attribute_group perfclass_attr_group = { |
| 5187 | .attrs = perfclass_attrs, | 5189 | .attrs = perfclass_attrs, |
| 5188 | .name = "perf_events", | 5190 | .name = "perf_events", |
| 5189 | }; | 5191 | }; |
| 5190 | 5192 | ||
| 5191 | static int __init perf_event_sysfs_init(void) | 5193 | static int __init perf_event_sysfs_init(void) |
| 5192 | { | 5194 | { |
| 5193 | return sysfs_create_group(&cpu_sysdev_class.kset.kobj, | 5195 | return sysfs_create_group(&cpu_sysdev_class.kset.kobj, |
| 5194 | &perfclass_attr_group); | 5196 | &perfclass_attr_group); |
| 5195 | } | 5197 | } |
| 5196 | device_initcall(perf_event_sysfs_init); | 5198 | device_initcall(perf_event_sysfs_init); |
| 5197 | 5199 |
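The class attributes registered above expose perf_reserved_percpu and
perf_overcommit as writable files under the cpu sysdev class. Assuming the
usual sysfs mount point (an illustrative shell session; the exact paths are
not shown in this patch), they can be inspected and tuned like this:

---------------------
% cat /sys/devices/system/cpu/perf_events/reserve_percpu
0
% echo 1 > /sys/devices/system/cpu/perf_events/overcommit  # as root; only 0 or 1 is accepted
---------------------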
tools/perf/Documentation/perf-bench.txt
| File was created | 1 | perf-bench(1) | |
| 2 | ============= | ||
| 3 | |||
| 4 | NAME | ||
| 5 | ---- | ||
| 6 | perf-bench - General framework for benchmark suites | ||
| 7 | |||
| 8 | SYNOPSIS | ||
| 9 | -------- | ||
| 10 | [verse] | ||
| 11 | 'perf bench' [<common options>] <subsystem> <suite> [<options>] | ||
| 12 | |||
| 13 | DESCRIPTION | ||
| 14 | ----------- | ||
| 15 | The 'perf bench' command is a general framework for benchmark suites. | ||
| 16 | |||
| 17 | COMMON OPTIONS | ||
| 18 | -------------- | ||
| 19 | -f:: | ||
| 20 | --format=:: | ||
| 21 | Specify the format style. | ||
| 22 | Currently available format styles are: | ||
| 23 | |||
| 24 | 'default':: | ||
| 25 | The default style, mainly for human reading. | ||
| 26 | --------------------- | ||
| 27 | % perf bench sched pipe # with no style specified | ||
| 28 | (executing 1000000 pipe operations between two tasks) | ||
| 29 | Total time:5.855 sec | ||
| 30 | 5.855061 usecs/op | ||
| 31 | 170792 ops/sec | ||
| 32 | --------------------- | ||
| 33 | |||
| 34 | 'simple':: | ||
| 35 | This simple style is suited to automated | ||
| 36 | processing by scripts. | ||
| 37 | --------------------- | ||
| 38 | % perf bench --format=simple sched pipe # 'simple' style specified | ||
| 39 | 5.988 | ||
| 40 | --------------------- | ||
| 41 | |||
| 42 | SUBSYSTEM | ||
| 43 | --------- | ||
| 44 | |||
| 45 | 'sched':: | ||
| 46 | Scheduler and IPC mechanisms. | ||
| 47 | |||
| 48 | SUITES FOR 'sched' | ||
| 49 | ~~~~~~~~~~~~~~~~~~ | ||
| 50 | *messaging*:: | ||
| 51 | Suite for evaluating performance of scheduler and IPC mechanisms. | ||
| 52 | Based on hackbench by Rusty Russell. | ||
| 53 | |||
| 54 | Options of *messaging* | ||
| 55 | ^^^^^^^^^^^^^^^^^^^^^^ | ||
| 56 | -p:: | ||
| 57 | --pipe:: | ||
| 58 | Use pipe() instead of socketpair() | ||
| 59 | |||
| 60 | -t:: | ||
| 61 | --thread:: | ||
| 62 | Be multi-threaded instead of multi-process | ||
| 63 | |||
| 64 | -g:: | ||
| 65 | --group=:: | ||
| 66 | Specify number of groups | ||
| 67 | |||
| 68 | -l:: | ||
| 69 | --loop=:: | ||
| 70 | Specify number of loops | ||
| 71 | |||
| 72 | Example of *messaging* | ||
| 73 | ^^^^^^^^^^^^^^^^^^^^^^ | ||
| 74 | |||
| 75 | --------------------- | ||
| 76 | % perf bench sched messaging # run with default | ||
| 77 | options (20 sender and receiver processes per group) | ||
| 78 | (10 groups == 400 processes run) | ||
| 79 | |||
| 80 | Total time:0.308 sec | ||
| 81 | |||
| 82 | % perf bench sched messaging -t -g 20 # multi-threaded, with 20 groups | ||
| 83 | (20 sender and receiver threads per group) | ||
| 84 | (20 groups == 800 threads run) | ||
| 85 | |||
| 86 | Total time:0.582 sec | ||
| 87 | --------------------- | ||
| 88 | |||
| 89 | *pipe*:: | ||
| 90 | Suite for pipe() system call. | ||
| 91 | Based on pipe-test-1m.c by Ingo Molnar. | ||
| 92 | |||
| 93 | Options of *pipe* | ||
| 94 | ^^^^^^^^^^^^^^^^^ | ||
| 95 | -l:: | ||
| 96 | --loop=:: | ||
| 97 | Specify number of loops. | ||
| 98 | |||
| 99 | Example of *pipe* | ||
| 100 | ^^^^^^^^^^^^^^^^^ | ||
| 101 | |||
| 102 | --------------------- | ||
| 103 | % perf bench sched pipe | ||
| 104 | (executing 1000000 pipe operations between two tasks) | ||
| 105 | |||
| 106 | Total time:8.091 sec | ||
| 107 | 8.091833 usecs/op | ||
| 108 | 123581 ops/sec | ||
| 109 | |||
| 110 | % perf bench sched pipe -l 1000 # loop 1000 | ||
| 111 | (executing 1000 pipe operations between two tasks) | ||
| 112 | |||
| 113 | Total time:0.016 sec | ||
| 114 | 16.948000 usecs/op | ||
| 115 | 59004 ops/sec | ||
| 116 | --------------------- | ||
| 117 | |||
| 118 | SEE ALSO | ||
| 119 | -------- | ||
| 120 | linkperf:perf[1] | ||
| 121 |
tools/perf/Makefile
| 1 | # The default target of this Makefile is... | 1 | # The default target of this Makefile is... |
| 2 | all:: | 2 | all:: |
| 3 | 3 | ||
| 4 | # Define V=1 to have a more verbose compile. | 4 | # Define V=1 to have a more verbose compile. |
| 5 | # | 5 | # |
| 6 | # Define SNPRINTF_RETURNS_BOGUS if you are on a system whose snprintf() | 6 | # Define SNPRINTF_RETURNS_BOGUS if you are on a system whose snprintf() |
| 7 | # or vsnprintf() return -1 instead of the number of characters which would | 7 | # or vsnprintf() return -1 instead of the number of characters which would |
| 8 | # have been written to the final string if enough space had been available. | 8 | # have been written to the final string if enough space had been available. |
| 9 | # | 9 | # |
| 10 | # Define FREAD_READS_DIRECTORIES if you are on a system that succeeds | 10 | # Define FREAD_READS_DIRECTORIES if you are on a system that succeeds |
| 11 | # when attempting to read from an fopen'ed directory. | 11 | # when attempting to read from an fopen'ed directory. |
| 12 | # | 12 | # |
| 13 | # Define NO_OPENSSL environment variable if you do not have OpenSSL. | 13 | # Define NO_OPENSSL environment variable if you do not have OpenSSL. |
| 14 | # This also implies MOZILLA_SHA1. | 14 | # This also implies MOZILLA_SHA1. |
| 15 | # | 15 | # |
| 16 | # Define CURLDIR=/foo/bar if your curl header and library files are in | 16 | # Define CURLDIR=/foo/bar if your curl header and library files are in |
| 17 | # /foo/bar/include and /foo/bar/lib directories. | 17 | # /foo/bar/include and /foo/bar/lib directories. |
| 18 | # | 18 | # |
| 19 | # Define EXPATDIR=/foo/bar if your expat header and library files are in | 19 | # Define EXPATDIR=/foo/bar if your expat header and library files are in |
| 20 | # /foo/bar/include and /foo/bar/lib directories. | 20 | # /foo/bar/include and /foo/bar/lib directories. |
| 21 | # | 21 | # |
| 22 | # Define NO_D_INO_IN_DIRENT if you don't have d_ino in your struct dirent. | 22 | # Define NO_D_INO_IN_DIRENT if you don't have d_ino in your struct dirent. |
| 23 | # | 23 | # |
| 24 | # Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks | 24 | # Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks |
| 25 | # d_type in struct dirent (latest Cygwin -- will be fixed soonish). | 25 | # d_type in struct dirent (latest Cygwin -- will be fixed soonish). |
| 26 | # | 26 | # |
| 27 | # Define NO_C99_FORMAT if your formatted IO functions (printf/scanf et.al.) | 27 | # Define NO_C99_FORMAT if your formatted IO functions (printf/scanf et.al.) |
| 28 | # do not support the 'size specifiers' introduced by C99, namely ll, hh, | 28 | # do not support the 'size specifiers' introduced by C99, namely ll, hh, |
| 29 | # j, z, t. (representing long long int, char, intmax_t, size_t, ptrdiff_t). | 29 | # j, z, t. (representing long long int, char, intmax_t, size_t, ptrdiff_t). |
| 30 | # Some C compilers supported these specifiers prior to C99 as an extension. | 30 | # Some C compilers supported these specifiers prior to C99 as an extension. |
| 31 | # | 31 | # |
| 32 | # Define NO_STRCASESTR if you don't have strcasestr. | 32 | # Define NO_STRCASESTR if you don't have strcasestr. |
| 33 | # | 33 | # |
| 34 | # Define NO_MEMMEM if you don't have memmem. | 34 | # Define NO_MEMMEM if you don't have memmem. |
| 35 | # | 35 | # |
| 36 | # Define NO_STRTOUMAX if you don't have strtoumax in the C library. | 36 | # Define NO_STRTOUMAX if you don't have strtoumax in the C library. |
| 37 | # If your compiler also does not support long long or does not have | 37 | # If your compiler also does not support long long or does not have |
| 38 | # strtoull, define NO_STRTOULL. | 38 | # strtoull, define NO_STRTOULL. |
| 39 | # | 39 | # |
| 40 | # Define NO_SETENV if you don't have setenv in the C library. | 40 | # Define NO_SETENV if you don't have setenv in the C library. |
| 41 | # | 41 | # |
| 42 | # Define NO_UNSETENV if you don't have unsetenv in the C library. | 42 | # Define NO_UNSETENV if you don't have unsetenv in the C library. |
| 43 | # | 43 | # |
| 44 | # Define NO_MKDTEMP if you don't have mkdtemp in the C library. | 44 | # Define NO_MKDTEMP if you don't have mkdtemp in the C library. |
| 45 | # | 45 | # |
| 46 | # Define NO_SYS_SELECT_H if you don't have sys/select.h. | 46 | # Define NO_SYS_SELECT_H if you don't have sys/select.h. |
| 47 | # | 47 | # |
| 48 | # Define NO_SYMLINK_HEAD if you never want .perf/HEAD to be a symbolic link. | 48 | # Define NO_SYMLINK_HEAD if you never want .perf/HEAD to be a symbolic link. |
| 49 | # Enable it on Windows. By default, symrefs are still used. | 49 | # Enable it on Windows. By default, symrefs are still used. |
| 50 | # | 50 | # |
| 51 | # Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability | 51 | # Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability |
| 52 | # tests. These tests take up a significant amount of the total test time | 52 | # tests. These tests take up a significant amount of the total test time |
| 53 | # but are not needed unless you plan to talk to SVN repos. | 53 | # but are not needed unless you plan to talk to SVN repos. |
| 54 | # | 54 | # |
| 55 | # Define NO_FINK if you are building on Darwin/Mac OS X, have Fink | 55 | # Define NO_FINK if you are building on Darwin/Mac OS X, have Fink |
| 56 | # installed in /sw, but don't want PERF to link against any libraries | 56 | # installed in /sw, but don't want PERF to link against any libraries |
| 57 | # installed there. If defined you may specify your own (or Fink's) | 57 | # installed there. If defined you may specify your own (or Fink's) |
| 58 | # include directories and library directories by defining CFLAGS | 58 | # include directories and library directories by defining CFLAGS |
| 59 | # and LDFLAGS appropriately. | 59 | # and LDFLAGS appropriately. |
| 60 | # | 60 | # |
| 61 | # Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X, | 61 | # Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X, |
| 62 | # have DarwinPorts installed in /opt/local, but don't want PERF to | 62 | # have DarwinPorts installed in /opt/local, but don't want PERF to |
| 63 | # link against any libraries installed there. If defined you may | 63 | # link against any libraries installed there. If defined you may |
| 64 | # specify your own (or DarwinPort's) include directories and | 64 | # specify your own (or DarwinPort's) include directories and |
| 65 | # library directories by defining CFLAGS and LDFLAGS appropriately. | 65 | # library directories by defining CFLAGS and LDFLAGS appropriately. |
| 66 | # | 66 | # |
| 67 | # Define PPC_SHA1 environment variable when running make to make use of | 67 | # Define PPC_SHA1 environment variable when running make to make use of |
| 68 | # a bundled SHA1 routine optimized for PowerPC. | 68 | # a bundled SHA1 routine optimized for PowerPC. |
| 69 | # | 69 | # |
| 70 | # Define ARM_SHA1 environment variable when running make to make use of | 70 | # Define ARM_SHA1 environment variable when running make to make use of |
| 71 | # a bundled SHA1 routine optimized for ARM. | 71 | # a bundled SHA1 routine optimized for ARM. |
| 72 | # | 72 | # |
| 73 | # Define MOZILLA_SHA1 environment variable when running make to make use of | 73 | # Define MOZILLA_SHA1 environment variable when running make to make use of |
| 74 | # a bundled SHA1 routine coming from Mozilla. It is GPL'd and should be fast | 74 | # a bundled SHA1 routine coming from Mozilla. It is GPL'd and should be fast |
| 75 | # on non-x86 architectures (e.g. PowerPC), while the OpenSSL version (default | 75 | # on non-x86 architectures (e.g. PowerPC), while the OpenSSL version (default |
| 76 | # choice) has a very fast version optimized for i586. | 76 | # choice) has a very fast version optimized for i586. |
| 77 | # | 77 | # |
| 78 | # Define NEEDS_SSL_WITH_CRYPTO if you need -lcrypto with -lssl (Darwin). | 78 | # Define NEEDS_SSL_WITH_CRYPTO if you need -lcrypto with -lssl (Darwin). |
| 79 | # | 79 | # |
| 80 | # Define NEEDS_LIBICONV if linking with libc is not enough (Darwin). | 80 | # Define NEEDS_LIBICONV if linking with libc is not enough (Darwin). |
| 81 | # | 81 | # |
| 82 | # Define NEEDS_SOCKET if linking with libc is not enough (SunOS, | 82 | # Define NEEDS_SOCKET if linking with libc is not enough (SunOS, |
| 83 | # Patrick Mauritz). | 83 | # Patrick Mauritz). |
| 84 | # | 84 | # |
| 85 | # Define NO_MMAP if you want to avoid mmap. | 85 | # Define NO_MMAP if you want to avoid mmap. |
| 86 | # | 86 | # |
| 87 | # Define NO_PTHREADS if you do not have or do not want to use Pthreads. | 87 | # Define NO_PTHREADS if you do not have or do not want to use Pthreads. |
| 88 | # | 88 | # |
| 89 | # Define NO_PREAD if you have a problem with pread() system call (e.g. | 89 | # Define NO_PREAD if you have a problem with pread() system call (e.g. |
| 90 | # cygwin.dll before v1.5.22). | 90 | # cygwin.dll before v1.5.22). |
| 91 | # | 91 | # |
| 92 | # Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is | 92 | # Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is |
| 93 | # generally faster on your platform than accessing the working directory. | 93 | # generally faster on your platform than accessing the working directory. |
| 94 | # | 94 | # |
| 95 | # Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support | 95 | # Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support |
| 96 | # the executable mode bit, but doesn't really do so. | 96 | # the executable mode bit, but doesn't really do so. |
| 97 | # | 97 | # |
| 98 | # Define NO_IPV6 if you lack IPv6 support and getaddrinfo(). | 98 | # Define NO_IPV6 if you lack IPv6 support and getaddrinfo(). |
| 99 | # | 99 | # |
| 100 | # Define NO_SOCKADDR_STORAGE if your platform does not have struct | 100 | # Define NO_SOCKADDR_STORAGE if your platform does not have struct |
| 101 | # sockaddr_storage. | 101 | # sockaddr_storage. |
| 102 | # | 102 | # |
| 103 | # Define NO_ICONV if your libc does not properly support iconv. | 103 | # Define NO_ICONV if your libc does not properly support iconv. |
| 104 | # | 104 | # |
| 105 | # Define OLD_ICONV if your library has an old iconv(), where the second | 105 | # Define OLD_ICONV if your library has an old iconv(), where the second |
| 106 | # (input buffer pointer) parameter is declared with type (const char **). | 106 | # (input buffer pointer) parameter is declared with type (const char **). |
| 107 | # | 107 | # |
| 108 | # Define NO_DEFLATE_BOUND if your zlib does not have deflateBound. | 108 | # Define NO_DEFLATE_BOUND if your zlib does not have deflateBound. |
| 109 | # | 109 | # |
| 110 | # Define NO_R_TO_GCC_LINKER if your gcc does not like "-R/path/lib" | 110 | # Define NO_R_TO_GCC_LINKER if your gcc does not like "-R/path/lib" |
| 111 | # that tells runtime paths to dynamic libraries; | 111 | # that tells runtime paths to dynamic libraries; |
| 112 | # "-Wl,-rpath=/path/lib" is used instead. | 112 | # "-Wl,-rpath=/path/lib" is used instead. |
| 113 | # | 113 | # |
| 114 | # Define USE_NSEC below if you want perf to care about sub-second file mtimes | 114 | # Define USE_NSEC below if you want perf to care about sub-second file mtimes |
| 115 | # and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and | 115 | # and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and |
| 116 | # it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely | 116 | # it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely |
| 117 | # randomly break unless your underlying filesystem supports those sub-second | 117 | # randomly break unless your underlying filesystem supports those sub-second |
| 118 | # times (my ext3 doesn't). | 118 | # times (my ext3 doesn't). |
| 119 | # | 119 | # |
| 120 | # Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of | 120 | # Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of |
| 121 | # "st_ctim" | 121 | # "st_ctim" |
| 122 | # | 122 | # |
| 123 | # Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec" | 123 | # Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec" |
| 124 | # available. This automatically turns USE_NSEC off. | 124 | # available. This automatically turns USE_NSEC off. |
| 125 | # | 125 | # |
| 126 | # Define USE_STDEV below if you want perf to treat a change of the underlying | 126 | # Define USE_STDEV below if you want perf to treat a change of the underlying |
| 127 | # device as an inode change from the update-index perspective. | 127 | # device as an inode change from the update-index perspective. |
| 128 | # | 128 | # |
| 129 | # Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have the | 129 | # Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have the |
| 130 | # st_blocks field that counts the on-disk footprint in 512-byte blocks. | 130 | # st_blocks field that counts the on-disk footprint in 512-byte blocks. |
| 131 | # | 131 | # |
| 132 | # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 | 132 | # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 |
| 133 | # | 133 | # |
| 134 | # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. | 134 | # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. |
| 135 | # | 135 | # |
| 136 | # Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's | 136 | # Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's |
| 137 | # MakeMaker (e.g. using ActiveState under Cygwin). | 137 | # MakeMaker (e.g. using ActiveState under Cygwin). |
| 138 | # | 138 | # |
| 139 | # Define NO_PERL if you do not want Perl scripts or libraries at all. | 139 | # Define NO_PERL if you do not want Perl scripts or libraries at all. |
| 140 | # | 140 | # |
| 141 | # Define INTERNAL_QSORT to use Git's implementation of qsort(), which | 141 | # Define INTERNAL_QSORT to use Git's implementation of qsort(), which |
| 142 | # is a simplified version of the merge sort used in glibc. This is | 142 | # is a simplified version of the merge sort used in glibc. This is |
| 143 | # recommended if Git triggers O(n^2) behavior in your platform's qsort(). | 143 | # recommended if Git triggers O(n^2) behavior in your platform's qsort(). |
| 144 | # | 144 | # |
| 145 | # Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call | 145 | # Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call |
| 146 | # your external grep (e.g., if your system lacks grep, if its grep is | 146 | # your external grep (e.g., if your system lacks grep, if its grep is |
| 147 | # broken, or if spawning an external process is slower than perf's built-in grep). | 147 | # broken, or if spawning an external process is slower than perf's built-in grep). |
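[Editor's note] All of the NO_*/USE_* switches above are ordinary make variables, so they can be set per-invocation or persisted in config.mak (which this Makefile includes further down via -include) rather than edited in place. A minimal sketch, with an arbitrary pair of knobs:

    # one-off build on a system without IPv6 and with a broken strcasestr
    $ make NO_IPV6=1 NO_STRCASESTR=1

    # or record the choice once; config.mak is picked up automatically
    $ echo 'NO_IPV6 = 1' >> config.mak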
| 148 | 148 | ||
| 149 | PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE | 149 | PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE |
| 150 | @$(SHELL_PATH) util/PERF-VERSION-GEN | 150 | @$(SHELL_PATH) util/PERF-VERSION-GEN |
| 151 | -include PERF-VERSION-FILE | 151 | -include PERF-VERSION-FILE |
| 152 | 152 | ||
| 153 | uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') | 153 | uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') |
| 154 | uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not') | 154 | uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not') |
| 155 | uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not') | 155 | uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not') |
| 156 | uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') | 156 | uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') |
| 157 | uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') | 157 | uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') |
| 158 | uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') | 158 | uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') |
| 159 | 159 | ||
| 160 | # | 160 | # |
| 161 | # Add -m32 for cross-builds: | 161 | # Add -m32 for cross-builds: |
| 162 | # | 162 | # |
| 163 | ifdef NO_64BIT | 163 | ifdef NO_64BIT |
| 164 | MBITS := -m32 | 164 | MBITS := -m32 |
| 165 | else | 165 | else |
| 166 | # | 166 | # |
| 167 | # If we're on a 64-bit kernel, use -m64: | 167 | # If we're on a 64-bit kernel, use -m64: |
| 168 | # | 168 | # |
| 169 | ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) | 169 | ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) |
| 170 | MBITS := -m64 | 170 | MBITS := -m64 |
| 171 | endif | 171 | endif |
| 172 | endif | 172 | endif |
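[Editor's note] The ifneq above is a patsubst idiom: stripping a trailing "64" from $(uname_M) changes the string only when the machine name ends in 64, so a changed string means a 64-bit kernel. A worked example, assuming uname -m reports x86_64:

    $(patsubst %64,%,x86_64)  ->  x86_   # differs from x86_64, so MBITS := -m64
    $(patsubst %64,%,i686)    ->  i686   # unchanged, so MBITS stays empty

Passing NO_64BIT=1 on the make command line bypasses the detection and forces -m32.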
| 173 | 173 | ||
| 174 | # CFLAGS and LDFLAGS are for the users to override from the command line. | 174 | # CFLAGS and LDFLAGS are for the users to override from the command line. |
| 175 | 175 | ||
| 176 | # | 176 | # |
| 177 | # Include saner warnings here, which can catch bugs: | 177 | # Include saner warnings here, which can catch bugs: |
| 178 | # | 178 | # |
| 179 | 179 | ||
| 180 | EXTRA_WARNINGS := -Wformat | 180 | EXTRA_WARNINGS := -Wformat |
| 181 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-security | 181 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-security |
| 182 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-y2k | 182 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-y2k |
| 183 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow | 183 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow |
| 184 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self | 184 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self |
| 185 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked | 185 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked |
| 186 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls | 186 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls |
| 187 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector | 187 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector |
| 188 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3 | 188 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3 |
| 189 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default | 189 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default |
| 190 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum | 190 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum |
| 191 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers | 191 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers |
| 192 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef | 192 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef |
| 193 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var | 193 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var |
| 194 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings | 194 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings |
| 195 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast | 195 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast |
| 196 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations | 196 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations |
| 197 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-prototypes | 197 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-prototypes |
| 198 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wnested-externs | 198 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wnested-externs |
| 199 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition | 199 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition |
| 200 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes | 200 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes |
| 201 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement | 201 | EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement |
| 202 | 202 | ||
| 203 | ifeq ("$(origin DEBUG)", "command line") | 203 | ifeq ("$(origin DEBUG)", "command line") |
| 204 | PERF_DEBUG = $(DEBUG) | 204 | PERF_DEBUG = $(DEBUG) |
| 205 | endif | 205 | endif |
| 206 | ifndef PERF_DEBUG | 206 | ifndef PERF_DEBUG |
| 207 | CFLAGS_OPTIMIZE = -O6 | 207 | CFLAGS_OPTIMIZE = -O6 |
| 208 | endif | 208 | endif |
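[Editor's note] Because of the $(origin) test, optimization is dropped only when DEBUG is given on the make command line; a DEBUG variable inherited from the environment leaves the optimized build in place. (As an aside, GCC is generally understood to clamp optimization levels above 3, so -O6 should behave like -O3.) For example:

    $ make DEBUG=1    # origin is "command line": no CFLAGS_OPTIMIZE, -ggdb3 only
    $ DEBUG=1 make    # origin is "environment": still built with -O6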
| 209 | 209 | ||
| 210 | CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) | 210 | CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) |
| 211 | LDFLAGS = -lpthread -lrt -lelf -lm | 211 | LDFLAGS = -lpthread -lrt -lelf -lm |
| 212 | ALL_CFLAGS = $(CFLAGS) | 212 | ALL_CFLAGS = $(CFLAGS) |
| 213 | ALL_LDFLAGS = $(LDFLAGS) | 213 | ALL_LDFLAGS = $(LDFLAGS) |
| 214 | STRIP ?= strip | 214 | STRIP ?= strip |
| 215 | 215 | ||
| 216 | # Among the variables below, these: | 216 | # Among the variables below, these: |
| 217 | # perfexecdir | 217 | # perfexecdir |
| 218 | # template_dir | 218 | # template_dir |
| 219 | # mandir | 219 | # mandir |
| 220 | # infodir | 220 | # infodir |
| 221 | # htmldir | 221 | # htmldir |
| 222 | # ETC_PERFCONFIG (but not sysconfdir) | 222 | # ETC_PERFCONFIG (but not sysconfdir) |
| 223 | # can be specified as a relative path some/where/else; | 223 | # can be specified as a relative path some/where/else; |
| 224 | # such a path is interpreted as relative to $(prefix), and at runtime "perf" | 224 | # such a path is interpreted as relative to $(prefix), and at runtime "perf" |
| 225 | # figures out where they are based on the path to the executable. | 225 | # figures out where they are based on the path to the executable. |
| 226 | # This can help installing the suite in a relocatable way. | 226 | # This can help installing the suite in a relocatable way. |
| 227 | 227 | ||
| 228 | prefix = $(HOME) | 228 | prefix = $(HOME) |
| 229 | bindir_relative = bin | 229 | bindir_relative = bin |
| 230 | bindir = $(prefix)/$(bindir_relative) | 230 | bindir = $(prefix)/$(bindir_relative) |
| 231 | mandir = share/man | 231 | mandir = share/man |
| 232 | infodir = share/info | 232 | infodir = share/info |
| 233 | perfexecdir = libexec/perf-core | 233 | perfexecdir = libexec/perf-core |
| 234 | sharedir = $(prefix)/share | 234 | sharedir = $(prefix)/share |
| 235 | template_dir = share/perf-core/templates | 235 | template_dir = share/perf-core/templates |
| 236 | htmldir = share/doc/perf-doc | 236 | htmldir = share/doc/perf-doc |
| 237 | ifeq ($(prefix),/usr) | 237 | ifeq ($(prefix),/usr) |
| 238 | sysconfdir = /etc | 238 | sysconfdir = /etc |
| 239 | ETC_PERFCONFIG = $(sysconfdir)/perfconfig | 239 | ETC_PERFCONFIG = $(sysconfdir)/perfconfig |
| 240 | else | 240 | else |
| 241 | sysconfdir = $(prefix)/etc | 241 | sysconfdir = $(prefix)/etc |
| 242 | ETC_PERFCONFIG = etc/perfconfig | 242 | ETC_PERFCONFIG = etc/perfconfig |
| 243 | endif | 243 | endif |
| 244 | lib = lib | 244 | lib = lib |
| 245 | # DESTDIR= | 245 | # DESTDIR= |
| 246 | 246 | ||
| 247 | export prefix bindir sharedir sysconfdir | 247 | export prefix bindir sharedir sysconfdir |
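[Editor's note] To see how the relative directories compose with prefix and DESTDIR, here is a sketch of a staged system-wide install (paths illustrative):

    $ make prefix=/usr DESTDIR=/tmp/stage install
    # perf is installed to /tmp/stage/usr/bin, mandir resolves to
    # /usr/share/man, and because prefix is /usr, ETC_PERFCONFIG
    # becomes the absolute /etc/perfconfig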
| 248 | 248 | ||
| 249 | CC = gcc | 249 | CC = gcc |
| 250 | AR = ar | 250 | AR = ar |
| 251 | RM = rm -f | 251 | RM = rm -f |
| 252 | TAR = tar | 252 | TAR = tar |
| 253 | FIND = find | 253 | FIND = find |
| 254 | INSTALL = install | 254 | INSTALL = install |
| 255 | RPMBUILD = rpmbuild | 255 | RPMBUILD = rpmbuild |
| 256 | PTHREAD_LIBS = -lpthread | 256 | PTHREAD_LIBS = -lpthread |
| 257 | 257 | ||
| 258 | # sparse is architecture-neutral, which means that we need to tell it | 258 | # sparse is architecture-neutral, which means that we need to tell it |
| 259 | # explicitly which architecture to check for. Fix this up for yours. | 259 | # explicitly which architecture to check for. Fix this up for yours. |
| 260 | SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ | 260 | SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ |
| 261 | 261 | ||
| 262 | ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null >/dev/null 2>&1 && echo y"), y) | 262 | ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null >/dev/null 2>&1 && echo y"), y) |
| 263 | CFLAGS := $(CFLAGS) -fstack-protector-all | 263 | CFLAGS := $(CFLAGS) -fstack-protector-all |
| 264 | endif | 264 | endif |
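[Editor's note] The stack-protector test above is a self-contained feature probe: a throwaway C snippet is piped into $(CC) with the candidate flag plus -Werror, and the flag is added only if the compile succeeds. The same pattern works for any flag; a sketch probing a different (purely illustrative) option:

    ifeq ($(shell sh -c "echo 'int main(void) { return 0; }' | $(CC) -x c -c -Werror -fno-omit-frame-pointer - -o /dev/null >/dev/null 2>&1 && echo y"), y)
        CFLAGS := $(CFLAGS) -fno-omit-frame-pointer
    endif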
| 265 | 265 | ||
| 266 | 266 | ||
| 267 | ### --- END CONFIGURATION SECTION --- | 267 | ### --- END CONFIGURATION SECTION --- |
| 268 | 268 | ||
| 269 | # Those must not be GNU-specific; they are shared with perl/ which may | 269 | # Those must not be GNU-specific; they are shared with perl/ which may |
| 270 | # be built by a different compiler. (Note that this is an artifact now | 270 | # be built by a different compiler. (Note that this is an artifact now |
| 271 | # but it still might be nice to keep that distinction.) | 271 | # but it still might be nice to keep that distinction.) |
| 272 | BASIC_CFLAGS = -Iutil/include | 272 | BASIC_CFLAGS = -Iutil/include |
| 273 | BASIC_LDFLAGS = | 273 | BASIC_LDFLAGS = |
| 274 | 274 | ||
| 275 | # Guard against environment variables | 275 | # Guard against environment variables |
| 276 | BUILTIN_OBJS = | 276 | BUILTIN_OBJS = |
| 277 | BUILT_INS = | 277 | BUILT_INS = |
| 278 | COMPAT_CFLAGS = | 278 | COMPAT_CFLAGS = |
| 279 | COMPAT_OBJS = | 279 | COMPAT_OBJS = |
| 280 | LIB_H = | 280 | LIB_H = |
| 281 | LIB_OBJS = | 281 | LIB_OBJS = |
| 282 | SCRIPT_PERL = | 282 | SCRIPT_PERL = |
| 283 | SCRIPT_SH = | 283 | SCRIPT_SH = |
| 284 | TEST_PROGRAMS = | 284 | TEST_PROGRAMS = |
| 285 | 285 | ||
| 286 | # | 286 | # |
| 287 | # No scripts right now: | 287 | # No scripts right now: |
| 288 | # | 288 | # |
| 289 | 289 | ||
| 290 | # SCRIPT_SH += perf-am.sh | 290 | # SCRIPT_SH += perf-am.sh |
| 291 | 291 | ||
| 292 | # | 292 | # |
| 293 | # No Perl scripts right now: | 293 | # No Perl scripts right now: |
| 294 | # | 294 | # |
| 295 | 295 | ||
| 296 | # SCRIPT_PERL += perf-add--interactive.perl | 296 | # SCRIPT_PERL += perf-add--interactive.perl |
| 297 | 297 | ||
| 298 | SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \ | 298 | SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \ |
| 299 | $(patsubst %.perl,%,$(SCRIPT_PERL)) | 299 | $(patsubst %.perl,%,$(SCRIPT_PERL)) |
| 300 | 300 | ||
| 301 | # Empty... | 301 | # Empty... |
| 302 | EXTRA_PROGRAMS = | 302 | EXTRA_PROGRAMS = |
| 303 | 303 | ||
| 304 | # ... and all the rest that could be moved out of bindir to perfexecdir | 304 | # ... and all the rest that could be moved out of bindir to perfexecdir |
| 305 | PROGRAMS += $(EXTRA_PROGRAMS) | 305 | PROGRAMS += $(EXTRA_PROGRAMS) |
| 306 | 306 | ||
| 307 | # | 307 | # |
| 308 | # Single 'perf' binary right now: | 308 | # Single 'perf' binary right now: |
| 309 | # | 309 | # |
| 310 | PROGRAMS += perf | 310 | PROGRAMS += perf |
| 311 | 311 | ||
| 312 | # List built-in command $C whose implementation cmd_$C() is not in | 312 | # List built-in command $C whose implementation cmd_$C() is not in |
| 313 | # builtin-$C.o but is linked in as part of some other command. | 313 | # builtin-$C.o but is linked in as part of some other command. |
| 314 | # | 314 | # |
| 315 | # None right now: | 315 | # None right now: |
| 316 | # | 316 | # |
| 317 | # BUILT_INS += perf-init $X | 317 | # BUILT_INS += perf-init $X |
| 318 | 318 | ||
| 319 | # what 'all' will build and 'install' will install, in perfexecdir | 319 | # what 'all' will build and 'install' will install, in perfexecdir |
| 320 | ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) | 320 | ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) |
| 321 | 321 | ||
| 322 | # what 'all' will build but not install in perfexecdir | 322 | # what 'all' will build but not install in perfexecdir |
| 323 | OTHER_PROGRAMS = perf$X | 323 | OTHER_PROGRAMS = perf$X |
| 324 | 324 | ||
| 325 | # Set paths to tools early so that they can be used for version tests. | 325 | # Set paths to tools early so that they can be used for version tests. |
| 326 | ifndef SHELL_PATH | 326 | ifndef SHELL_PATH |
| 327 | SHELL_PATH = /bin/sh | 327 | SHELL_PATH = /bin/sh |
| 328 | endif | 328 | endif |
| 329 | ifndef PERL_PATH | 329 | ifndef PERL_PATH |
| 330 | PERL_PATH = /usr/bin/perl | 330 | PERL_PATH = /usr/bin/perl |
| 331 | endif | 331 | endif |
| 332 | 332 | ||
| 333 | export PERL_PATH | 333 | export PERL_PATH |
| 334 | 334 | ||
| 335 | LIB_FILE=libperf.a | 335 | LIB_FILE=libperf.a |
| 336 | 336 | ||
| 337 | LIB_H += ../../include/linux/perf_event.h | 337 | LIB_H += ../../include/linux/perf_event.h |
| 338 | LIB_H += ../../include/linux/rbtree.h | 338 | LIB_H += ../../include/linux/rbtree.h |
| 339 | LIB_H += ../../include/linux/list.h | 339 | LIB_H += ../../include/linux/list.h |
| 340 | LIB_H += util/include/linux/bitmap.h | 340 | LIB_H += util/include/linux/bitmap.h |
| 341 | LIB_H += util/include/linux/bitops.h | 341 | LIB_H += util/include/linux/bitops.h |
| 342 | LIB_H += util/include/linux/compiler.h | 342 | LIB_H += util/include/linux/compiler.h |
| 343 | LIB_H += util/include/linux/ctype.h | 343 | LIB_H += util/include/linux/ctype.h |
| 344 | LIB_H += util/include/linux/kernel.h | 344 | LIB_H += util/include/linux/kernel.h |
| 345 | LIB_H += util/include/linux/list.h | 345 | LIB_H += util/include/linux/list.h |
| 346 | LIB_H += util/include/linux/module.h | 346 | LIB_H += util/include/linux/module.h |
| 347 | LIB_H += util/include/linux/poison.h | 347 | LIB_H += util/include/linux/poison.h |
| 348 | LIB_H += util/include/linux/prefetch.h | 348 | LIB_H += util/include/linux/prefetch.h |
| 349 | LIB_H += util/include/linux/rbtree.h | 349 | LIB_H += util/include/linux/rbtree.h |
| 350 | LIB_H += util/include/linux/string.h | 350 | LIB_H += util/include/linux/string.h |
| 351 | LIB_H += util/include/linux/types.h | 351 | LIB_H += util/include/linux/types.h |
| 352 | LIB_H += util/include/asm/asm-offsets.h | 352 | LIB_H += util/include/asm/asm-offsets.h |
| 353 | LIB_H += util/include/asm/bitops.h | 353 | LIB_H += util/include/asm/bitops.h |
| 354 | LIB_H += util/include/asm/byteorder.h | 354 | LIB_H += util/include/asm/byteorder.h |
| 355 | LIB_H += util/include/asm/swab.h | 355 | LIB_H += util/include/asm/swab.h |
| 356 | LIB_H += util/include/asm/system.h | 356 | LIB_H += util/include/asm/system.h |
| 357 | LIB_H += util/include/asm/uaccess.h | 357 | LIB_H += util/include/asm/uaccess.h |
| 358 | LIB_H += perf.h | 358 | LIB_H += perf.h |
| 359 | LIB_H += util/debugfs.h | 359 | LIB_H += util/debugfs.h |
| 360 | LIB_H += util/event.h | 360 | LIB_H += util/event.h |
| 361 | LIB_H += util/types.h | 361 | LIB_H += util/types.h |
| 362 | LIB_H += util/levenshtein.h | 362 | LIB_H += util/levenshtein.h |
| 363 | LIB_H += util/parse-options.h | 363 | LIB_H += util/parse-options.h |
| 364 | LIB_H += util/parse-events.h | 364 | LIB_H += util/parse-events.h |
| 365 | LIB_H += util/quote.h | 365 | LIB_H += util/quote.h |
| 366 | LIB_H += util/util.h | 366 | LIB_H += util/util.h |
| 367 | LIB_H += util/help.h | 367 | LIB_H += util/help.h |
| 368 | LIB_H += util/strbuf.h | 368 | LIB_H += util/strbuf.h |
| 369 | LIB_H += util/string.h | 369 | LIB_H += util/string.h |
| 370 | LIB_H += util/strlist.h | 370 | LIB_H += util/strlist.h |
| 371 | LIB_H += util/run-command.h | 371 | LIB_H += util/run-command.h |
| 372 | LIB_H += util/sigchain.h | 372 | LIB_H += util/sigchain.h |
| 373 | LIB_H += util/symbol.h | 373 | LIB_H += util/symbol.h |
| 374 | LIB_H += util/color.h | 374 | LIB_H += util/color.h |
| 375 | LIB_H += util/values.h | 375 | LIB_H += util/values.h |
| 376 | LIB_H += util/sort.h | 376 | LIB_H += util/sort.h |
| 377 | LIB_H += util/hist.h | 377 | LIB_H += util/hist.h |
| 378 | LIB_H += util/thread.h | 378 | LIB_H += util/thread.h |
| 379 | LIB_H += util/data_map.h | 379 | LIB_H += util/data_map.h |
| 380 | 380 | ||
| 381 | LIB_OBJS += util/abspath.o | 381 | LIB_OBJS += util/abspath.o |
| 382 | LIB_OBJS += util/alias.o | 382 | LIB_OBJS += util/alias.o |
| 383 | LIB_OBJS += util/config.o | 383 | LIB_OBJS += util/config.o |
| 384 | LIB_OBJS += util/ctype.o | 384 | LIB_OBJS += util/ctype.o |
| 385 | LIB_OBJS += util/debugfs.o | 385 | LIB_OBJS += util/debugfs.o |
| 386 | LIB_OBJS += util/environment.o | 386 | LIB_OBJS += util/environment.o |
| 387 | LIB_OBJS += util/event.o | 387 | LIB_OBJS += util/event.o |
| 388 | LIB_OBJS += util/exec_cmd.o | 388 | LIB_OBJS += util/exec_cmd.o |
| 389 | LIB_OBJS += util/help.o | 389 | LIB_OBJS += util/help.o |
| 390 | LIB_OBJS += util/levenshtein.o | 390 | LIB_OBJS += util/levenshtein.o |
| 391 | LIB_OBJS += util/parse-options.o | 391 | LIB_OBJS += util/parse-options.o |
| 392 | LIB_OBJS += util/parse-events.o | 392 | LIB_OBJS += util/parse-events.o |
| 393 | LIB_OBJS += util/path.o | 393 | LIB_OBJS += util/path.o |
| 394 | LIB_OBJS += util/rbtree.o | 394 | LIB_OBJS += util/rbtree.o |
| 395 | LIB_OBJS += util/bitmap.o | 395 | LIB_OBJS += util/bitmap.o |
| 396 | LIB_OBJS += util/hweight.o | 396 | LIB_OBJS += util/hweight.o |
| 397 | LIB_OBJS += util/find_next_bit.o | 397 | LIB_OBJS += util/find_next_bit.o |
| 398 | LIB_OBJS += util/run-command.o | 398 | LIB_OBJS += util/run-command.o |
| 399 | LIB_OBJS += util/quote.o | 399 | LIB_OBJS += util/quote.o |
| 400 | LIB_OBJS += util/strbuf.o | 400 | LIB_OBJS += util/strbuf.o |
| 401 | LIB_OBJS += util/string.o | 401 | LIB_OBJS += util/string.o |
| 402 | LIB_OBJS += util/strlist.o | 402 | LIB_OBJS += util/strlist.o |
| 403 | LIB_OBJS += util/usage.o | 403 | LIB_OBJS += util/usage.o |
| 404 | LIB_OBJS += util/wrapper.o | 404 | LIB_OBJS += util/wrapper.o |
| 405 | LIB_OBJS += util/sigchain.o | 405 | LIB_OBJS += util/sigchain.o |
| 406 | LIB_OBJS += util/symbol.o | 406 | LIB_OBJS += util/symbol.o |
| 407 | LIB_OBJS += util/color.o | 407 | LIB_OBJS += util/color.o |
| 408 | LIB_OBJS += util/pager.o | 408 | LIB_OBJS += util/pager.o |
| 409 | LIB_OBJS += util/header.o | 409 | LIB_OBJS += util/header.o |
| 410 | LIB_OBJS += util/callchain.o | 410 | LIB_OBJS += util/callchain.o |
| 411 | LIB_OBJS += util/values.o | 411 | LIB_OBJS += util/values.o |
| 412 | LIB_OBJS += util/debug.o | 412 | LIB_OBJS += util/debug.o |
| 413 | LIB_OBJS += util/map.o | 413 | LIB_OBJS += util/map.o |
| 414 | LIB_OBJS += util/thread.o | 414 | LIB_OBJS += util/thread.o |
| 415 | LIB_OBJS += util/trace-event-parse.o | 415 | LIB_OBJS += util/trace-event-parse.o |
| 416 | LIB_OBJS += util/trace-event-read.o | 416 | LIB_OBJS += util/trace-event-read.o |
| 417 | LIB_OBJS += util/trace-event-info.o | 417 | LIB_OBJS += util/trace-event-info.o |
| 418 | LIB_OBJS += util/svghelper.o | 418 | LIB_OBJS += util/svghelper.o |
| 419 | LIB_OBJS += util/sort.o | 419 | LIB_OBJS += util/sort.o |
| 420 | LIB_OBJS += util/hist.o | 420 | LIB_OBJS += util/hist.o |
| 421 | LIB_OBJS += util/data_map.o | 421 | LIB_OBJS += util/data_map.o |
| 422 | 422 | ||
| 423 | BUILTIN_OBJS += builtin-annotate.o | 423 | BUILTIN_OBJS += builtin-annotate.o |
| 424 | |||
| 425 | BUILTIN_OBJS += builtin-bench.o | ||
| 426 | |||
| 427 | # Benchmark modules | ||
| 428 | BUILTIN_OBJS += bench/sched-messaging.o | ||
| 429 | BUILTIN_OBJS += bench/sched-pipe.o | ||
| 430 | |||
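[Editor's note] These right-column-only rows are the heart of this hunk: they link the new 'perf bench' built-in and its two scheduler benchmark modules into the perf binary. Once built, the suites documented in perf-bench.txt (added in this same merge) are run as, for example:

    $ perf bench sched messaging
    $ perf bench sched pipe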
| 424 | BUILTIN_OBJS += builtin-help.o | 431 | BUILTIN_OBJS += builtin-help.o |
| 425 | BUILTIN_OBJS += builtin-sched.o | 432 | BUILTIN_OBJS += builtin-sched.o |
| 426 | BUILTIN_OBJS += builtin-list.o | 433 | BUILTIN_OBJS += builtin-list.o |
| 427 | BUILTIN_OBJS += builtin-record.o | 434 | BUILTIN_OBJS += builtin-record.o |
| 428 | BUILTIN_OBJS += builtin-report.o | 435 | BUILTIN_OBJS += builtin-report.o |
| 429 | BUILTIN_OBJS += builtin-stat.o | 436 | BUILTIN_OBJS += builtin-stat.o |
| 430 | BUILTIN_OBJS += builtin-timechart.o | 437 | BUILTIN_OBJS += builtin-timechart.o |
| 431 | BUILTIN_OBJS += builtin-top.o | 438 | BUILTIN_OBJS += builtin-top.o |
| 432 | BUILTIN_OBJS += builtin-trace.o | 439 | BUILTIN_OBJS += builtin-trace.o |
| 433 | 440 | ||
| 434 | PERFLIBS = $(LIB_FILE) | 441 | PERFLIBS = $(LIB_FILE) |
| 435 | 442 | ||
| 436 | # | 443 | # |
| 437 | # Platform specific tweaks | 444 | # Platform specific tweaks |
| 438 | # | 445 | # |
| 439 | 446 | ||
| 440 | # We choose to avoid "if .. else if .. else .. endif endif" | 447 | # We choose to avoid "if .. else if .. else .. endif endif" |
| 441 | # because maintaining the nesting to match is a pain. If | 448 | # because maintaining the nesting to match is a pain. If |
| 442 | # we had "elif" things would have been much nicer... | 449 | # we had "elif" things would have been much nicer... |
| 443 | 450 | ||
| 444 | -include config.mak.autogen | 451 | -include config.mak.autogen |
| 445 | -include config.mak | 452 | -include config.mak |
| 446 | 453 | ||
| 447 | ifeq ($(uname_S),Darwin) | 454 | ifeq ($(uname_S),Darwin) |
| 448 | ifndef NO_FINK | 455 | ifndef NO_FINK |
| 449 | ifeq ($(shell test -d /sw/lib && echo y),y) | 456 | ifeq ($(shell test -d /sw/lib && echo y),y) |
| 450 | BASIC_CFLAGS += -I/sw/include | 457 | BASIC_CFLAGS += -I/sw/include |
| 451 | BASIC_LDFLAGS += -L/sw/lib | 458 | BASIC_LDFLAGS += -L/sw/lib |
| 452 | endif | 459 | endif |
| 453 | endif | 460 | endif |
| 454 | ifndef NO_DARWIN_PORTS | 461 | ifndef NO_DARWIN_PORTS |
| 455 | ifeq ($(shell test -d /opt/local/lib && echo y),y) | 462 | ifeq ($(shell test -d /opt/local/lib && echo y),y) |
| 456 | BASIC_CFLAGS += -I/opt/local/include | 463 | BASIC_CFLAGS += -I/opt/local/include |
| 457 | BASIC_LDFLAGS += -L/opt/local/lib | 464 | BASIC_LDFLAGS += -L/opt/local/lib |
| 458 | endif | 465 | endif |
| 459 | endif | 466 | endif |
| 460 | PTHREAD_LIBS = | 467 | PTHREAD_LIBS = |
| 461 | endif | 468 | endif |
| 462 | 469 | ||
| 463 | ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) | 470 | ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) |
| 464 | ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) | 471 | ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) |
| 465 | BASIC_CFLAGS += -DLIBELF_NO_MMAP | 472 | BASIC_CFLAGS += -DLIBELF_NO_MMAP |
| 466 | endif | 473 | endif |
| 467 | else | 474 | else |
| 468 | msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); | 475 | msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); |
| 469 | endif | 476 | endif |
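[Editor's note] libelf is the one hard requirement in this section: if even the plain elf_begin() probe fails to link, the build aborts with $(error); the ELF_C_READ_MMAP probe merely toggles -DLIBELF_NO_MMAP. On common distributions the fix is to install the development package named in the message, e.g. (package names taken from the error text, invocations assumed):

    $ apt-get install libelf-dev              # Debian/Ubuntu
    $ yum install elfutils-libelf-devel       # Fedora/RHEL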
| 470 | 477 | ||
| 471 | ifdef NO_DEMANGLE | 478 | ifdef NO_DEMANGLE |
| 472 | BASIC_CFLAGS += -DNO_DEMANGLE | 479 | BASIC_CFLAGS += -DNO_DEMANGLE |
| 473 | else | 480 | else |
| 474 | has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd > /dev/null 2>&1 && echo y") | 481 | has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd > /dev/null 2>&1 && echo y") |
| 475 | 482 | ||
| 476 | ifeq ($(has_bfd),y) | 483 | ifeq ($(has_bfd),y) |
| 477 | EXTLIBS += -lbfd | 484 | EXTLIBS += -lbfd |
| 478 | else | 485 | else |
| 479 | has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty > /dev/null 2>&1 && echo y") | 486 | has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty > /dev/null 2>&1 && echo y") |
| 480 | ifeq ($(has_bfd_iberty),y) | 487 | ifeq ($(has_bfd_iberty),y) |
| 481 | EXTLIBS += -lbfd -liberty | 488 | EXTLIBS += -lbfd -liberty |
| 482 | else | 489 | else |
| 483 | has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y") | 490 | has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y") |
| 484 | ifeq ($(has_bfd_iberty_z),y) | 491 | ifeq ($(has_bfd_iberty_z),y) |
| 485 | EXTLIBS += -lbfd -liberty -lz | 492 | EXTLIBS += -lbfd -liberty -lz |
| 486 | else | 493 | else |
| 487 | has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -liberty > /dev/null 2>&1 && echo y") | 494 | has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -liberty > /dev/null 2>&1 && echo y") |
| 488 | ifeq ($(has_cplus_demangle),y) | 495 | ifeq ($(has_cplus_demangle),y) |
| 489 | EXTLIBS += -liberty | 496 | EXTLIBS += -liberty |
| 490 | BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE | 497 | BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE |
| 491 | else | 498 | else |
| 492 | msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling) | 499 | msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling) |
| 493 | BASIC_CFLAGS += -DNO_DEMANGLE | 500 | BASIC_CFLAGS += -DNO_DEMANGLE |
| 494 | endif | 501 | endif |
| 495 | endif | 502 | endif |
| 496 | endif | 503 | endif |
| 497 | endif | 504 | endif |
| 498 | endif | 505 | endif |
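[Editor's note] The demangling block is a fallback ladder: link against -lbfd alone, then -lbfd -liberty, then -lbfd -liberty -lz, and finally fall back to a bare cplus_demangle() from -liberty; the first probe that links wins, and if none does, the build continues with -DNO_DEMANGLE and a warning. The probes can also be skipped outright:

    $ make NO_DEMANGLE=1    # C++ symbols will simply be shown mangled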
| 499 | 506 | ||
| 500 | ifndef CC_LD_DYNPATH | 507 | ifndef CC_LD_DYNPATH |
| 501 | ifdef NO_R_TO_GCC_LINKER | 508 | ifdef NO_R_TO_GCC_LINKER |
| 502 | # Some versions of gcc do not accept -R and pass it on to the linker to | 509 | # Some versions of gcc do not accept -R and pass it on to the linker to |
| 503 | # specify the runtime dynamic library path. | 510 | # specify the runtime dynamic library path. |
| 504 | CC_LD_DYNPATH = -Wl,-rpath, | 511 | CC_LD_DYNPATH = -Wl,-rpath, |
| 505 | else | 512 | else |
| 506 | CC_LD_DYNPATH = -R | 513 | CC_LD_DYNPATH = -R |
| 507 | endif | 514 | endif |
| 508 | endif | 515 | endif |
| 509 | 516 | ||
| 510 | ifdef NEEDS_SOCKET | 517 | ifdef NEEDS_SOCKET |
| 511 | EXTLIBS += -lsocket | 518 | EXTLIBS += -lsocket |
| 512 | endif | 519 | endif |
| 513 | ifdef NEEDS_NSL | 520 | ifdef NEEDS_NSL |
| 514 | EXTLIBS += -lnsl | 521 | EXTLIBS += -lnsl |
| 515 | endif | 522 | endif |
| 516 | ifdef NO_D_TYPE_IN_DIRENT | 523 | ifdef NO_D_TYPE_IN_DIRENT |
| 517 | BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT | 524 | BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT |
| 518 | endif | 525 | endif |
| 519 | ifdef NO_D_INO_IN_DIRENT | 526 | ifdef NO_D_INO_IN_DIRENT |
| 520 | BASIC_CFLAGS += -DNO_D_INO_IN_DIRENT | 527 | BASIC_CFLAGS += -DNO_D_INO_IN_DIRENT |
| 521 | endif | 528 | endif |
| 522 | ifdef NO_ST_BLOCKS_IN_STRUCT_STAT | 529 | ifdef NO_ST_BLOCKS_IN_STRUCT_STAT |
| 523 | BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT | 530 | BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT |
| 524 | endif | 531 | endif |
| 525 | ifdef USE_NSEC | 532 | ifdef USE_NSEC |
| 526 | BASIC_CFLAGS += -DUSE_NSEC | 533 | BASIC_CFLAGS += -DUSE_NSEC |
| 527 | endif | 534 | endif |
| 528 | ifdef USE_ST_TIMESPEC | 535 | ifdef USE_ST_TIMESPEC |
| 529 | BASIC_CFLAGS += -DUSE_ST_TIMESPEC | 536 | BASIC_CFLAGS += -DUSE_ST_TIMESPEC |
| 530 | endif | 537 | endif |
| 531 | ifdef NO_NSEC | 538 | ifdef NO_NSEC |
| 532 | BASIC_CFLAGS += -DNO_NSEC | 539 | BASIC_CFLAGS += -DNO_NSEC |
| 533 | endif | 540 | endif |
| 534 | ifdef NO_C99_FORMAT | 541 | ifdef NO_C99_FORMAT |
| 535 | BASIC_CFLAGS += -DNO_C99_FORMAT | 542 | BASIC_CFLAGS += -DNO_C99_FORMAT |
| 536 | endif | 543 | endif |
| 537 | ifdef SNPRINTF_RETURNS_BOGUS | 544 | ifdef SNPRINTF_RETURNS_BOGUS |
| 538 | COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS | 545 | COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS |
| 539 | COMPAT_OBJS += compat/snprintf.o | 546 | COMPAT_OBJS += compat/snprintf.o |
| 540 | endif | 547 | endif |
| 541 | ifdef FREAD_READS_DIRECTORIES | 548 | ifdef FREAD_READS_DIRECTORIES |
| 542 | COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES | 549 | COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES |
| 543 | COMPAT_OBJS += compat/fopen.o | 550 | COMPAT_OBJS += compat/fopen.o |
| 544 | endif | 551 | endif |
| 545 | ifdef NO_SYMLINK_HEAD | 552 | ifdef NO_SYMLINK_HEAD |
| 546 | BASIC_CFLAGS += -DNO_SYMLINK_HEAD | 553 | BASIC_CFLAGS += -DNO_SYMLINK_HEAD |
| 547 | endif | 554 | endif |
| 548 | ifdef NO_STRCASESTR | 555 | ifdef NO_STRCASESTR |
| 549 | COMPAT_CFLAGS += -DNO_STRCASESTR | 556 | COMPAT_CFLAGS += -DNO_STRCASESTR |
| 550 | COMPAT_OBJS += compat/strcasestr.o | 557 | COMPAT_OBJS += compat/strcasestr.o |
| 551 | endif | 558 | endif |
| 552 | ifdef NO_STRTOUMAX | 559 | ifdef NO_STRTOUMAX |
| 553 | COMPAT_CFLAGS += -DNO_STRTOUMAX | 560 | COMPAT_CFLAGS += -DNO_STRTOUMAX |
| 554 | COMPAT_OBJS += compat/strtoumax.o | 561 | COMPAT_OBJS += compat/strtoumax.o |
| 555 | endif | 562 | endif |
| 556 | ifdef NO_STRTOULL | 563 | ifdef NO_STRTOULL |
| 557 | COMPAT_CFLAGS += -DNO_STRTOULL | 564 | COMPAT_CFLAGS += -DNO_STRTOULL |
| 558 | endif | 565 | endif |
| 559 | ifdef NO_SETENV | 566 | ifdef NO_SETENV |
| 560 | COMPAT_CFLAGS += -DNO_SETENV | 567 | COMPAT_CFLAGS += -DNO_SETENV |
| 561 | COMPAT_OBJS += compat/setenv.o | 568 | COMPAT_OBJS += compat/setenv.o |
| 562 | endif | 569 | endif |
| 563 | ifdef NO_MKDTEMP | 570 | ifdef NO_MKDTEMP |
| 564 | COMPAT_CFLAGS += -DNO_MKDTEMP | 571 | COMPAT_CFLAGS += -DNO_MKDTEMP |
| 565 | COMPAT_OBJS += compat/mkdtemp.o | 572 | COMPAT_OBJS += compat/mkdtemp.o |
| 566 | endif | 573 | endif |
| 567 | ifdef NO_UNSETENV | 574 | ifdef NO_UNSETENV |
| 568 | COMPAT_CFLAGS += -DNO_UNSETENV | 575 | COMPAT_CFLAGS += -DNO_UNSETENV |
| 569 | COMPAT_OBJS += compat/unsetenv.o | 576 | COMPAT_OBJS += compat/unsetenv.o |
| 570 | endif | 577 | endif |
| 571 | ifdef NO_SYS_SELECT_H | 578 | ifdef NO_SYS_SELECT_H |
| 572 | BASIC_CFLAGS += -DNO_SYS_SELECT_H | 579 | BASIC_CFLAGS += -DNO_SYS_SELECT_H |
| 573 | endif | 580 | endif |
| 574 | ifdef NO_MMAP | 581 | ifdef NO_MMAP |
| 575 | COMPAT_CFLAGS += -DNO_MMAP | 582 | COMPAT_CFLAGS += -DNO_MMAP |
| 576 | COMPAT_OBJS += compat/mmap.o | 583 | COMPAT_OBJS += compat/mmap.o |
| 577 | else | 584 | else |
| 578 | ifdef USE_WIN32_MMAP | 585 | ifdef USE_WIN32_MMAP |
| 579 | COMPAT_CFLAGS += -DUSE_WIN32_MMAP | 586 | COMPAT_CFLAGS += -DUSE_WIN32_MMAP |
| 580 | COMPAT_OBJS += compat/win32mmap.o | 587 | COMPAT_OBJS += compat/win32mmap.o |
| 581 | endif | 588 | endif |
| 582 | endif | 589 | endif |
| 583 | ifdef NO_PREAD | 590 | ifdef NO_PREAD |
| 584 | COMPAT_CFLAGS += -DNO_PREAD | 591 | COMPAT_CFLAGS += -DNO_PREAD |
| 585 | COMPAT_OBJS += compat/pread.o | 592 | COMPAT_OBJS += compat/pread.o |
| 586 | endif | 593 | endif |
| 587 | ifdef NO_FAST_WORKING_DIRECTORY | 594 | ifdef NO_FAST_WORKING_DIRECTORY |
| 588 | BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY | 595 | BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY |
| 589 | endif | 596 | endif |
| 590 | ifdef NO_TRUSTABLE_FILEMODE | 597 | ifdef NO_TRUSTABLE_FILEMODE |
| 591 | BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE | 598 | BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE |
| 592 | endif | 599 | endif |
| 593 | ifdef NO_IPV6 | 600 | ifdef NO_IPV6 |
| 594 | BASIC_CFLAGS += -DNO_IPV6 | 601 | BASIC_CFLAGS += -DNO_IPV6 |
| 595 | endif | 602 | endif |
| 596 | ifdef NO_UINTMAX_T | 603 | ifdef NO_UINTMAX_T |
| 597 | BASIC_CFLAGS += -Duintmax_t=uint32_t | 604 | BASIC_CFLAGS += -Duintmax_t=uint32_t |
| 598 | endif | 605 | endif |
| 599 | ifdef NO_SOCKADDR_STORAGE | 606 | ifdef NO_SOCKADDR_STORAGE |
| 600 | ifdef NO_IPV6 | 607 | ifdef NO_IPV6 |
| 601 | BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in | 608 | BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in |
| 602 | else | 609 | else |
| 603 | BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6 | 610 | BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6 |
| 604 | endif | 611 | endif |
| 605 | endif | 612 | endif |
| 606 | ifdef NO_INET_NTOP | 613 | ifdef NO_INET_NTOP |
| 607 | LIB_OBJS += compat/inet_ntop.o | 614 | LIB_OBJS += compat/inet_ntop.o |
| 608 | endif | 615 | endif |
| 609 | ifdef NO_INET_PTON | 616 | ifdef NO_INET_PTON |
| 610 | LIB_OBJS += compat/inet_pton.o | 617 | LIB_OBJS += compat/inet_pton.o |
| 611 | endif | 618 | endif |
| 612 | 619 | ||
| 613 | ifdef NO_ICONV | 620 | ifdef NO_ICONV |
| 614 | BASIC_CFLAGS += -DNO_ICONV | 621 | BASIC_CFLAGS += -DNO_ICONV |
| 615 | endif | 622 | endif |
| 616 | 623 | ||
| 617 | ifdef OLD_ICONV | 624 | ifdef OLD_ICONV |
| 618 | BASIC_CFLAGS += -DOLD_ICONV | 625 | BASIC_CFLAGS += -DOLD_ICONV |
| 619 | endif | 626 | endif |
| 620 | 627 | ||
| 621 | ifdef NO_DEFLATE_BOUND | 628 | ifdef NO_DEFLATE_BOUND |
| 622 | BASIC_CFLAGS += -DNO_DEFLATE_BOUND | 629 | BASIC_CFLAGS += -DNO_DEFLATE_BOUND |
| 623 | endif | 630 | endif |
| 624 | 631 | ||
| 625 | ifdef PPC_SHA1 | 632 | ifdef PPC_SHA1 |
| 626 | SHA1_HEADER = "ppc/sha1.h" | 633 | SHA1_HEADER = "ppc/sha1.h" |
| 627 | LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o | 634 | LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o |
| 628 | else | 635 | else |
| 629 | ifdef ARM_SHA1 | 636 | ifdef ARM_SHA1 |
| 630 | SHA1_HEADER = "arm/sha1.h" | 637 | SHA1_HEADER = "arm/sha1.h" |
| 631 | LIB_OBJS += arm/sha1.o arm/sha1_arm.o | 638 | LIB_OBJS += arm/sha1.o arm/sha1_arm.o |
| 632 | else | 639 | else |
| 633 | ifdef MOZILLA_SHA1 | 640 | ifdef MOZILLA_SHA1 |
| 634 | SHA1_HEADER = "mozilla-sha1/sha1.h" | 641 | SHA1_HEADER = "mozilla-sha1/sha1.h" |
| 635 | LIB_OBJS += mozilla-sha1/sha1.o | 642 | LIB_OBJS += mozilla-sha1/sha1.o |
| 636 | else | 643 | else |
| 637 | SHA1_HEADER = <openssl/sha.h> | 644 | SHA1_HEADER = <openssl/sha.h> |
| 638 | EXTLIBS += $(LIB_4_CRYPTO) | 645 | EXTLIBS += $(LIB_4_CRYPTO) |
| 639 | endif | 646 | endif |
| 640 | endif | 647 | endif |
| 641 | endif | 648 | endif |
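[Editor's note] SHA-1 selection uses the same nested-ifdef style the "Platform specific tweaks" comment grumbles about: PPC_SHA1, then ARM_SHA1, then MOZILLA_SHA1, with OpenSSL's <openssl/sha.h> as the default. For instance:

    $ make PPC_SHA1=1        # hand-tuned PowerPC assembly implementation
    $ make MOZILLA_SHA1=1    # portable C implementation, no OpenSSL required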
| 642 | ifdef NO_PERL_MAKEMAKER | 649 | ifdef NO_PERL_MAKEMAKER |
| 643 | export NO_PERL_MAKEMAKER | 650 | export NO_PERL_MAKEMAKER |
| 644 | endif | 651 | endif |
| 645 | ifdef NO_HSTRERROR | 652 | ifdef NO_HSTRERROR |
| 646 | COMPAT_CFLAGS += -DNO_HSTRERROR | 653 | COMPAT_CFLAGS += -DNO_HSTRERROR |
| 647 | COMPAT_OBJS += compat/hstrerror.o | 654 | COMPAT_OBJS += compat/hstrerror.o |
| 648 | endif | 655 | endif |
| 649 | ifdef NO_MEMMEM | 656 | ifdef NO_MEMMEM |
| 650 | COMPAT_CFLAGS += -DNO_MEMMEM | 657 | COMPAT_CFLAGS += -DNO_MEMMEM |
| 651 | COMPAT_OBJS += compat/memmem.o | 658 | COMPAT_OBJS += compat/memmem.o |
| 652 | endif | 659 | endif |
| 653 | ifdef INTERNAL_QSORT | 660 | ifdef INTERNAL_QSORT |
| 654 | COMPAT_CFLAGS += -DINTERNAL_QSORT | 661 | COMPAT_CFLAGS += -DINTERNAL_QSORT |
| 655 | COMPAT_OBJS += compat/qsort.o | 662 | COMPAT_OBJS += compat/qsort.o |
| 656 | endif | 663 | endif |
| 657 | ifdef RUNTIME_PREFIX | 664 | ifdef RUNTIME_PREFIX |
| 658 | COMPAT_CFLAGS += -DRUNTIME_PREFIX | 665 | COMPAT_CFLAGS += -DRUNTIME_PREFIX |
| 659 | endif | 666 | endif |
| 660 | 667 | ||
| 661 | ifdef DIR_HAS_BSD_GROUP_SEMANTICS | 668 | ifdef DIR_HAS_BSD_GROUP_SEMANTICS |
| 662 | COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS | 669 | COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS |
| 663 | endif | 670 | endif |
| 664 | ifdef NO_EXTERNAL_GREP | 671 | ifdef NO_EXTERNAL_GREP |
| 665 | BASIC_CFLAGS += -DNO_EXTERNAL_GREP | 672 | BASIC_CFLAGS += -DNO_EXTERNAL_GREP |
| 666 | endif | 673 | endif |
| 667 | 674 | ||
| 668 | ifeq ($(PERL_PATH),) | 675 | ifeq ($(PERL_PATH),) |
| 669 | NO_PERL=NoThanks | 676 | NO_PERL=NoThanks |
| 670 | endif | 677 | endif |
| 671 | 678 | ||
| 672 | QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir | 679 | QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir |
| 673 | QUIET_SUBDIR1 = | 680 | QUIET_SUBDIR1 = |
| 674 | 681 | ||
| 675 | ifneq ($(findstring $(MAKEFLAGS),w),w) | 682 | ifneq ($(findstring $(MAKEFLAGS),w),w) |
| 676 | PRINT_DIR = --no-print-directory | 683 | PRINT_DIR = --no-print-directory |
| 677 | else # "make -w" | 684 | else # "make -w" |
| 678 | NO_SUBDIR = : | 685 | NO_SUBDIR = : |
| 679 | endif | 686 | endif |
| 680 | 687 | ||
| 681 | ifneq ($(findstring $(MAKEFLAGS),s),s) | 688 | ifneq ($(findstring $(MAKEFLAGS),s),s) |
| 682 | ifndef V | 689 | ifndef V |
| 683 | QUIET_CC = @echo ' ' CC $@; | 690 | QUIET_CC = @echo ' ' CC $@; |
| 684 | QUIET_AR = @echo ' ' AR $@; | 691 | QUIET_AR = @echo ' ' AR $@; |
| 685 | QUIET_LINK = @echo ' ' LINK $@; | 692 | QUIET_LINK = @echo ' ' LINK $@; |
| 686 | QUIET_BUILT_IN = @echo ' ' BUILTIN $@; | 693 | QUIET_BUILT_IN = @echo ' ' BUILTIN $@; |
| 687 | QUIET_GEN = @echo ' ' GEN $@; | 694 | QUIET_GEN = @echo ' ' GEN $@; |
| 688 | QUIET_SUBDIR0 = +@subdir= | 695 | QUIET_SUBDIR0 = +@subdir= |
| 689 | QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ | 696 | QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ |
| 690 | $(MAKE) $(PRINT_DIR) -C $$subdir | 697 | $(MAKE) $(PRINT_DIR) -C $$subdir |
| 691 | export V | 698 | export V |
| 692 | export QUIET_GEN | 699 | export QUIET_GEN |
| 693 | export QUIET_BUILT_IN | 700 | export QUIET_BUILT_IN |
| 694 | endif | 701 | endif |
| 695 | endif | 702 | endif |
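[Editor's note] The QUIET_* macros give kernel-style terse build output; they are only defined when make is not running silently and V is unset, so verbosity is a per-invocation choice:

    $ make           # terse:    CC util/parse-events.o
    $ make V=1       # verbose:  full gcc command lines
    $ make -s        # silent:   no per-file output at all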
| 696 | 703 | ||
| 697 | ifdef ASCIIDOC8 | 704 | ifdef ASCIIDOC8 |
| 698 | export ASCIIDOC8 | 705 | export ASCIIDOC8 |
| 699 | endif | 706 | endif |
| 700 | 707 | ||
| 701 | # Shell quote (do not use $(call) to accommodate ancient setups). | 708 | # Shell quote (do not use $(call) to accommodate ancient setups). |
| 702 | 709 | ||
| 703 | SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER)) | 710 | SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER)) |
| 704 | ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) | 711 | ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) |
| 705 | 712 | ||
| 706 | DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) | 713 | DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) |
| 707 | bindir_SQ = $(subst ','\'',$(bindir)) | 714 | bindir_SQ = $(subst ','\'',$(bindir)) |
| 708 | bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) | 715 | bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) |
| 709 | mandir_SQ = $(subst ','\'',$(mandir)) | 716 | mandir_SQ = $(subst ','\'',$(mandir)) |
| 710 | infodir_SQ = $(subst ','\'',$(infodir)) | 717 | infodir_SQ = $(subst ','\'',$(infodir)) |
| 711 | perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) | 718 | perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) |
| 712 | template_dir_SQ = $(subst ','\'',$(template_dir)) | 719 | template_dir_SQ = $(subst ','\'',$(template_dir)) |
| 713 | htmldir_SQ = $(subst ','\'',$(htmldir)) | 720 | htmldir_SQ = $(subst ','\'',$(htmldir)) |
| 714 | prefix_SQ = $(subst ','\'',$(prefix)) | 721 | prefix_SQ = $(subst ','\'',$(prefix)) |
| 715 | 722 | ||
| 716 | SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) | 723 | SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) |
| 717 | PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) | 724 | PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) |
| 718 | 725 | ||
| 719 | LIBS = $(PERFLIBS) $(EXTLIBS) | 726 | LIBS = $(PERFLIBS) $(EXTLIBS) |
| 720 | 727 | ||
| 721 | BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \ | 728 | BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \ |
| 722 | $(COMPAT_CFLAGS) | 729 | $(COMPAT_CFLAGS) |
| 723 | LIB_OBJS += $(COMPAT_OBJS) | 730 | LIB_OBJS += $(COMPAT_OBJS) |
| 724 | 731 | ||
| 725 | ALL_CFLAGS += $(BASIC_CFLAGS) | 732 | ALL_CFLAGS += $(BASIC_CFLAGS) |
| 726 | ALL_LDFLAGS += $(BASIC_LDFLAGS) | 733 | ALL_LDFLAGS += $(BASIC_LDFLAGS) |
| 727 | 734 | ||
| 728 | export TAR INSTALL DESTDIR SHELL_PATH | 735 | export TAR INSTALL DESTDIR SHELL_PATH |
| 729 | 736 | ||
| 730 | 737 | ||
| 731 | ### Build rules | 738 | ### Build rules |
| 732 | 739 | ||
| 733 | SHELL = $(SHELL_PATH) | 740 | SHELL = $(SHELL_PATH) |
| 734 | 741 | ||
| 735 | all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS | 742 | all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS |
| 736 | ifneq (,$X) | 743 | ifneq (,$X) |
| 737 | $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';) | 744 | $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';) |
| 738 | endif | 745 | endif |
| 739 | 746 | ||
| 740 | all:: | 747 | all:: |
| 741 | 748 | ||
| 742 | please_set_SHELL_PATH_to_a_more_modern_shell: | 749 | please_set_SHELL_PATH_to_a_more_modern_shell: |
| 743 | @$$(:) | 750 | @$$(:) |
| 744 | 751 | ||
| 745 | shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell | 752 | shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell |
| 746 | 753 | ||
| 747 | strip: $(PROGRAMS) perf$X | 754 | strip: $(PROGRAMS) perf$X |
| 748 | $(STRIP) $(STRIP_OPTS) $(PROGRAMS) perf$X | 755 | $(STRIP) $(STRIP_OPTS) $(PROGRAMS) perf$X |
| 749 | 756 | ||
| 750 | perf.o: perf.c common-cmds.h PERF-CFLAGS | 757 | perf.o: perf.c common-cmds.h PERF-CFLAGS |
| 751 | $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ | 758 | $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ |
| 752 | '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ | 759 | '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ |
| 753 | $(ALL_CFLAGS) -c $(filter %.c,$^) | 760 | $(ALL_CFLAGS) -c $(filter %.c,$^) |
| 754 | 761 | ||
| 755 | perf$X: perf.o $(BUILTIN_OBJS) $(PERFLIBS) | 762 | perf$X: perf.o $(BUILTIN_OBJS) $(PERFLIBS) |
| 756 | $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ perf.o \ | 763 | $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ perf.o \ |
| 757 | $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS) | 764 | $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS) |
| 758 | 765 | ||
| 759 | builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS | 766 | builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS |
| 760 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ | 767 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ |
| 761 | '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ | 768 | '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ |
| 762 | '-DPERF_MAN_PATH="$(mandir_SQ)"' \ | 769 | '-DPERF_MAN_PATH="$(mandir_SQ)"' \ |
| 763 | '-DPERF_INFO_PATH="$(infodir_SQ)"' $< | 770 | '-DPERF_INFO_PATH="$(infodir_SQ)"' $< |
| 764 | 771 | ||
| 765 | builtin-timechart.o: builtin-timechart.c common-cmds.h PERF-CFLAGS | 772 | builtin-timechart.o: builtin-timechart.c common-cmds.h PERF-CFLAGS |
| 766 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ | 773 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ |
| 767 | '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ | 774 | '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ |
| 768 | '-DPERF_MAN_PATH="$(mandir_SQ)"' \ | 775 | '-DPERF_MAN_PATH="$(mandir_SQ)"' \ |
| 769 | '-DPERF_INFO_PATH="$(infodir_SQ)"' $< | 776 | '-DPERF_INFO_PATH="$(infodir_SQ)"' $< |
| 770 | 777 | ||
| 771 | $(BUILT_INS): perf$X | 778 | $(BUILT_INS): perf$X |
| 772 | $(QUIET_BUILT_IN)$(RM) $@ && \ | 779 | $(QUIET_BUILT_IN)$(RM) $@ && \ |
| 773 | ln perf$X $@ 2>/dev/null || \ | 780 | ln perf$X $@ 2>/dev/null || \ |
| 774 | ln -s perf$X $@ 2>/dev/null || \ | 781 | ln -s perf$X $@ 2>/dev/null || \ |
| 775 | cp perf$X $@ | 782 | cp perf$X $@ |
| 776 | 783 | ||
| 777 | common-cmds.h: util/generate-cmdlist.sh command-list.txt | 784 | common-cmds.h: util/generate-cmdlist.sh command-list.txt |
| 778 | 785 | ||
| 779 | common-cmds.h: $(wildcard Documentation/perf-*.txt) | 786 | common-cmds.h: $(wildcard Documentation/perf-*.txt) |
| 780 | $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ | 787 | $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ |
| 781 | 788 | ||
| 782 | $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh | 789 | $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh |
| 783 | $(QUIET_GEN)$(RM) $@ $@+ && \ | 790 | $(QUIET_GEN)$(RM) $@ $@+ && \ |
| 784 | sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ | 791 | sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ |
| 785 | -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \ | 792 | -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \ |
| 786 | -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ | 793 | -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ |
| 787 | -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ | 794 | -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ |
| 788 | -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ | 795 | -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ |
| 789 | $@.sh >$@+ && \ | 796 | $@.sh >$@+ && \ |
| 790 | chmod +x $@+ && \ | 797 | chmod +x $@+ && \ |
| 791 | mv $@+ $@ | 798 | mv $@+ $@ |
| 792 | 799 | ||
| 793 | configure: configure.ac | 800 | configure: configure.ac |
| 794 | $(QUIET_GEN)$(RM) $@ $<+ && \ | 801 | $(QUIET_GEN)$(RM) $@ $<+ && \ |
| 795 | sed -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ | 802 | sed -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ |
| 796 | $< > $<+ && \ | 803 | $< > $<+ && \ |
| 797 | autoconf -o $@ $<+ && \ | 804 | autoconf -o $@ $<+ && \ |
| 798 | $(RM) $<+ | 805 | $(RM) $<+ |
| 799 | 806 | ||
| 800 | # These can record PERF_VERSION | 807 | # These can record PERF_VERSION |
| 801 | perf.o perf.spec \ | 808 | perf.o perf.spec \ |
| 802 | $(patsubst %.sh,%,$(SCRIPT_SH)) \ | 809 | $(patsubst %.sh,%,$(SCRIPT_SH)) \ |
| 803 | $(patsubst %.perl,%,$(SCRIPT_PERL)) \ | 810 | $(patsubst %.perl,%,$(SCRIPT_PERL)) \ |
| 804 | : PERF-VERSION-FILE | 811 | : PERF-VERSION-FILE |
| 805 | 812 | ||
| 806 | %.o: %.c PERF-CFLAGS | 813 | %.o: %.c PERF-CFLAGS |
| 807 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< | 814 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< |
| 808 | %.s: %.c PERF-CFLAGS | 815 | %.s: %.c PERF-CFLAGS |
| 809 | $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $< | 816 | $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $< |
| 810 | %.o: %.S | 817 | %.o: %.S |
| 811 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< | 818 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) $< |
| 812 | 819 | ||
| 813 | util/exec_cmd.o: util/exec_cmd.c PERF-CFLAGS | 820 | util/exec_cmd.o: util/exec_cmd.c PERF-CFLAGS |
| 814 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ | 821 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \ |
| 815 | '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ | 822 | '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ |
| 816 | '-DBINDIR="$(bindir_relative_SQ)"' \ | 823 | '-DBINDIR="$(bindir_relative_SQ)"' \ |
| 817 | '-DPREFIX="$(prefix_SQ)"' \ | 824 | '-DPREFIX="$(prefix_SQ)"' \ |
| 818 | $< | 825 | $< |
| 819 | 826 | ||
| 820 | builtin-init-db.o: builtin-init-db.c PERF-CFLAGS | 827 | builtin-init-db.o: builtin-init-db.c PERF-CFLAGS |
| 821 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $< | 828 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $< |
| 822 | 829 | ||
| 823 | util/config.o: util/config.c PERF-CFLAGS | 830 | util/config.o: util/config.c PERF-CFLAGS |
| 824 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | 831 | $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< |
| 825 | 832 | ||
| 826 | util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS | 833 | util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS |
| 827 | $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | 834 | $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< |
| 828 | 835 | ||
| 829 | # Some perf warning policies do not fit lib/bitmap.c, e.g. it warns about variable shadowing | 836 | # Some perf warning policies do not fit lib/bitmap.c, e.g. it warns about variable shadowing |
| 830 | # coming from the <string.h> kernel-header wrappers. | 837 | # coming from the <string.h> kernel-header wrappers. |
| 831 | KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//` | 838 | KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//` |
| 832 | 839 | ||
| 833 | util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS | 840 | util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS |
| 834 | $(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | 841 | $(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< |
| 835 | 842 | ||
| 836 | util/hweight.o: ../../lib/hweight.c PERF-CFLAGS | 843 | util/hweight.o: ../../lib/hweight.c PERF-CFLAGS |
| 837 | $(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | 844 | $(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< |
| 838 | 845 | ||
| 839 | util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS | 846 | util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS |
| 840 | $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< | 847 | $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< |
| 841 | 848 | ||
| 842 | perf-%$X: %.o $(PERFLIBS) | 849 | perf-%$X: %.o $(PERFLIBS) |
| 843 | $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) | 850 | $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) |
| 844 | 851 | ||
| 845 | $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) | 852 | $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) |
| 846 | $(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) | 853 | $(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) |
| 847 | builtin-revert.o wt-status.o: wt-status.h | 854 | builtin-revert.o wt-status.o: wt-status.h |
| 848 | 855 | ||
| 849 | $(LIB_FILE): $(LIB_OBJS) | 856 | $(LIB_FILE): $(LIB_OBJS) |
| 850 | $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) | 857 | $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) |
| 851 | 858 | ||
| 852 | doc: | 859 | doc: |
| 853 | $(MAKE) -C Documentation all | 860 | $(MAKE) -C Documentation all |
| 854 | 861 | ||
| 855 | man: | 862 | man: |
| 856 | $(MAKE) -C Documentation man | 863 | $(MAKE) -C Documentation man |
| 857 | 864 | ||
| 858 | html: | 865 | html: |
| 859 | $(MAKE) -C Documentation html | 866 | $(MAKE) -C Documentation html |
| 860 | 867 | ||
| 861 | info: | 868 | info: |
| 862 | $(MAKE) -C Documentation info | 869 | $(MAKE) -C Documentation info |
| 863 | 870 | ||
| 864 | pdf: | 871 | pdf: |
| 865 | $(MAKE) -C Documentation pdf | 872 | $(MAKE) -C Documentation pdf |
| 866 | 873 | ||
| 867 | TAGS: | 874 | TAGS: |
| 868 | $(RM) TAGS | 875 | $(RM) TAGS |
| 869 | $(FIND) . -name '*.[hcS]' -print | xargs etags -a | 876 | $(FIND) . -name '*.[hcS]' -print | xargs etags -a |
| 870 | 877 | ||
| 871 | tags: | 878 | tags: |
| 872 | $(RM) tags | 879 | $(RM) tags |
| 873 | $(FIND) . -name '*.[hcS]' -print | xargs ctags -a | 880 | $(FIND) . -name '*.[hcS]' -print | xargs ctags -a |
| 874 | 881 | ||
| 875 | cscope: | 882 | cscope: |
| 876 | $(RM) cscope* | 883 | $(RM) cscope* |
| 877 | $(FIND) . -name '*.[hcS]' -print | xargs cscope -b | 884 | $(FIND) . -name '*.[hcS]' -print | xargs cscope -b |
| 878 | 885 | ||
| 879 | ### Detect prefix changes | 886 | ### Detect prefix changes |
| 880 | TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\ | 887 | TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\ |
| 881 | $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) | 888 | $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) |
| 882 | 889 | ||
| 883 | PERF-CFLAGS: .FORCE-PERF-CFLAGS | 890 | PERF-CFLAGS: .FORCE-PERF-CFLAGS |
| 884 | @FLAGS='$(TRACK_CFLAGS)'; \ | 891 | @FLAGS='$(TRACK_CFLAGS)'; \ |
| 885 | if test x"$$FLAGS" != x"`cat PERF-CFLAGS 2>/dev/null`" ; then \ | 892 | if test x"$$FLAGS" != x"`cat PERF-CFLAGS 2>/dev/null`" ; then \ |
| 886 | echo 1>&2 " * new build flags or prefix"; \ | 893 | echo 1>&2 " * new build flags or prefix"; \ |
| 887 | echo "$$FLAGS" >PERF-CFLAGS; \ | 894 | echo "$$FLAGS" >PERF-CFLAGS; \ |
| 888 | fi | 895 | fi |
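[Editor's note] Since every %.o rule lists PERF-CFLAGS as a prerequisite, rewriting that file whenever TRACK_CFLAGS changes forces a full recompile with the new flags; for example, switching the prefix:

    $ make prefix=/usr
     * new build flags or prefix     # PERF-CFLAGS rewritten, objects rebuilt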
| 889 | 896 | ||
| 890 | # We need to apply sq twice, once to protect from the shell | 897 | # We need to apply sq twice, once to protect from the shell |
| 891 | # that runs PERF-BUILD-OPTIONS, and then again to protect it | 898 | # that runs PERF-BUILD-OPTIONS, and then again to protect it |
| 892 | # and the first level quoting from the shell that runs "echo". | 899 | # and the first level quoting from the shell that runs "echo". |
| 893 | PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS | 900 | PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS |
| 894 | @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@ | 901 | @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@ |
| 895 | @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@ | 902 | @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@ |
| 896 | @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@ | 903 | @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@ |
| 897 | @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@ | 904 | @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@ |
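[Editor's note] A worked example of the double quoting, using a hypothetical SHELL_PATH containing a single quote: with SHELL_PATH = /opt/o'brien/sh, the recipe emits the line

    SHELL_PATH='/opt/o'\''brien/sh'

into PERF-BUILD-OPTIONS, which round-trips to the original /opt/o'brien/sh when a shell later sources that file; one sq pass protects the echo, the second protects the eventual reader.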
| 898 | 905 | ||
| 899 | ### Testing rules | 906 | ### Testing rules |
| 900 | 907 | ||
| 901 | # | 908 | # |
| 902 | # None right now: | 909 | # None right now: |
| 903 | # | 910 | # |
| 904 | # TEST_PROGRAMS += test-something$X | 911 | # TEST_PROGRAMS += test-something$X |
| 905 | 912 | ||
| 906 | all:: $(TEST_PROGRAMS) | 913 | all:: $(TEST_PROGRAMS) |
| 907 | 914 | ||
| 908 | # GNU make supports exporting all variables by "export" without parameters. | 915 | # GNU make supports exporting all variables by "export" without parameters. |
| 909 | # However, the environment gets quite big, and some programs have problems | 916 | # However, the environment gets quite big, and some programs have problems |
| 910 | # with that. | 917 | # with that. |
| 911 | 918 | ||
| 912 | export NO_SVN_TESTS | 919 | export NO_SVN_TESTS |
| 913 | 920 | ||
| 914 | check: common-cmds.h | 921 | check: common-cmds.h |
| 915 | if sparse; \ | 922 | if sparse; \ |
| 916 | then \ | 923 | then \ |
| 917 | for i in *.c */*.c; \ | 924 | for i in *.c */*.c; \ |
| 918 | do \ | 925 | do \ |
| 919 | sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ | 926 | sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ |
| 920 | done; \ | 927 | done; \ |
| 921 | else \ | 928 | else \ |
| 922 | echo 1>&2 "Did you mean 'make test'?"; \ | 929 | echo 1>&2 "Did you mean 'make test'?"; \ |
| 923 | exit 1; \ | 930 | exit 1; \ |
| 924 | fi | 931 | fi |
| 925 | 932 | ||
| 926 | remove-dashes: | 933 | remove-dashes: |
| 927 | ./fixup-builtins $(BUILT_INS) $(PROGRAMS) $(SCRIPTS) | 934 | ./fixup-builtins $(BUILT_INS) $(PROGRAMS) $(SCRIPTS) |
| 928 | 935 | ||
| 929 | ### Installation rules | 936 | ### Installation rules |
| 930 | 937 | ||
| 931 | ifneq ($(filter /%,$(firstword $(template_dir))),) | 938 | ifneq ($(filter /%,$(firstword $(template_dir))),) |
| 932 | template_instdir = $(template_dir) | 939 | template_instdir = $(template_dir) |
| 933 | else | 940 | else |
| 934 | template_instdir = $(prefix)/$(template_dir) | 941 | template_instdir = $(prefix)/$(template_dir) |
| 935 | endif | 942 | endif |
| 936 | export template_instdir | 943 | export template_instdir |
| 937 | 944 | ||
| 938 | ifneq ($(filter /%,$(firstword $(perfexecdir))),) | 945 | ifneq ($(filter /%,$(firstword $(perfexecdir))),) |
| 939 | perfexec_instdir = $(perfexecdir) | 946 | perfexec_instdir = $(perfexecdir) |
| 940 | else | 947 | else |
| 941 | perfexec_instdir = $(prefix)/$(perfexecdir) | 948 | perfexec_instdir = $(prefix)/$(perfexecdir) |
| 942 | endif | 949 | endif |
| 943 | perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) | 950 | perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) |
| 944 | export perfexec_instdir | 951 | export perfexec_instdir |
| 945 | 952 | ||
| 946 | install: all | 953 | install: all |
| 947 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' | 954 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' |
| 948 | $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' | 955 | $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' |
| 949 | ifdef BUILT_INS | 956 | ifdef BUILT_INS |
| 950 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' | 957 | $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' |
| 951 | $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' | 958 | $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' |
| 952 | ifneq (,$X) | 959 | ifneq (,$X) |
| 953 | $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) | 960 | $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';) |
| 954 | endif | 961 | endif |
| 955 | endif | 962 | endif |
| 956 | 963 | ||
| 957 | install-doc: | 964 | install-doc: |
| 958 | $(MAKE) -C Documentation install | 965 | $(MAKE) -C Documentation install |
| 959 | 966 | ||
| 960 | install-man: | 967 | install-man: |
| 961 | $(MAKE) -C Documentation install-man | 968 | $(MAKE) -C Documentation install-man |
| 962 | 969 | ||
| 963 | install-html: | 970 | install-html: |
| 964 | $(MAKE) -C Documentation install-html | 971 | $(MAKE) -C Documentation install-html |
| 965 | 972 | ||
| 966 | install-info: | 973 | install-info: |
| 967 | $(MAKE) -C Documentation install-info | 974 | $(MAKE) -C Documentation install-info |
| 968 | 975 | ||
| 969 | install-pdf: | 976 | install-pdf: |
| 970 | $(MAKE) -C Documentation install-pdf | 977 | $(MAKE) -C Documentation install-pdf |
| 971 | 978 | ||
| 972 | quick-install-doc: | 979 | quick-install-doc: |
| 973 | $(MAKE) -C Documentation quick-install | 980 | $(MAKE) -C Documentation quick-install |
| 974 | 981 | ||
| 975 | quick-install-man: | 982 | quick-install-man: |
| 976 | $(MAKE) -C Documentation quick-install-man | 983 | $(MAKE) -C Documentation quick-install-man |
| 977 | 984 | ||
| 978 | quick-install-html: | 985 | quick-install-html: |
| 979 | $(MAKE) -C Documentation quick-install-html | 986 | $(MAKE) -C Documentation quick-install-html |
| 980 | 987 | ||
| 981 | 988 | ||
| 982 | ### Maintainer's dist rules | 989 | ### Maintainer's dist rules |
| 983 | # | 990 | # |
| 984 | # None right now | 991 | # None right now |
| 985 | # | 992 | # |
| 986 | # | 993 | # |
| 987 | # perf.spec: perf.spec.in | 994 | # perf.spec: perf.spec.in |
| 988 | # sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+ | 995 | # sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+ |
| 989 | # mv $@+ $@ | 996 | # mv $@+ $@ |
| 990 | # | 997 | # |
| 991 | # PERF_TARNAME=perf-$(PERF_VERSION) | 998 | # PERF_TARNAME=perf-$(PERF_VERSION) |
| 992 | # dist: perf.spec perf-archive$(X) configure | 999 | # dist: perf.spec perf-archive$(X) configure |
| 993 | # ./perf-archive --format=tar \ | 1000 | # ./perf-archive --format=tar \ |
| 994 | # --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar | 1001 | # --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar |
| 995 | # @mkdir -p $(PERF_TARNAME) | 1002 | # @mkdir -p $(PERF_TARNAME) |
| 996 | # @cp perf.spec configure $(PERF_TARNAME) | 1003 | # @cp perf.spec configure $(PERF_TARNAME) |
| 997 | # @echo $(PERF_VERSION) > $(PERF_TARNAME)/version | 1004 | # @echo $(PERF_VERSION) > $(PERF_TARNAME)/version |
| 998 | # $(TAR) rf $(PERF_TARNAME).tar \ | 1005 | # $(TAR) rf $(PERF_TARNAME).tar \ |
| 999 | # $(PERF_TARNAME)/perf.spec \ | 1006 | # $(PERF_TARNAME)/perf.spec \ |
| 1000 | # $(PERF_TARNAME)/configure \ | 1007 | # $(PERF_TARNAME)/configure \ |
| 1001 | # $(PERF_TARNAME)/version | 1008 | # $(PERF_TARNAME)/version |
| 1002 | # @$(RM) -r $(PERF_TARNAME) | 1009 | # @$(RM) -r $(PERF_TARNAME) |
| 1003 | # gzip -f -9 $(PERF_TARNAME).tar | 1010 | # gzip -f -9 $(PERF_TARNAME).tar |
| 1004 | # | 1011 | # |
| 1005 | # htmldocs = perf-htmldocs-$(PERF_VERSION) | 1012 | # htmldocs = perf-htmldocs-$(PERF_VERSION) |
| 1006 | # manpages = perf-manpages-$(PERF_VERSION) | 1013 | # manpages = perf-manpages-$(PERF_VERSION) |
| 1007 | # dist-doc: | 1014 | # dist-doc: |
| 1008 | # $(RM) -r .doc-tmp-dir | 1015 | # $(RM) -r .doc-tmp-dir |
| 1009 | # mkdir .doc-tmp-dir | 1016 | # mkdir .doc-tmp-dir |
| 1010 | # $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc | 1017 | # $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc |
| 1011 | # cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar . | 1018 | # cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar . |
| 1012 | # gzip -n -9 -f $(htmldocs).tar | 1019 | # gzip -n -9 -f $(htmldocs).tar |
| 1013 | # : | 1020 | # : |
| 1014 | # $(RM) -r .doc-tmp-dir | 1021 | # $(RM) -r .doc-tmp-dir |
| 1015 | # mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 | 1022 | # mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 |
| 1016 | # $(MAKE) -C Documentation DESTDIR=./ \ | 1023 | # $(MAKE) -C Documentation DESTDIR=./ \ |
| 1017 | # man1dir=../.doc-tmp-dir/man1 \ | 1024 | # man1dir=../.doc-tmp-dir/man1 \ |
| 1018 | # man5dir=../.doc-tmp-dir/man5 \ | 1025 | # man5dir=../.doc-tmp-dir/man5 \ |
| 1019 | # man7dir=../.doc-tmp-dir/man7 \ | 1026 | # man7dir=../.doc-tmp-dir/man7 \ |
| 1020 | # install | 1027 | # install |
| 1021 | # cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . | 1028 | # cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar . |
| 1022 | # gzip -n -9 -f $(manpages).tar | 1029 | # gzip -n -9 -f $(manpages).tar |
| 1023 | # $(RM) -r .doc-tmp-dir | 1030 | # $(RM) -r .doc-tmp-dir |
| 1024 | # | 1031 | # |
| 1025 | # rpm: dist | 1032 | # rpm: dist |
| 1026 | # $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz | 1033 | # $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz |
| 1027 | 1034 | ||
| 1028 | ### Cleaning rules | 1035 | ### Cleaning rules |
| 1029 | 1036 | ||
| 1030 | distclean: clean | 1037 | distclean: clean |
| 1031 | # $(RM) configure | 1038 | # $(RM) configure |
| 1032 | 1039 | ||
| 1033 | clean: | 1040 | clean: |
| 1034 | $(RM) *.o */*.o $(LIB_FILE) | 1041 | $(RM) *.o */*.o $(LIB_FILE) |
| 1035 | $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X | 1042 | $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X |
| 1036 | $(RM) $(TEST_PROGRAMS) | 1043 | $(RM) $(TEST_PROGRAMS) |
| 1037 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* | 1044 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* |
| 1038 | $(RM) -r autom4te.cache | 1045 | $(RM) -r autom4te.cache |
| 1039 | $(RM) config.log config.mak.autogen config.mak.append config.status config.cache | 1046 | $(RM) config.log config.mak.autogen config.mak.append config.status config.cache |
| 1040 | $(RM) -r $(PERF_TARNAME) .doc-tmp-dir | 1047 | $(RM) -r $(PERF_TARNAME) .doc-tmp-dir |
| 1041 | $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz | 1048 | $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz |
| 1042 | $(RM) $(htmldocs).tar.gz $(manpages).tar.gz | 1049 | $(RM) $(htmldocs).tar.gz $(manpages).tar.gz |
| 1043 | $(MAKE) -C Documentation/ clean | 1050 | $(MAKE) -C Documentation/ clean |
| 1044 | $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS | 1051 | $(RM) PERF-VERSION-FILE PERF-CFLAGS PERF-BUILD-OPTIONS |
| 1045 | 1052 | ||
| 1046 | .PHONY: all install clean strip | 1053 | .PHONY: all install clean strip |
| 1047 | .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell | 1054 | .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell |
| 1048 | .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS | 1055 | .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS |
| 1049 | .PHONY: .FORCE-PERF-BUILD-OPTIONS | 1056 | .PHONY: .FORCE-PERF-BUILD-OPTIONS |
| 1050 | 1057 | ||
| 1051 | ### Make sure built-ins do not have dups and are listed in perf.c | 1058 | ### Make sure built-ins do not have dups and are listed in perf.c |
| 1052 | # | 1059 | # |
| 1053 | check-builtins:: | 1060 | check-builtins:: |
| 1054 | ./check-builtins.sh | 1061 | ./check-builtins.sh |
| 1055 | 1062 | ||
| 1056 | ### Test suite coverage testing | 1063 | ### Test suite coverage testing |
| 1057 | # | 1064 | # |
| 1058 | # None right now | 1065 | # None right now |
| 1059 | # | 1066 | # |
| 1060 | # .PHONY: coverage coverage-clean coverage-build coverage-report | 1067 | # .PHONY: coverage coverage-clean coverage-build coverage-report |
| 1061 | # | 1068 | # |
| 1062 | # coverage: | 1069 | # coverage: |
| 1063 | # $(MAKE) coverage-build | 1070 | # $(MAKE) coverage-build |
| 1064 | # $(MAKE) coverage-report | 1071 | # $(MAKE) coverage-report |
| 1065 | # | 1072 | # |
| 1066 | # coverage-clean: | 1073 | # coverage-clean: |
| 1067 | # rm -f *.gcda *.gcno | 1074 | # rm -f *.gcda *.gcno |
| 1068 | # | 1075 | # |
| 1069 | # COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs | 1076 | # COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs |
| 1070 | # COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov | 1077 | # COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov |
| 1071 | # | 1078 | # |
| 1072 | # coverage-build: coverage-clean | 1079 | # coverage-build: coverage-clean |
| 1073 | # $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all | 1080 | # $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all |
| 1074 | # $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ | 1081 | # $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ |
| 1075 | # -j1 test | 1082 | # -j1 test |
| 1076 | # | 1083 | # |
| 1077 | # coverage-report: | 1084 | # coverage-report: |
| 1078 | # gcov -b *.c */*.c | 1085 | # gcov -b *.c */*.c |
| 1079 | # grep '^function.*called 0 ' *.c.gcov */*.c.gcov \ | 1086 | # grep '^function.*called 0 ' *.c.gcov */*.c.gcov \ |
| 1080 | # | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ | 1087 | # | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ |
| 1081 | # | tee coverage-untested-functions | 1088 | # | tee coverage-untested-functions |
| 1082 | 1089 |
tools/perf/bench/bench.h
| File was created | 1 | #ifndef BENCH_H | |
| 2 | #define BENCH_H | ||
| 3 | |||
| 4 | extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); | ||
| 5 | extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); | ||
| 6 | |||
| 7 | #define BENCH_FORMAT_DEFAULT_STR "default" | ||
| 8 | #define BENCH_FORMAT_DEFAULT 0 | ||
| 9 | #define BENCH_FORMAT_SIMPLE_STR "simple" | ||
| 10 | #define BENCH_FORMAT_SIMPLE 1 | ||
| 11 | |||
| 12 | #define BENCH_FORMAT_UNKNOWN -1 | ||
| 13 | |||
| 14 | extern int bench_format; | ||
| 15 | |||
| 16 | #endif | ||
| 17 |
tools/perf/bench/sched-messaging.c
| File was created | 1 | /* | |
| 2 | * | ||
| 3 | * sched-messaging.c | ||
| 4 | * | ||
| 5 | * messaging: Benchmark for scheduler and IPC mechanisms | ||
| 6 | * | ||
| 7 | * Based on hackbench by Rusty Russell <rusty@rustcorp.com.au> | ||
| 8 | * Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include "../perf.h" | ||
| 13 | #include "../util/util.h" | ||
| 14 | #include "../util/parse-options.h" | ||
| 15 | #include "../builtin.h" | ||
| 16 | #include "bench.h" | ||
| 17 | |||
| 18 | /* Test groups of 20 processes spraying to 20 receivers */ | ||
| 19 | #include <pthread.h> | ||
| 20 | #include <stdio.h> | ||
| 21 | #include <stdlib.h> | ||
| 22 | #include <string.h> | ||
| 23 | #include <errno.h> | ||
| 24 | #include <unistd.h> | ||
| 25 | #include <sys/types.h> | ||
| 26 | #include <sys/socket.h> | ||
| 27 | #include <sys/wait.h> | ||
| 28 | #include <sys/time.h> | ||
| 29 | #include <sys/poll.h> | ||
| 30 | #include <limits.h> | ||
| 31 | |||
| 32 | #define DATASIZE 100 | ||
| 33 | |||
| 34 | static int use_pipes = 0; | ||
| 35 | static unsigned int loops = 100; | ||
| 36 | static unsigned int thread_mode = 0; | ||
| 37 | static unsigned int num_groups = 10; | ||
| 38 | |||
| 39 | struct sender_context { | ||
| 40 | unsigned int num_fds; | ||
| 41 | int ready_out; | ||
| 42 | int wakefd; | ||
| 43 | int out_fds[0]; | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct receiver_context { | ||
| 47 | unsigned int num_packets; | ||
| 48 | int in_fds[2]; | ||
| 49 | int ready_out; | ||
| 50 | int wakefd; | ||
| 51 | }; | ||
| 52 | |||
| 53 | static void barf(const char *msg) | ||
| 54 | { | ||
| 55 | fprintf(stderr, "%s (error: %s)\n", msg, strerror(errno)); | ||
| 56 | exit(1); | ||
| 57 | } | ||
| 58 | |||
| 59 | static void fdpair(int fds[2]) | ||
| 60 | { | ||
| 61 | if (use_pipes) { | ||
| 62 | if (pipe(fds) == 0) | ||
| 63 | return; | ||
| 64 | } else { | ||
| 65 | if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0) | ||
| 66 | return; | ||
| 67 | } | ||
| 68 | |||
| 69 | barf(use_pipes ? "pipe()" : "socketpair()"); | ||
| 70 | } | ||
| 71 | |||
| 72 | /* Block until we're ready to go */ | ||
| 73 | static void ready(int ready_out, int wakefd) | ||
| 74 | { | ||
| 75 | char dummy; | ||
| 76 | struct pollfd pollfd = { .fd = wakefd, .events = POLLIN }; | ||
| 77 | |||
| 78 | /* Tell them we're ready. */ | ||
| 79 | if (write(ready_out, &dummy, 1) != 1) | ||
| 80 | barf("CLIENT: ready write"); | ||
| 81 | |||
| 82 | /* Wait for "GO" signal */ | ||
| 83 | if (poll(&pollfd, 1, -1) != 1) | ||
| 84 | barf("poll"); | ||
| 85 | } | ||
| 86 | |||
| 87 | /* Sender sprays loops messages down each file descriptor */ | ||
| 88 | static void *sender(struct sender_context *ctx) | ||
| 89 | { | ||
| 90 | char data[DATASIZE]; | ||
| 91 | unsigned int i, j; | ||
| 92 | |||
| 93 | ready(ctx->ready_out, ctx->wakefd); | ||
| 94 | |||
| 95 | /* Now pump to every receiver. */ | ||
| 96 | for (i = 0; i < loops; i++) { | ||
| 97 | for (j = 0; j < ctx->num_fds; j++) { | ||
| 98 | int ret, done = 0; | ||
| 99 | |||
| 100 | again: | ||
| 101 | ret = write(ctx->out_fds[j], data + done, | ||
| 102 | sizeof(data)-done); | ||
| 103 | if (ret < 0) | ||
| 104 | barf("SENDER: write"); | ||
| 105 | done += ret; | ||
| 106 | if (done < DATASIZE) | ||
| 107 | goto again; | ||
| 108 | } | ||
| 109 | } | ||
| 110 | |||
| 111 | return NULL; | ||
| 112 | } | ||
| 113 | |||
| 114 | |||
| 115 | /* One receiver per fd */ | ||
| 116 | static void *receiver(struct receiver_context* ctx) | ||
| 117 | { | ||
| 118 | unsigned int i; | ||
| 119 | |||
| 120 | if (!thread_mode) | ||
| 121 | close(ctx->in_fds[1]); | ||
| 122 | |||
| 123 | /* Wait for start... */ | ||
| 124 | ready(ctx->ready_out, ctx->wakefd); | ||
| 125 | |||
| 126 | /* Receive them all */ | ||
| 127 | for (i = 0; i < ctx->num_packets; i++) { | ||
| 128 | char data[DATASIZE]; | ||
| 129 | int ret, done = 0; | ||
| 130 | |||
| 131 | again: | ||
| 132 | ret = read(ctx->in_fds[0], data + done, DATASIZE - done); | ||
| 133 | if (ret < 0) | ||
| 134 | barf("SERVER: read"); | ||
| 135 | done += ret; | ||
| 136 | if (done < DATASIZE) | ||
| 137 | goto again; | ||
| 138 | } | ||
| 139 | |||
| 140 | return NULL; | ||
| 141 | } | ||
| 142 | |||
| 143 | static pthread_t create_worker(void *ctx, void *(*func)(void *)) | ||
| 144 | { | ||
| 145 | pthread_attr_t attr; | ||
| 146 | pthread_t childid; | ||
| 147 | int err; | ||
| 148 | |||
| 149 | if (!thread_mode) { | ||
| 150 | /* process mode */ | ||
| 151 | /* Fork the receiver. */ | ||
| 152 | switch (fork()) { | ||
| 153 | case -1: | ||
| 154 | barf("fork()"); | ||
| 155 | break; | ||
| 156 | case 0: | ||
| 157 | (*func) (ctx); | ||
| 158 | exit(0); | ||
| 159 | break; | ||
| 160 | default: | ||
| 161 | break; | ||
| 162 | } | ||
| 163 | |||
| 164 | return (pthread_t)0; | ||
| 165 | } | ||
| 166 | |||
| 167 | if (pthread_attr_init(&attr) != 0) | ||
| 168 | barf("pthread_attr_init:"); | ||
| 169 | |||
| 170 | #ifndef __ia64__ | ||
| 171 | if (pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0) | ||
| 172 | barf("pthread_attr_setstacksize"); | ||
| 173 | #endif | ||
| 174 | |||
| 175 | err = pthread_create(&childid, &attr, func, ctx); | ||
| 176 | if (err != 0) { | ||
| 177 | fprintf(stderr, "pthread_create failed: %s (%d)\n", | ||
| 178 | strerror(err), err); | ||
| 179 | exit(-1); | ||
| 180 | } | ||
| 181 | return childid; | ||
| 182 | } | ||
| 183 | |||
| 184 | static void reap_worker(pthread_t id) | ||
| 185 | { | ||
| 186 | int proc_status; | ||
| 187 | void *thread_status; | ||
| 188 | |||
| 189 | if (!thread_mode) { | ||
| 190 | /* process mode */ | ||
| 191 | wait(&proc_status); | ||
| 192 | if (!WIFEXITED(proc_status)) | ||
| 193 | exit(1); | ||
| 194 | } else { | ||
| 195 | pthread_join(id, &thread_status); | ||
| 196 | } | ||
| 197 | } | ||
| 198 | |||
| 199 | /* One group of senders and receivers */ | ||
| 200 | static unsigned int group(pthread_t *pth, | ||
| 201 | unsigned int num_fds, | ||
| 202 | int ready_out, | ||
| 203 | int wakefd) | ||
| 204 | { | ||
| 205 | unsigned int i; | ||
| 206 | struct sender_context *snd_ctx = malloc(sizeof(struct sender_context) | ||
| 207 | + num_fds * sizeof(int)); | ||
| 208 | |||
| 209 | if (!snd_ctx) | ||
| 210 | barf("malloc()"); | ||
| 211 | |||
| 212 | for (i = 0; i < num_fds; i++) { | ||
| 213 | int fds[2]; | ||
| 214 | struct receiver_context *ctx = malloc(sizeof(*ctx)); | ||
| 215 | |||
| 216 | if (!ctx) | ||
| 217 | barf("malloc()"); | ||
| 218 | |||
| 219 | |||
| 220 | /* Create the pipe between client and server */ | ||
| 221 | fdpair(fds); | ||
| 222 | |||
| 223 | ctx->num_packets = num_fds * loops; | ||
| 224 | ctx->in_fds[0] = fds[0]; | ||
| 225 | ctx->in_fds[1] = fds[1]; | ||
| 226 | ctx->ready_out = ready_out; | ||
| 227 | ctx->wakefd = wakefd; | ||
| 228 | |||
| 229 | pth[i] = create_worker(ctx, (void *)receiver); | ||
| 230 | |||
| 231 | snd_ctx->out_fds[i] = fds[1]; | ||
| 232 | if (!thread_mode) | ||
| 233 | close(fds[0]); | ||
| 234 | } | ||
| 235 | |||
| 236 | /* Now we have all the fds, fork the senders */ | ||
| 237 | for (i = 0; i < num_fds; i++) { | ||
| 238 | snd_ctx->ready_out = ready_out; | ||
| 239 | snd_ctx->wakefd = wakefd; | ||
| 240 | snd_ctx->num_fds = num_fds; | ||
| 241 | |||
| 242 | pth[num_fds+i] = create_worker(snd_ctx, (void *)sender); | ||
| 243 | } | ||
| 244 | |||
| 245 | /* Close the fds we have left */ | ||
| 246 | if (!thread_mode) | ||
| 247 | for (i = 0; i < num_fds; i++) | ||
| 248 | close(snd_ctx->out_fds[i]); | ||
| 249 | |||
| 250 | /* Return number of children to reap */ | ||
| 251 | return num_fds * 2; | ||
| 252 | } | ||
| 253 | |||
| 254 | static const struct option options[] = { | ||
| 255 | OPT_BOOLEAN('p', "pipe", &use_pipes, | ||
| 256 | "Use pipe() instead of socketpair()"), | ||
| 257 | OPT_BOOLEAN('t', "thread", &thread_mode, | ||
| 258 | "Be multi thread instead of multi process"), | ||
| 259 | OPT_INTEGER('g', "group", &num_groups, | ||
| 260 | "Specify number of groups"), | ||
| 261 | OPT_INTEGER('l', "loop", &loops, | ||
| 262 | "Specify number of loops"), | ||
| 263 | OPT_END() | ||
| 264 | }; | ||
| 265 | |||
| 266 | static const char * const bench_sched_message_usage[] = { | ||
| 267 | "perf bench sched messaging <options>", | ||
| 268 | NULL | ||
| 269 | }; | ||
| 270 | |||
| 271 | int bench_sched_messaging(int argc, const char **argv, | ||
| 272 | const char *prefix __used) | ||
| 273 | { | ||
| 274 | unsigned int i, total_children; | ||
| 275 | struct timeval start, stop, diff; | ||
| 276 | unsigned int num_fds = 20; | ||
| 277 | int readyfds[2], wakefds[2]; | ||
| 278 | char dummy; | ||
| 279 | pthread_t *pth_tab; | ||
| 280 | |||
| 281 | argc = parse_options(argc, argv, options, | ||
| 282 | bench_sched_message_usage, 0); | ||
| 283 | |||
| 284 | pth_tab = malloc(num_fds * 2 * num_groups * sizeof(pthread_t)); | ||
| 285 | if (!pth_tab) | ||
| 286 | barf("main:malloc()"); | ||
| 287 | |||
| 288 | fdpair(readyfds); | ||
| 289 | fdpair(wakefds); | ||
| 290 | |||
| 291 | total_children = 0; | ||
| 292 | for (i = 0; i < num_groups; i++) | ||
| 293 | total_children += group(pth_tab+total_children, num_fds, | ||
| 294 | readyfds[1], wakefds[0]); | ||
| 295 | |||
| 296 | /* Wait for everyone to be ready */ | ||
| 297 | for (i = 0; i < total_children; i++) | ||
| 298 | if (read(readyfds[0], &dummy, 1) != 1) | ||
| 299 | barf("Reading for readyfds"); | ||
| 300 | |||
| 301 | gettimeofday(&start, NULL); | ||
| 302 | |||
| 303 | /* Kick them off */ | ||
| 304 | if (write(wakefds[1], &dummy, 1) != 1) | ||
| 305 | barf("Writing to start them"); | ||
| 306 | |||
| 307 | /* Reap them all */ | ||
| 308 | for (i = 0; i < total_children; i++) | ||
| 309 | reap_worker(pth_tab[i]); | ||
| 310 | |||
| 311 | gettimeofday(&stop, NULL); | ||
| 312 | |||
| 313 | timersub(&stop, &start, &diff); | ||
| 314 | |||
| 315 | switch (bench_format) { | ||
| 316 | case BENCH_FORMAT_DEFAULT: | ||
| 317 | printf("# %d sender and receiver %s per group\n", | ||
| 318 | num_fds, thread_mode ? "threads" : "processes"); | ||
| 319 | printf("# %d groups == %d %s run\n\n", | ||
| 320 | num_groups, num_groups * 2 * num_fds, | ||
| 321 | thread_mode ? "threads" : "processes"); | ||
| 322 | printf(" %14s: %lu.%03lu [sec]\n", "Total time", | ||
| 323 | diff.tv_sec, diff.tv_usec/1000); | ||
| 324 | break; | ||
| 325 | case BENCH_FORMAT_SIMPLE: | ||
| 326 | printf("%lu.%03lu\n", diff.tv_sec, diff.tv_usec/1000); | ||
| 327 | break; | ||
| 328 | default: | ||
| 329 | /* reaching here is a bug: unknown format */ | ||
| 330 | fprintf(stderr, "Unknown format:%d\n", bench_format); | ||
| 331 | exit(1); | ||
| 332 | break; | ||
| 333 | } | ||
| 334 | |||
| 335 | return 0; | ||
| 336 | } | ||
| 337 |
tools/perf/bench/sched-pipe.c
| File was created | 1 | /* | |
| 2 | * | ||
| 3 | * sched-pipe.c | ||
| 4 | * | ||
| 5 | * pipe: Benchmark for pipe() | ||
| 6 | * | ||
| 7 | * Based on pipe-test-1m.c by Ingo Molnar <mingo@redhat.com> | ||
| 8 | * http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c | ||
| 9 | * Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include "../perf.h" | ||
| 14 | #include "../util/util.h" | ||
| 15 | #include "../util/parse-options.h" | ||
| 16 | #include "../builtin.h" | ||
| 17 | #include "bench.h" | ||
| 18 | |||
| 19 | #include <unistd.h> | ||
| 20 | #include <stdio.h> | ||
| 21 | #include <stdlib.h> | ||
| 22 | #include <signal.h> | ||
| 23 | #include <sys/wait.h> | ||
| 24 | #include <linux/unistd.h> | ||
| 25 | #include <string.h> | ||
| 26 | #include <errno.h> | ||
| 27 | #include <assert.h> | ||
| 28 | #include <sys/time.h> | ||
| 29 | #include <sys/types.h> | ||
| 30 | |||
| 31 | #define LOOPS_DEFAULT 1000000 | ||
| 32 | static int loops = LOOPS_DEFAULT; | ||
| 33 | |||
| 34 | static const struct option options[] = { | ||
| 35 | OPT_INTEGER('l', "loop", &loops, | ||
| 36 | "Specify number of loops"), | ||
| 37 | OPT_END() | ||
| 38 | }; | ||
| 39 | |||
| 40 | static const char * const bench_sched_pipe_usage[] = { | ||
| 41 | "perf bench sched pipe <options>", | ||
| 42 | NULL | ||
| 43 | }; | ||
| 44 | |||
| 45 | int bench_sched_pipe(int argc, const char **argv, | ||
| 46 | const char *prefix __used) | ||
| 47 | { | ||
| 48 | int pipe_1[2], pipe_2[2]; | ||
| 49 | int m = 0, i; | ||
| 50 | struct timeval start, stop, diff; | ||
| 51 | unsigned long long result_usec = 0; | ||
| 52 | |||
| 53 | /* | ||
| 54 | * why does "ret" exist? | ||
| 55 | * discarding the return value of read() and write() | ||
| 56 | * causes a build error in perf's build environment | ||
| 57 | */ | ||
| 58 | int ret, wait_stat; | ||
| 59 | pid_t pid, retpid; | ||
| 60 | |||
| 61 | argc = parse_options(argc, argv, options, | ||
| 62 | bench_sched_pipe_usage, 0); | ||
| 63 | |||
| 64 | assert(!pipe(pipe_1)); | ||
| 65 | assert(!pipe(pipe_2)); | ||
| 66 | |||
| 67 | pid = fork(); | ||
| 68 | assert(pid >= 0); | ||
| 69 | |||
| 70 | gettimeofday(&start, NULL); | ||
| 71 | |||
| 72 | if (!pid) { | ||
| 73 | for (i = 0; i < loops; i++) { | ||
| 74 | ret = read(pipe_1[0], &m, sizeof(int)); | ||
| 75 | ret = write(pipe_2[1], &m, sizeof(int)); | ||
| 76 | } | ||
| 77 | } else { | ||
| 78 | for (i = 0; i < loops; i++) { | ||
| 79 | ret = write(pipe_1[1], &m, sizeof(int)); | ||
| 80 | ret = read(pipe_2[0], &m, sizeof(int)); | ||
| 81 | } | ||
| 82 | } | ||
| 83 | |||
| 84 | gettimeofday(&stop, NULL); | ||
| 85 | timersub(&stop, &start, &diff); | ||
| 86 | |||
| 87 | if (pid) { | ||
| 88 | retpid = waitpid(pid, &wait_stat, 0); | ||
| 89 | assert((retpid == pid) && WIFEXITED(wait_stat)); | ||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | switch (bench_format) { | ||
| 94 | case BENCH_FORMAT_DEFAULT: | ||
| 95 | printf("# Extecuted %d pipe operations between two tasks\n\n", | ||
| 96 | loops); | ||
| 97 | |||
| 98 | result_usec = diff.tv_sec * 1000000; | ||
| 99 | result_usec += diff.tv_usec; | ||
| 100 | |||
| 101 | printf(" %14s: %lu.%03lu [sec]\n\n", "Total time", | ||
| 102 | diff.tv_sec, diff.tv_usec/1000); | ||
| 103 | |||
| 104 | printf(" %14lf usecs/op\n", | ||
| 105 | (double)result_usec / (double)loops); | ||
| 106 | printf(" %14d ops/sec\n", | ||
| 107 | (int)((double)loops / | ||
| 108 | ((double)result_usec / (double)1000000))); | ||
| 109 | break; | ||
| 110 | |||
| 111 | case BENCH_FORMAT_SIMPLE: | ||
| 112 | printf("%lu.%03lu\n", | ||
| 113 | diff.tv_sec, diff.tv_usec / 1000); | ||
| 114 | break; | ||
| 115 | |||
| 116 | default: | ||
| 117 | /* reaching here is a bug: unknown format */ | ||
| 118 | fprintf(stderr, "Unknown format:%d\n", bench_format); | ||
| 119 | exit(1); | ||
| 120 | break; | ||
| 121 | } | ||
| 122 | |||
| 123 | return 0; | ||
| 124 | } | ||
| 125 |
tools/perf/builtin-bench.c
| File was created | 1 | /* | |
| 2 | * | ||
| 3 | * builtin-bench.c | ||
| 4 | * | ||
| 5 | * General benchmarking subsystem provided by perf | ||
| 6 | * | ||
| 7 | * Copyright (C) 2009, Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> | ||
| 8 | * | ||
| 9 | */ | ||
| 10 | |||
| 11 | /* | ||
| 12 | * | ||
| 13 | * Available subsystem list: | ||
| 14 | * sched ... scheduler and IPC mechanism | ||
| 15 | * | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include "perf.h" | ||
| 19 | #include "util/util.h" | ||
| 20 | #include "util/parse-options.h" | ||
| 21 | #include "builtin.h" | ||
| 22 | #include "bench/bench.h" | ||
| 23 | |||
| 24 | #include <stdio.h> | ||
| 25 | #include <stdlib.h> | ||
| 26 | #include <string.h> | ||
| 27 | |||
| 28 | struct bench_suite { | ||
| 29 | const char *name; | ||
| 30 | const char *summary; | ||
| 31 | int (*fn)(int, const char **, const char *); | ||
| 32 | }; | ||
| 33 | |||
| 34 | static struct bench_suite sched_suites[] = { | ||
| 35 | { "messaging", | ||
| 36 | "Benchmark for scheduler and IPC mechanisms", | ||
| 37 | bench_sched_messaging }, | ||
| 38 | { "pipe", | ||
| 39 | "Flood of communication over pipe() between two processes", | ||
| 40 | bench_sched_pipe }, | ||
| 41 | { NULL, | ||
| 42 | NULL, | ||
| 43 | NULL } | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct bench_subsys { | ||
| 47 | const char *name; | ||
| 48 | const char *summary; | ||
| 49 | struct bench_suite *suites; | ||
| 50 | }; | ||
| 51 | |||
| 52 | static struct bench_subsys subsystems[] = { | ||
| 53 | { "sched", | ||
| 54 | "scheduler and IPC mechanism", | ||
| 55 | sched_suites }, | ||
| 56 | { NULL, | ||
| 57 | NULL, | ||
| 58 | NULL } | ||
| 59 | }; | ||
| 60 | |||
| 61 | static void dump_suites(int subsys_index) | ||
| 62 | { | ||
| 63 | int i; | ||
| 64 | |||
| 65 | printf("List of available suites for %s...\n\n", | ||
| 66 | subsystems[subsys_index].name); | ||
| 67 | |||
| 68 | for (i = 0; subsystems[subsys_index].suites[i].name; i++) | ||
| 69 | printf("\t%s: %s\n", | ||
| 70 | subsystems[subsys_index].suites[i].name, | ||
| 71 | subsystems[subsys_index].suites[i].summary); | ||
| 72 | |||
| 73 | printf("\n"); | ||
| 74 | return; | ||
| 75 | } | ||
| 76 | |||
| 77 | static char *bench_format_str; | ||
| 78 | int bench_format = BENCH_FORMAT_DEFAULT; | ||
| 79 | |||
| 80 | static const struct option bench_options[] = { | ||
| 81 | OPT_STRING('f', "format", &bench_format_str, "default", | ||
| 82 | "Specify format style"), | ||
| 83 | OPT_END() | ||
| 84 | }; | ||
| 85 | |||
| 86 | static const char * const bench_usage[] = { | ||
| 87 | "perf bench [<common options>] <subsystem> <suite> [<options>]", | ||
| 88 | NULL | ||
| 89 | }; | ||
| 90 | |||
| 91 | static void print_usage(void) | ||
| 92 | { | ||
| 93 | int i; | ||
| 94 | |||
| 95 | printf("Usage: \n"); | ||
| 96 | for (i = 0; bench_usage[i]; i++) | ||
| 97 | printf("\t%s\n", bench_usage[i]); | ||
| 98 | printf("\n"); | ||
| 99 | |||
| 100 | printf("List of available subsystems...\n\n"); | ||
| 101 | |||
| 102 | for (i = 0; subsystems[i].name; i++) | ||
| 103 | printf("\t%s: %s\n", | ||
| 104 | subsystems[i].name, subsystems[i].summary); | ||
| 105 | printf("\n"); | ||
| 106 | } | ||
| 107 | |||
| 108 | static int bench_str2int(char *str) | ||
| 109 | { | ||
| 110 | if (!str) | ||
| 111 | return BENCH_FORMAT_DEFAULT; | ||
| 112 | |||
| 113 | if (!strcmp(str, BENCH_FORMAT_DEFAULT_STR)) | ||
| 114 | return BENCH_FORMAT_DEFAULT; | ||
| 115 | else if (!strcmp(str, BENCH_FORMAT_SIMPLE_STR)) | ||
| 116 | return BENCH_FORMAT_SIMPLE; | ||
| 117 | |||
| 118 | return BENCH_FORMAT_UNKNOWN; | ||
| 119 | } | ||
| 120 | |||
| 121 | int cmd_bench(int argc, const char **argv, const char *prefix __used) | ||
| 122 | { | ||
| 123 | int i, j, status = 0; | ||
| 124 | |||
| 125 | if (argc < 2) { | ||
| 126 | /* No subsystem specified. */ | ||
| 127 | print_usage(); | ||
| 128 | goto end; | ||
| 129 | } | ||
| 130 | |||
| 131 | argc = parse_options(argc, argv, bench_options, bench_usage, | ||
| 132 | PARSE_OPT_STOP_AT_NON_OPTION); | ||
| 133 | |||
| 134 | bench_format = bench_str2int(bench_format_str); | ||
| 135 | if (bench_format == BENCH_FORMAT_UNKNOWN) { | ||
| 136 | printf("Unknown format descriptor:%s\n", bench_format_str); | ||
| 137 | goto end; | ||
| 138 | } | ||
| 139 | |||
| 140 | if (argc < 1) { | ||
| 141 | print_usage(); | ||
| 142 | goto end; | ||
| 143 | } | ||
| 144 | |||
| 145 | for (i = 0; subsystems[i].name; i++) { | ||
| 146 | if (strcmp(subsystems[i].name, argv[0])) | ||
| 147 | continue; | ||
| 148 | |||
| 149 | if (argc < 2) { | ||
| 150 | /* No suite specified. */ | ||
| 151 | dump_suites(i); | ||
| 152 | goto end; | ||
| 153 | } | ||
| 154 | |||
| 155 | for (j = 0; subsystems[i].suites[j].name; j++) { | ||
| 156 | if (strcmp(subsystems[i].suites[j].name, argv[1])) | ||
| 157 | continue; | ||
| 158 | |||
| 159 | if (bench_format == BENCH_FORMAT_DEFAULT) | ||
| 160 | printf("# Running %s/%s benchmark...\n", | ||
| 161 | subsystems[i].name, | ||
| 162 | subsystems[i].suites[j].name); | ||
| 163 | status = subsystems[i].suites[j].fn(argc - 1, | ||
| 164 | argv + 1, prefix); | ||
| 165 | goto end; | ||
| 166 | } | ||
| 167 | |||
| 168 | if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) { | ||
| 169 | dump_suites(i); | ||
| 170 | goto end; | ||
| 171 | } | ||
| 172 | |||
| 173 | printf("Unknown suite:%s for %s\n", argv[1], argv[0]); | ||
| 174 | status = 1; | ||
| 175 | goto end; | ||
| 176 | } | ||
| 177 | |||
| 178 | printf("Unknown subsystem:%s\n", argv[0]); | ||
| 179 | status = 1; | ||
| 180 | |||
| 181 | end: | ||
| 182 | return status; | ||
| 183 | } | ||
| 184 |
tools/perf/builtin.h
| 1 | #ifndef BUILTIN_H | 1 | #ifndef BUILTIN_H |
| 2 | #define BUILTIN_H | 2 | #define BUILTIN_H |
| 3 | 3 | ||
| 4 | #include "util/util.h" | 4 | #include "util/util.h" |
| 5 | #include "util/strbuf.h" | 5 | #include "util/strbuf.h" |
| 6 | 6 | ||
| 7 | extern const char perf_version_string[]; | 7 | extern const char perf_version_string[]; |
| 8 | extern const char perf_usage_string[]; | 8 | extern const char perf_usage_string[]; |
| 9 | extern const char perf_more_info_string[]; | 9 | extern const char perf_more_info_string[]; |
| 10 | 10 | ||
| 11 | extern void list_common_cmds_help(void); | 11 | extern void list_common_cmds_help(void); |
| 12 | extern const char *help_unknown_cmd(const char *cmd); | 12 | extern const char *help_unknown_cmd(const char *cmd); |
| 13 | extern void prune_packed_objects(int); | 13 | extern void prune_packed_objects(int); |
| 14 | extern int read_line_with_nul(char *buf, int size, FILE *file); | 14 | extern int read_line_with_nul(char *buf, int size, FILE *file); |
| 15 | extern int check_pager_config(const char *cmd); | 15 | extern int check_pager_config(const char *cmd); |
| 16 | 16 | ||
| 17 | extern int cmd_annotate(int argc, const char **argv, const char *prefix); | 17 | extern int cmd_annotate(int argc, const char **argv, const char *prefix); |
| 18 | extern int cmd_bench(int argc, const char **argv, const char *prefix); | ||
| 18 | extern int cmd_help(int argc, const char **argv, const char *prefix); | 19 | extern int cmd_help(int argc, const char **argv, const char *prefix); |
| 19 | extern int cmd_sched(int argc, const char **argv, const char *prefix); | 20 | extern int cmd_sched(int argc, const char **argv, const char *prefix); |
| 20 | extern int cmd_list(int argc, const char **argv, const char *prefix); | 21 | extern int cmd_list(int argc, const char **argv, const char *prefix); |
| 21 | extern int cmd_record(int argc, const char **argv, const char *prefix); | 22 | extern int cmd_record(int argc, const char **argv, const char *prefix); |
| 22 | extern int cmd_report(int argc, const char **argv, const char *prefix); | 23 | extern int cmd_report(int argc, const char **argv, const char *prefix); |
| 23 | extern int cmd_stat(int argc, const char **argv, const char *prefix); | 24 | extern int cmd_stat(int argc, const char **argv, const char *prefix); |
| 24 | extern int cmd_timechart(int argc, const char **argv, const char *prefix); | 25 | extern int cmd_timechart(int argc, const char **argv, const char *prefix); |
| 25 | extern int cmd_top(int argc, const char **argv, const char *prefix); | 26 | extern int cmd_top(int argc, const char **argv, const char *prefix); |
| 26 | extern int cmd_trace(int argc, const char **argv, const char *prefix); | 27 | extern int cmd_trace(int argc, const char **argv, const char *prefix); |
| 27 | extern int cmd_version(int argc, const char **argv, const char *prefix); | 28 | extern int cmd_version(int argc, const char **argv, const char *prefix); |
| 28 | 29 | ||
| 29 | #endif | 30 | #endif |
| 30 | 31 |
tools/perf/command-list.txt
| 1 | # | 1 | # |
| 2 | # List of known perf commands. | 2 | # List of known perf commands. |
| 3 | # command name category [deprecated] [common] | 3 | # command name category [deprecated] [common] |
| 4 | # | 4 | # |
| 5 | perf-annotate mainporcelain common | 5 | perf-annotate mainporcelain common |
| 6 | perf-bench mainporcelain common | ||
| 6 | perf-list mainporcelain common | 7 | perf-list mainporcelain common |
| 7 | perf-sched mainporcelain common | 8 | perf-sched mainporcelain common |
| 8 | perf-record mainporcelain common | 9 | perf-record mainporcelain common |
| 9 | perf-report mainporcelain common | 10 | perf-report mainporcelain common |
| 10 | perf-stat mainporcelain common | 11 | perf-stat mainporcelain common |
| 11 | perf-timechart mainporcelain common | 12 | perf-timechart mainporcelain common |
| 12 | perf-top mainporcelain common | 13 | perf-top mainporcelain common |
| 13 | perf-trace mainporcelain common | 14 | perf-trace mainporcelain common |
| 14 | 15 |
tools/perf/design.txt
| 1 | 1 | ||
| 2 | Performance Counters for Linux | 2 | Performance Counters for Linux |
| 3 | ------------------------------ | 3 | ------------------------------ |
| 4 | 4 | ||
| 5 | Performance counters are special hardware registers available on most modern | 5 | Performance counters are special hardware registers available on most modern |
| 6 | CPUs. These registers count the number of certain types of hw events, such | 6 | CPUs. These registers count the number of certain types of hw events, such |
| 7 | as instructions executed, cache misses suffered, or branches mis-predicted - | 7 | as instructions executed, cache misses suffered, or branches mis-predicted - |
| 8 | without slowing down the kernel or applications. These registers can also | 8 | without slowing down the kernel or applications. These registers can also |
| 9 | trigger interrupts when a threshold number of events have passed - and can | 9 | trigger interrupts when a threshold number of events have passed - and can |
| 10 | thus be used to profile the code that runs on that CPU. | 10 | thus be used to profile the code that runs on that CPU. |
| 11 | 11 | ||
| 12 | The Linux Performance Counter subsystem provides an abstraction of these | 12 | The Linux Performance Counter subsystem provides an abstraction of these |
| 13 | hardware capabilities. It provides per task and per CPU counters, counter | 13 | hardware capabilities. It provides per task and per CPU counters, counter |
| 14 | groups, and it provides event capabilities on top of those. It | 14 | groups, and it provides event capabilities on top of those. It |
| 15 | provides "virtual" 64-bit counters, regardless of the width of the | 15 | provides "virtual" 64-bit counters, regardless of the width of the |
| 16 | underlying hardware counters. | 16 | underlying hardware counters. |
| 17 | 17 | ||
| 18 | Performance counters are accessed via special file descriptors. | 18 | Performance counters are accessed via special file descriptors. |
| 19 | There's one file descriptor per virtual counter used. | 19 | There's one file descriptor per virtual counter used. |
| 20 | 20 | ||
| 21 | The special file descriptor is opened via the perf_event_open() | 21 | The special file descriptor is opened via the perf_event_open() |
| 22 | system call: | 22 | system call: |
| 23 | 23 | ||
| 24 | int sys_perf_event_open(struct perf_event_hw_event *hw_event_uptr, | 24 | int sys_perf_event_open(struct perf_event_hw_event *hw_event_uptr, |
| 25 | pid_t pid, int cpu, int group_fd, | 25 | pid_t pid, int cpu, int group_fd, |
| 26 | unsigned long flags); | 26 | unsigned long flags); |
| 27 | 27 | ||
| 28 | The syscall returns the new fd. The fd can be used via the normal | 28 | The syscall returns the new fd. The fd can be used via the normal |
| 29 | VFS system calls: read() can be used to read the counter, fcntl() | 29 | VFS system calls: read() can be used to read the counter, fcntl() |
| 30 | can be used to set the blocking mode, etc. | 30 | can be used to set the blocking mode, etc. |
| 31 | 31 | ||
| 32 | Multiple counters can be kept open at a time, and the counters | 32 | Multiple counters can be kept open at a time, and the counters |
| 33 | can be poll()ed. | 33 | can be poll()ed. |
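
As a concrete illustration, here is a minimal sketch of opening and
reading one counter. It assumes the kernel headers export the struct and
syscall number described in this document (there is no glibc wrapper, so
the call goes through syscall(2)); the wrapper name perf_event_open() is
our own convenience helper, not part of the ABI:

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>

    static int
    perf_event_open(struct perf_event_hw_event *hw_event, pid_t pid,
                    int cpu, int group_fd, unsigned long flags)
    {
            /* raw syscall; returns the new counter fd, or -1 on error */
            return syscall(__NR_perf_event_open, hw_event, pid, cpu,
                           group_fd, flags);
    }

    /* usage: read the current (virtual 64-bit) counter value */
    static uint64_t read_counter(int fd)
    {
            uint64_t value = 0;

            if (read(fd, &value, sizeof(value)) != sizeof(value))
                    return 0;
            return value;
    }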
| 34 | 34 | ||
| 35 | When creating a new counter fd, 'perf_event_hw_event' is: | 35 | When creating a new counter fd, 'perf_event_hw_event' is: |
| 36 | 36 | ||
| 37 | struct perf_event_hw_event { | 37 | struct perf_event_hw_event { |
| 38 | /* | 38 | /* |
| 39 | * The MSB of the config word signifies if the rest contains cpu | 39 | * The MSB of the config word signifies if the rest contains cpu |
| 40 | * specific (raw) counter configuration data, if unset, the next | 40 | * specific (raw) counter configuration data, if unset, the next |
| 41 | * 7 bits are an event type and the rest of the bits are the event | 41 | * 7 bits are an event type and the rest of the bits are the event |
| 42 | * identifier. | 42 | * identifier. |
| 43 | */ | 43 | */ |
| 44 | __u64 config; | 44 | __u64 config; |
| 45 | 45 | ||
| 46 | __u64 irq_period; | 46 | __u64 irq_period; |
| 47 | __u32 record_type; | 47 | __u32 record_type; |
| 48 | __u32 read_format; | 48 | __u32 read_format; |
| 49 | 49 | ||
| 50 | __u64 disabled : 1, /* off by default */ | 50 | __u64 disabled : 1, /* off by default */ |
| 51 | inherit : 1, /* children inherit it */ | 51 | inherit : 1, /* children inherit it */ |
| 52 | pinned : 1, /* must always be on PMU */ | 52 | pinned : 1, /* must always be on PMU */ |
| 53 | exclusive : 1, /* only group on PMU */ | 53 | exclusive : 1, /* only group on PMU */ |
| 54 | exclude_user : 1, /* don't count user */ | 54 | exclude_user : 1, /* don't count user */ |
| 55 | exclude_kernel : 1, /* ditto kernel */ | 55 | exclude_kernel : 1, /* ditto kernel */ |
| 56 | exclude_hv : 1, /* ditto hypervisor */ | 56 | exclude_hv : 1, /* ditto hypervisor */ |
| 57 | exclude_idle : 1, /* don't count when idle */ | 57 | exclude_idle : 1, /* don't count when idle */ |
| 58 | mmap : 1, /* include mmap data */ | 58 | mmap : 1, /* include mmap data */ |
| 59 | munmap : 1, /* include munmap data */ | 59 | munmap : 1, /* include munmap data */ |
| 60 | comm : 1, /* include comm data */ | 60 | comm : 1, /* include comm data */ |
| 61 | 61 | ||
| 62 | __reserved_1 : 52; | 62 | __reserved_1 : 52; |
| 63 | 63 | ||
| 64 | __u32 extra_config_len; | 64 | __u32 extra_config_len; |
| 65 | __u32 wakeup_events; /* wakeup every n events */ | 65 | __u32 wakeup_events; /* wakeup every n events */ |
| 66 | 66 | ||
| 67 | __u64 __reserved_2; | 67 | __u64 __reserved_2; |
| 68 | __u64 __reserved_3; | 68 | __u64 __reserved_3; |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | The 'config' field specifies what the counter should count. It | 71 | The 'config' field specifies what the counter should count. It |
| 72 | is divided into 3 bit-fields: | 72 | is divided into 3 bit-fields: |
| 73 | 73 | ||
| 74 | raw_type: 1 bit (most significant bit) 0x8000_0000_0000_0000 | 74 | raw_type: 1 bit (most significant bit) 0x8000_0000_0000_0000 |
| 75 | type: 7 bits (next most significant) 0x7f00_0000_0000_0000 | 75 | type: 7 bits (next most significant) 0x7f00_0000_0000_0000 |
| 76 | event_id: 56 bits (least significant) 0x00ff_ffff_ffff_ffff | 76 | event_id: 56 bits (least significant) 0x00ff_ffff_ffff_ffff |
| 77 | 77 | ||
| 78 | If 'raw_type' is 1, then the counter will count a hardware event | 78 | If 'raw_type' is 1, then the counter will count a hardware event |
| 79 | specified by the remaining 63 bits of event_config. The encoding is | 79 | specified by the remaining 63 bits of event_config. The encoding is |
| 80 | machine-specific. | 80 | machine-specific. |
| 81 | 81 | ||
| 82 | If 'raw_type' is 0, then the 'type' field says what kind of counter | 82 | If 'raw_type' is 0, then the 'type' field says what kind of counter |
| 83 | this is, with the following encoding: | 83 | this is, with the following encoding: |
| 84 | 84 | ||
| 85 | enum perf_event_types { | 85 | enum perf_event_types { |
| 86 | PERF_TYPE_HARDWARE = 0, | 86 | PERF_TYPE_HARDWARE = 0, |
| 87 | PERF_TYPE_SOFTWARE = 1, | 87 | PERF_TYPE_SOFTWARE = 1, |
| 88 | PERF_TYPE_TRACEPOINT = 2, | 88 | PERF_TYPE_TRACEPOINT = 2, |
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | A counter of PERF_TYPE_HARDWARE will count the hardware event | 91 | A counter of PERF_TYPE_HARDWARE will count the hardware event |
| 92 | specified by 'event_id': | 92 | specified by 'event_id': |
| 93 | 93 | ||
| 94 | /* | 94 | /* |
| 95 | * Generalized performance counter event types, used by the hw_event.event_id | 95 | * Generalized performance counter event types, used by the hw_event.event_id |
| 96 | * parameter of the sys_perf_event_open() syscall: | 96 | * parameter of the sys_perf_event_open() syscall: |
| 97 | */ | 97 | */ |
| 98 | enum hw_event_ids { | 98 | enum hw_event_ids { |
| 99 | /* | 99 | /* |
| 100 | * Common hardware events, generalized by the kernel: | 100 | * Common hardware events, generalized by the kernel: |
| 101 | */ | 101 | */ |
| 102 | PERF_COUNT_HW_CPU_CYCLES = 0, | 102 | PERF_COUNT_HW_CPU_CYCLES = 0, |
| 103 | PERF_COUNT_HW_INSTRUCTIONS = 1, | 103 | PERF_COUNT_HW_INSTRUCTIONS = 1, |
| 104 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | 104 | PERF_COUNT_HW_CACHE_REFERENCES = 2, |
| 105 | PERF_COUNT_HW_CACHE_MISSES = 3, | 105 | PERF_COUNT_HW_CACHE_MISSES = 3, |
| 106 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | 106 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, |
| 107 | PERF_COUNT_HW_BRANCH_MISSES = 5, | 107 | PERF_COUNT_HW_BRANCH_MISSES = 5, |
| 108 | PERF_COUNT_HW_BUS_CYCLES = 6, | 108 | PERF_COUNT_HW_BUS_CYCLES = 6, |
| 109 | }; | 109 | }; |
| 110 | 110 | ||
| 111 | These are standardized types of events that work relatively uniformly | 111 | These are standardized types of events that work relatively uniformly |
| 112 | on all CPUs that implement Performance Counters support under Linux, | 112 | on all CPUs that implement Performance Counters support under Linux, |
| 113 | although there may be variations (e.g., different CPUs might count | 113 | although there may be variations (e.g., different CPUs might count |
| 114 | cache references and misses at different levels of the cache hierarchy). | 114 | cache references and misses at different levels of the cache hierarchy). |
| 115 | If a CPU is not able to count the selected event, then the system call | 115 | If a CPU is not able to count the selected event, then the system call |
| 116 | will return -EINVAL. | 116 | will return -EINVAL. |
| 117 | 117 | ||
| 118 | More hw_event_types are supported as well, but they are CPU-specific | 118 | More hw_event_types are supported as well, but they are CPU-specific |
| 119 | and accessed as raw events. For example, to count "External bus | 119 | and accessed as raw events. For example, to count "External bus |
| 120 | cycles while bus lock signal asserted" events on Intel Core CPUs, pass | 120 | cycles while bus lock signal asserted" events on Intel Core CPUs, pass |
| 121 | in a 0x4064 event_id value and set hw_event.raw_type to 1. | 121 | in a 0x4064 event_id value and set hw_event.raw_type to 1. |
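
Packing these bit-fields by hand is easy to get wrong, so a short worked
example may help. The helper below is purely illustrative (make_config()
and PERF_CONFIG_RAW are our names, not part of the ABI); it simply
follows the raw_type/type/event_id layout given above:

    #include <stdint.h>

    #define PERF_CONFIG_RAW (1ULL << 63)        /* the raw_type bit */

    static inline uint64_t make_config(uint64_t type, uint64_t event_id)
    {
            /* type: 7 bits at positions 56..62, event_id: low 56 bits */
            return ((type & 0x7f) << 56) |
                   (event_id & 0x00ffffffffffffffULL);
    }

    /* generalized hardware event: count retired instructions */
    uint64_t cfg_insn = make_config(PERF_TYPE_HARDWARE,
                                    PERF_COUNT_HW_INSTRUCTIONS);

    /* machine-specific raw event, e.g. 0x4064 on Intel Core */
    uint64_t cfg_raw = PERF_CONFIG_RAW | 0x4064;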
| 122 | 122 | ||
| 123 | A counter of type PERF_TYPE_SOFTWARE will count one of the available | 123 | A counter of type PERF_TYPE_SOFTWARE will count one of the available |
| 124 | software events, selected by 'event_id': | 124 | software events, selected by 'event_id': |
| 125 | 125 | ||
| 126 | /* | 126 | /* |
| 127 | * Special "software" counters provided by the kernel, even if the hardware | 127 | * Special "software" counters provided by the kernel, even if the hardware |
| 128 | * does not support performance counters. These counters measure various | 128 | * does not support performance counters. These counters measure various |
| 129 | * physical and sw events of the kernel (and allow the profiling of them as | 129 | * physical and sw events of the kernel (and allow the profiling of them as |
| 130 | * well): | 130 | * well): |
| 131 | */ | 131 | */ |
| 132 | enum sw_event_ids { | 132 | enum sw_event_ids { |
| 133 | PERF_COUNT_SW_CPU_CLOCK = 0, | 133 | PERF_COUNT_SW_CPU_CLOCK = 0, |
| 134 | PERF_COUNT_SW_TASK_CLOCK = 1, | 134 | PERF_COUNT_SW_TASK_CLOCK = 1, |
| 135 | PERF_COUNT_SW_PAGE_FAULTS = 2, | 135 | PERF_COUNT_SW_PAGE_FAULTS = 2, |
| 136 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | 136 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, |
| 137 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | 137 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, |
| 138 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | 138 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, |
| 139 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | 139 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, |
| 140 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
| 141 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
| 140 | }; | 142 | }; |
| 141 | 143 | ||
| 142 | Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event | 144 | Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event |
| 143 | tracer is available, and event_id values can be obtained from | 145 | tracer is available, and event_id values can be obtained from |
| 144 | /debug/tracing/events/*/*/id | 146 | /debug/tracing/events/*/*/id |
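
A tracepoint counter can therefore be set up by reading one of those id
files first. A sketch (the event path in the comment is just an example,
and debugfs is assumed to be mounted on /debug):

    #include <stdio.h>

    static long long tracepoint_id(const char *path)
    {
            FILE *f = fopen(path, "r");
            long long id = -1;

            if (f) {
                    if (fscanf(f, "%lld", &id) != 1)
                            id = -1;
                    fclose(f);
            }
            return id;   /* use as event_id with PERF_TYPE_TRACEPOINT */
    }

    /* e.g. tracepoint_id("/debug/tracing/events/sched/sched_switch/id") */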
| 145 | 147 | ||
| 146 | 148 | ||
| 147 | Counters come in two flavours: counting counters and sampling | 149 | Counters come in two flavours: counting counters and sampling |
| 148 | counters. A "counting" counter is one that is used for counting the | 150 | counters. A "counting" counter is one that is used for counting the |
| 149 | number of events that occur, and is characterised by having | 151 | number of events that occur, and is characterised by having |
| 150 | irq_period = 0. | 152 | irq_period = 0. |
| 151 | 153 | ||
| 152 | 154 | ||
| 153 | A read() on a counter returns the current value of the counter and possible | 155 | A read() on a counter returns the current value of the counter and possible |
| 154 | additional values as specified by 'read_format'; each value is a u64 (8 bytes) | 156 | additional values as specified by 'read_format'; each value is a u64 (8 bytes) |
| 155 | in size. | 157 | in size. |
| 156 | 158 | ||
| 157 | /* | 159 | /* |
| 158 | * Bits that can be set in hw_event.read_format to request that | 160 | * Bits that can be set in hw_event.read_format to request that |
| 159 | * reads on the counter should return the indicated quantities, | 161 | * reads on the counter should return the indicated quantities, |
| 160 | * in increasing order of bit value, after the counter value. | 162 | * in increasing order of bit value, after the counter value. |
| 161 | */ | 163 | */ |
| 162 | enum perf_event_read_format { | 164 | enum perf_event_read_format { |
| 163 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1, | 165 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1, |
| 164 | PERF_FORMAT_TOTAL_TIME_RUNNING = 2, | 166 | PERF_FORMAT_TOTAL_TIME_RUNNING = 2, |
| 165 | }; | 167 | }; |
| 166 | 168 | ||
| 167 | Using these additional values one can establish the overcommit ratio for a | 169 | Using these additional values one can establish the overcommit ratio for a |
| 168 | particular counter, allowing one to take the round-robin scheduling effect | 170 | particular counter, allowing one to take the round-robin scheduling effect |
| 169 | into account. | 171 | into account. |
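
For instance, a counter that was scheduled onto the PMU for only part of
the time it was enabled can be scaled back up. A sketch, assuming both
read_format bits are set so that read() returns three u64 values in the
order listed above:

    #include <stdint.h>
    #include <unistd.h>

    struct count_triplet {
            uint64_t value;           /* the counter value itself */
            uint64_t time_enabled;    /* PERF_FORMAT_TOTAL_TIME_ENABLED */
            uint64_t time_running;    /* PERF_FORMAT_TOTAL_TIME_RUNNING */
    };

    static uint64_t read_scaled(int fd)
    {
            struct count_triplet t;

            if (read(fd, &t, sizeof(t)) != sizeof(t) ||
                t.time_running == 0)
                    return 0;
            /* estimate what the count would be with no overcommit */
            return (uint64_t)((double)t.value *
                              t.time_enabled / t.time_running);
    }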
| 170 | 172 | ||
| 171 | 173 | ||
| 172 | A "sampling" counter is one that is set up to generate an interrupt | 174 | A "sampling" counter is one that is set up to generate an interrupt |
| 173 | every N events, where N is given by 'irq_period'. A sampling counter | 175 | every N events, where N is given by 'irq_period'. A sampling counter |
| 174 | has irq_period > 0. The record_type controls what data is recorded on each | 176 | has irq_period > 0. The record_type controls what data is recorded on each |
| 175 | interrupt: | 177 | interrupt: |
| 176 | 178 | ||
| 177 | /* | 179 | /* |
| 178 | * Bits that can be set in hw_event.record_type to request information | 180 | * Bits that can be set in hw_event.record_type to request information |
| 179 | * in the overflow packets. | 181 | * in the overflow packets. |
| 180 | */ | 182 | */ |
| 181 | enum perf_event_record_format { | 183 | enum perf_event_record_format { |
| 182 | PERF_RECORD_IP = 1U << 0, | 184 | PERF_RECORD_IP = 1U << 0, |
| 183 | PERF_RECORD_TID = 1U << 1, | 185 | PERF_RECORD_TID = 1U << 1, |
| 184 | PERF_RECORD_TIME = 1U << 2, | 186 | PERF_RECORD_TIME = 1U << 2, |
| 185 | PERF_RECORD_ADDR = 1U << 3, | 187 | PERF_RECORD_ADDR = 1U << 3, |
| 186 | PERF_RECORD_GROUP = 1U << 4, | 188 | PERF_RECORD_GROUP = 1U << 4, |
| 187 | PERF_RECORD_CALLCHAIN = 1U << 5, | 189 | PERF_RECORD_CALLCHAIN = 1U << 5, |
| 188 | }; | 190 | }; |
| 189 | 191 | ||
| 190 | Such (and other) events will be recorded in a ring-buffer, which is | 192 | Such (and other) events will be recorded in a ring-buffer, which is |
| 191 | available to user-space using mmap() (see below). | 193 | available to user-space using mmap() (see below). |
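
Putting the two knobs together, a sampling counter is configured by
setting a non-zero irq_period and OR-ing the desired record bits into
record_type. A sketch (field names as defined in this document; the
chosen period and record bits are arbitrary examples, and cfg_insn is
the config value built in the earlier example):

    struct perf_event_hw_event hw_event = {
            .config      = cfg_insn,   /* count retired instructions */
            .irq_period  = 100000,     /* sample every 100k events */
            .record_type = PERF_RECORD_IP |
                           PERF_RECORD_TID |
                           PERF_RECORD_TIME,
            .disabled    = 1,          /* start disabled, enable later */
    };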
| 192 | 194 | ||
| 193 | The 'disabled' bit specifies whether the counter starts out disabled | 195 | The 'disabled' bit specifies whether the counter starts out disabled |
| 194 | or enabled. If it is initially disabled, it can be enabled by ioctl | 196 | or enabled. If it is initially disabled, it can be enabled by ioctl |
| 195 | or prctl (see below). | 197 | or prctl (see below). |
| 196 | 198 | ||
| 197 | The 'inherit' bit, if set, specifies that this counter should count | 199 | The 'inherit' bit, if set, specifies that this counter should count |
| 198 | events on descendant tasks as well as the task specified. This only | 200 | events on descendant tasks as well as the task specified. This only |
| 199 | applies to new descendants, not to any existing descendants at the | 201 | applies to new descendants, not to any existing descendants at the |
| 200 | time the counter is created (nor to any new descendants of existing | 202 | time the counter is created (nor to any new descendants of existing |
| 201 | descendants). | 203 | descendants). |
| 202 | 204 | ||
| 203 | The 'pinned' bit, if set, specifies that the counter should always be | 205 | The 'pinned' bit, if set, specifies that the counter should always be |
| 204 | on the CPU if at all possible. It only applies to hardware counters | 206 | on the CPU if at all possible. It only applies to hardware counters |
| 205 | and only to group leaders. If a pinned counter cannot be put onto the | 207 | and only to group leaders. If a pinned counter cannot be put onto the |
| 206 | CPU (e.g. because there are not enough hardware counters or because of | 208 | CPU (e.g. because there are not enough hardware counters or because of |
| 207 | a conflict with some other event), then the counter goes into an | 209 | a conflict with some other event), then the counter goes into an |
| 208 | 'error' state, where reads return end-of-file (i.e. read() returns 0) | 210 | 'error' state, where reads return end-of-file (i.e. read() returns 0) |
| 209 | until the counter is subsequently enabled or disabled. | 211 | until the counter is subsequently enabled or disabled. |
| 210 | 212 | ||
| 211 | The 'exclusive' bit, if set, specifies that when this counter's group | 213 | The 'exclusive' bit, if set, specifies that when this counter's group |
| 212 | is on the CPU, it should be the only group using the CPU's counters. | 214 | is on the CPU, it should be the only group using the CPU's counters. |
| 213 | In future, this will allow sophisticated monitoring programs to supply | 215 | In future, this will allow sophisticated monitoring programs to supply |
| 214 | extra configuration information via 'extra_config_len' to exploit | 216 | extra configuration information via 'extra_config_len' to exploit |
| 215 | advanced features of the CPU's Performance Monitor Unit (PMU) that are | 217 | advanced features of the CPU's Performance Monitor Unit (PMU) that are |
| 216 | not otherwise accessible and that might disrupt other hardware | 218 | not otherwise accessible and that might disrupt other hardware |
| 217 | counters. | 219 | counters. |
| 218 | 220 | ||
| 219 | The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a | 221 | The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a |
| 220 | way to request that counting of events be restricted to times when the | 222 | way to request that counting of events be restricted to times when the |
| 221 | CPU is in user, kernel and/or hypervisor mode. | 223 | CPU is in user, kernel and/or hypervisor mode. |
| 222 | 224 | ||
| 223 | The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap | 225 | The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap |
| 224 | operations; these can be used to relate userspace IP addresses to actual | 226 | operations; these can be used to relate userspace IP addresses to actual |
| 225 | code, even after the mapping (or even the whole process) is gone. | 227 | code, even after the mapping (or even the whole process) is gone. |
| 226 | These events are recorded in the ring-buffer (see below). | 228 | These events are recorded in the ring-buffer (see below). |
| 227 | 229 | ||
| 228 | The 'comm' bit allows tracking of process comm data on process creation. | 230 | The 'comm' bit allows tracking of process comm data on process creation. |
| 229 | This too is recorded in the ring-buffer (see below). | 231 | This too is recorded in the ring-buffer (see below). |
| 230 | 232 | ||
| 231 | The 'pid' parameter to the perf_event_open() system call allows the | 233 | The 'pid' parameter to the perf_event_open() system call allows the |
| 232 | counter to be specific to a task: | 234 | counter to be specific to a task: |
| 233 | 235 | ||
| 234 | pid == 0: if the pid parameter is zero, the counter is attached to the | 236 | pid == 0: if the pid parameter is zero, the counter is attached to the |
| 235 | current task. | 237 | current task. |
| 236 | 238 | ||
| 237 | pid > 0: the counter is attached to a specific task (if the current task | 239 | pid > 0: the counter is attached to a specific task (if the current task |
| 238 | has sufficient privilege to do so) | 240 | has sufficient privilege to do so) |
| 239 | 241 | ||
| 240 | pid < 0: all tasks are counted (per cpu counters) | 242 | pid < 0: all tasks are counted (per cpu counters) |
| 241 | 243 | ||
| 242 | The 'cpu' parameter allows a counter to be made specific to a CPU: | 244 | The 'cpu' parameter allows a counter to be made specific to a CPU: |
| 243 | 245 | ||
| 244 | cpu >= 0: the counter is restricted to a specific CPU | 246 | cpu >= 0: the counter is restricted to a specific CPU |
| 245 | cpu == -1: the counter counts on all CPUs | 247 | cpu == -1: the counter counts on all CPUs |
| 246 | 248 | ||
| 247 | (Note: the combination of 'pid == -1' and 'cpu == -1' is not valid.) | 249 | (Note: the combination of 'pid == -1' and 'cpu == -1' is not valid.) |
| 248 | 250 | ||
| 249 | A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts | 251 | A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts |
| 250 | events of that task and 'follows' that task to whatever CPU the task | 252 | events of that task and 'follows' that task to whatever CPU the task |
| 251 | gets scheduled to. Per task counters can be created by any user, for | 253 | gets scheduled to. Per task counters can be created by any user, for |
| 252 | their own tasks. | 254 | their own tasks. |
| 253 | 255 | ||
| 254 | A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts | 256 | A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts |
| 255 | all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege. | 257 | all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege. |
| 256 | 258 | ||
| 257 | The 'flags' parameter is currently unused and must be zero. | 259 | The 'flags' parameter is currently unused and must be zero. |
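To make the parameters above concrete, here is a minimal user-space sketch.
glibc provides no wrapper for this system call, so the perf_event_open()
helper below is our own, built on syscall() (__NR_perf_event_open comes from
the kernel headers):

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu,
			       group_fd, flags);
	}

	static int open_insn_counter(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 1;		/* start disabled, enable via ioctl */
		attr.exclude_kernel = 1;	/* count user mode only */

		/* pid == 0, cpu == -1: this task, on whatever CPU it runs */
		return perf_event_open(&attr, 0, -1, -1, 0);
	}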
| 258 | 260 | ||
| 259 | The 'group_fd' parameter allows counter "groups" to be set up. A | 261 | The 'group_fd' parameter allows counter "groups" to be set up. A |
| 260 | counter group has one counter which is the group "leader". The leader | 262 | counter group has one counter which is the group "leader". The leader |
| 261 | is created first, with group_fd = -1 in the perf_event_open call | 263 | is created first, with group_fd = -1 in the perf_event_open call |
| 262 | that creates it. The rest of the group members are created | 264 | that creates it. The rest of the group members are created |
| 263 | subsequently, with group_fd giving the fd of the group leader. | 265 | subsequently, with group_fd giving the fd of the group leader. |
| 264 | (A single counter on its own is created with group_fd = -1 and is | 266 | (A single counter on its own is created with group_fd = -1 and is |
| 265 | considered to be a group with only 1 member.) | 267 | considered to be a group with only 1 member.) |
| 266 | 268 | ||
| 267 | A counter group is scheduled onto the CPU as a unit, that is, it will | 269 | A counter group is scheduled onto the CPU as a unit, that is, it will |
| 268 | only be put onto the CPU if all of the counters in the group can be | 270 | only be put onto the CPU if all of the counters in the group can be |
| 269 | put onto the CPU. This means that the values of the member counters | 271 | put onto the CPU. This means that the values of the member counters |
| 270 | can be meaningfully compared, added, divided (to get ratios), etc., | 272 | can be meaningfully compared, added, divided (to get ratios), etc., |
| 271 | with each other, since they have counted events for the same set of | 273 | with each other, since they have counted events for the same set of |
| 272 | executed instructions. | 274 | executed instructions. |
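For example, pairing cycles with instructions in one group yields a
meaningful instructions-per-cycle ratio. A sketch, reusing the
perf_event_open() wrapper and headers from the sketch above (error handling
elided):

	static void measure_ipc(void (*workload)(void))
	{
		struct perf_event_attr attr;
		uint64_t cycles = 0, insns = 0;
		int leader, member;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;

		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		leader = perf_event_open(&attr, 0, -1, -1, 0);	/* new group */

		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		member = perf_event_open(&attr, 0, -1, leader, 0); /* join group */

		workload();		/* hypothetical code under test */

		read(leader, &cycles, sizeof(cycles));
		read(member, &insns, sizeof(insns));
		/* both counted the same instructions, so insns/cycles is IPC */
	}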
| 273 | 275 | ||
| 274 | 276 | ||
| 275 | As stated, asynchronous events, such as counter overflow or PROT_EXEC mmap | 277 | As stated, asynchronous events, such as counter overflow or PROT_EXEC mmap |
| 276 | tracking, are logged into a ring-buffer. This ring-buffer is created and | 278 | tracking, are logged into a ring-buffer. This ring-buffer is created and |
| 277 | accessed through mmap(). | 279 | accessed through mmap(). |
| 278 | 280 | ||
| 279 | The mmap size should be 1+2^n pages, where the first page is a meta-data page | 281 | The mmap size should be 1+2^n pages, where the first page is a meta-data page |
| 280 | (struct perf_event_mmap_page) that contains various bits of information such | 282 | (struct perf_event_mmap_page) that contains various bits of information such |
| 281 | as where the ring-buffer head is. | 283 | as where the ring-buffer head is. |
| 282 | 284 | ||
| 283 | /* | 285 | /* |
| 284 | * Structure of the page that can be mapped via mmap | 286 | * Structure of the page that can be mapped via mmap |
| 285 | */ | 287 | */ |
| 286 | struct perf_event_mmap_page { | 288 | struct perf_event_mmap_page { |
| 287 | __u32 version; /* version number of this structure */ | 289 | __u32 version; /* version number of this structure */ |
| 288 | __u32 compat_version; /* lowest version this is compat with */ | 290 | __u32 compat_version; /* lowest version this is compat with */ |
| 289 | 291 | ||
| 290 | /* | 292 | /* |
| 291 | * Bits needed to read the hw counters in user-space. | 293 | * Bits needed to read the hw counters in user-space. |
| 292 | * | 294 | * |
| 293 | * u32 seq; | 295 | * u32 seq; |
| 294 | * s64 count; | 296 | * s64 count; |
| 295 | * | 297 | * |
| 296 | * do { | 298 | * do { |
| 297 | * seq = pc->lock; | 299 | * seq = pc->lock; |
| 298 | * | 300 | * |
| 299 | * barrier() | 301 | * barrier() |
| 300 | * if (pc->index) { | 302 | * if (pc->index) { |
| 301 | * count = pmc_read(pc->index - 1); | 303 | * count = pmc_read(pc->index - 1); |
| 302 | * count += pc->offset; | 304 | * count += pc->offset; |
| 303 | * } else | 305 | * } else |
| 304 | * goto regular_read; | 306 | * goto regular_read; |
| 305 | * | 307 | * |
| 306 | * barrier(); | 308 | * barrier(); |
| 307 | * } while (pc->lock != seq); | 309 | * } while (pc->lock != seq); |
| 308 | * | 310 | * |
| 309 | * NOTE: for obvious reasons this only works on self-monitoring | 311 | * NOTE: for obvious reasons this only works on self-monitoring |
| 310 | * processes. | 312 | * processes. |
| 311 | */ | 313 | */ |
| 312 | __u32 lock; /* seqlock for synchronization */ | 314 | __u32 lock; /* seqlock for synchronization */ |
| 313 | __u32 index; /* hardware counter identifier */ | 315 | __u32 index; /* hardware counter identifier */ |
| 314 | __s64 offset; /* add to hardware counter value */ | 316 | __s64 offset; /* add to hardware counter value */ |
| 315 | 317 | ||
| 316 | /* | 318 | /* |
| 317 | * Control data for the mmap() data buffer. | 319 | * Control data for the mmap() data buffer. |
| 318 | * | 320 | * |
| 319 | * On SMP capable platforms, user-space should issue an rmb() after | 321 | * On SMP capable platforms, user-space should issue an rmb() after |
| 320 | * reading this value -- see perf_event_wakeup(). | 322 | * reading this value -- see perf_event_wakeup(). |
| 321 | */ | 323 | */ |
| 322 | __u32 data_head; /* head in the data section */ | 324 | __u32 data_head; /* head in the data section */ |
| 323 | }; | 325 | }; |
| 324 | 326 | ||
| 325 | NOTE: the hw-counter userspace bits are arch specific and are currently only | 327 | NOTE: the hw-counter userspace bits are arch specific and are currently only |
| 326 | implemented on powerpc. | 328 | implemented on powerpc. |
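A sketch of setting up the mapping itself, with n == 3 (assumes the headers
from the earlier sketch plus <sys/mman.h>; error handling minimal):

	#include <sys/mman.h>

	static struct perf_event_mmap_page *map_buffer(int fd, void **data)
	{
		size_t page = sysconf(_SC_PAGESIZE);
		size_t len = (1 + (1 << 3)) * page;	/* 1 + 2^n pages */
		void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, 0);

		if (base == MAP_FAILED)
			return NULL;

		*data = (char *)base + page;	/* ring-buffer follows page 0 */
		return base;			/* the meta-data page */
	}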
| 327 | 329 | ||
| 328 | The following 2^n pages are the ring-buffer which contains events of the form: | 330 | The following 2^n pages are the ring-buffer which contains events of the form: |
| 329 | 331 | ||
| 330 | #define PERF_RECORD_MISC_KERNEL (1 << 0) | 332 | #define PERF_RECORD_MISC_KERNEL (1 << 0) |
| 331 | #define PERF_RECORD_MISC_USER (1 << 1) | 333 | #define PERF_RECORD_MISC_USER (1 << 1) |
| 332 | #define PERF_RECORD_MISC_OVERFLOW (1 << 2) | 334 | #define PERF_RECORD_MISC_OVERFLOW (1 << 2) |
| 333 | 335 | ||
| 334 | struct perf_event_header { | 336 | struct perf_event_header { |
| 335 | __u32 type; | 337 | __u32 type; |
| 336 | __u16 misc; | 338 | __u16 misc; |
| 337 | __u16 size; | 339 | __u16 size; |
| 338 | }; | 340 | }; |
| 339 | 341 | ||
| 340 | enum perf_event_type { | 342 | enum perf_event_type { |
| 341 | 343 | ||
| 342 | /* | 344 | /* |
| 343 | * The MMAP events record the PROT_EXEC mappings so that we can | 345 | * The MMAP events record the PROT_EXEC mappings so that we can |
| 344 | * correlate userspace IPs to code. They have the following structure: | 346 | * correlate userspace IPs to code. They have the following structure: |
| 345 | * | 347 | * |
| 346 | * struct { | 348 | * struct { |
| 347 | * struct perf_event_header header; | 349 | * struct perf_event_header header; |
| 348 | * | 350 | * |
| 349 | * u32 pid, tid; | 351 | * u32 pid, tid; |
| 350 | * u64 addr; | 352 | * u64 addr; |
| 351 | * u64 len; | 353 | * u64 len; |
| 352 | * u64 pgoff; | 354 | * u64 pgoff; |
| 353 | * char filename[]; | 355 | * char filename[]; |
| 354 | * }; | 356 | * }; |
| 355 | */ | 357 | */ |
| 356 | PERF_RECORD_MMAP = 1, | 358 | PERF_RECORD_MMAP = 1, |
| 357 | PERF_RECORD_MUNMAP = 2, | 359 | PERF_RECORD_MUNMAP = 2, |
| 358 | 360 | ||
| 359 | /* | 361 | /* |
| 360 | * struct { | 362 | * struct { |
| 361 | * struct perf_event_header header; | 363 | * struct perf_event_header header; |
| 362 | * | 364 | * |
| 363 | * u32 pid, tid; | 365 | * u32 pid, tid; |
| 364 | * char comm[]; | 366 | * char comm[]; |
| 365 | * }; | 367 | * }; |
| 366 | */ | 368 | */ |
| 367 | PERF_RECORD_COMM = 3, | 369 | PERF_RECORD_COMM = 3, |
| 368 | 370 | ||
| 369 | /* | 371 | /* |
| 370 | * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field | 372 | * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field |
| 371 | * will be PERF_RECORD_* | 373 | * will be PERF_RECORD_* |
| 372 | * | 374 | * |
| 373 | * struct { | 375 | * struct { |
| 374 | * struct perf_event_header header; | 376 | * struct perf_event_header header; |
| 375 | * | 377 | * |
| 376 | * { u64 ip; } && PERF_RECORD_IP | 378 | * { u64 ip; } && PERF_RECORD_IP |
| 377 | * { u32 pid, tid; } && PERF_RECORD_TID | 379 | * { u32 pid, tid; } && PERF_RECORD_TID |
| 378 | * { u64 time; } && PERF_RECORD_TIME | 380 | * { u64 time; } && PERF_RECORD_TIME |
| 379 | * { u64 addr; } && PERF_RECORD_ADDR | 381 | * { u64 addr; } && PERF_RECORD_ADDR |
| 380 | * | 382 | * |
| 381 | * { u64 nr; | 383 | * { u64 nr; |
| 382 | * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP | 384 | * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP |
| 383 | * | 385 | * |
| 384 | * { u16 nr, | 386 | * { u16 nr, |
| 385 | * hv, | 387 | * hv, |
| 386 | * kernel, | 388 | * kernel, |
| 387 | * user; | 389 | * user; |
| 388 | * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN | 390 | * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN |
| 389 | * }; | 391 | * }; |
| 390 | */ | 392 | */ |
| 391 | }; | 393 | }; |
| 392 | 394 | ||
| 393 | NOTE: PERF_RECORD_CALLCHAIN is arch specific and currently only implemented | 395 | NOTE: PERF_RECORD_CALLCHAIN is arch specific and currently only implemented |
| 394 | on x86. | 396 | on x86. |
| 395 | 397 | ||
| 396 | Notification of new events is possible through poll()/select()/epoll() and | 398 | Notification of new events is possible through poll()/select()/epoll() and |
| 397 | fcntl() managing signals. | 399 | fcntl() managing signals. |
| 398 | 400 | ||
| 399 | Normally a notification is generated for every page filled; however, one can | 401 | Normally a notification is generated for every page filled; however, one can |
| 400 | additionally set perf_event_hw_event.wakeup_events to generate one every | 402 | additionally set perf_event_hw_event.wakeup_events to generate one every |
| 401 | so many counter overflow events. | 403 | so many counter overflow events. |
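Putting the pieces together, a consumer loop might look like this sketch
('pc' and 'data' come from the mmap sketch above; rmb() stands for the
platform's read barrier, as the structure comment requires; process_events()
is a hypothetical parser for the records described above):

	#include <poll.h>

	static void process_events(void *data, __u32 tail, __u32 head);

	static void consume(int fd, volatile struct perf_event_mmap_page *pc,
			    void *data)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		__u32 tail = 0;

		while (poll(&pfd, 1, -1) > 0) {
			__u32 head = pc->data_head;

			rmb();	/* order record reads after reading data_head */
			process_events(data, tail, head);
			tail = head;
		}
	}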
| 402 | 404 | ||
| 403 | Future work will include a splice() interface to the ring-buffer. | 405 | Future work will include a splice() interface to the ring-buffer. |
| 404 | 406 | ||
| 405 | 407 | ||
| 406 | Counters can be enabled and disabled in two ways: via ioctl and via | 408 | Counters can be enabled and disabled in two ways: via ioctl and via |
| 407 | prctl. When a counter is disabled, it doesn't count or generate | 409 | prctl. When a counter is disabled, it doesn't count or generate |
| 408 | events but does continue to exist and maintain its count value. | 410 | events but does continue to exist and maintain its count value. |
| 409 | 411 | ||
| 410 | An individual counter or counter group can be enabled with | 412 | An individual counter or counter group can be enabled with |
| 411 | 413 | ||
| 412 | ioctl(fd, PERF_EVENT_IOC_ENABLE); | 414 | ioctl(fd, PERF_EVENT_IOC_ENABLE); |
| 413 | 415 | ||
| 414 | or disabled with | 416 | or disabled with |
| 415 | 417 | ||
| 416 | ioctl(fd, PERF_EVENT_IOC_DISABLE); | 418 | ioctl(fd, PERF_EVENT_IOC_DISABLE); |
| 417 | 419 | ||
| 418 | Enabling or disabling the leader of a group enables or disables the | 420 | Enabling or disabling the leader of a group enables or disables the |
| 419 | whole group; that is, while the group leader is disabled, none of the | 421 | whole group; that is, while the group leader is disabled, none of the |
| 420 | counters in the group will count. Enabling or disabling a member of a | 422 | counters in the group will count. Enabling or disabling a member of a |
| 421 | group other than the leader only affects that counter - disabling a | 423 | group other than the leader only affects that counter - disabling a |
| 422 | non-leader stops that counter from counting but doesn't affect any | 424 | non-leader stops that counter from counting but doesn't affect any |
| 423 | other counter. | 425 | other counter. |
| 424 | 426 | ||
| 425 | Additionally, non-inherited overflow counters can use | 427 | Additionally, non-inherited overflow counters can use |
| 426 | 428 | ||
| 427 | ioctl(fd, PERF_EVENT_IOC_REFRESH, nr); | 429 | ioctl(fd, PERF_EVENT_IOC_REFRESH, nr); |
| 428 | 430 | ||
| 429 | to enable a counter for 'nr' events, after which it gets disabled again. | 431 | to enable a counter for 'nr' events, after which it gets disabled again. |
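For a counter opened with attr.disabled = 1 (as in the first sketch above),
the usual measurement pattern is then:

	#include <sys/ioctl.h>

	static uint64_t count_workload(int fd, void (*workload)(void))
	{
		uint64_t count = 0;

		ioctl(fd, PERF_EVENT_IOC_ENABLE);
		workload();			/* hypothetical code under test */
		ioctl(fd, PERF_EVENT_IOC_DISABLE);
		read(fd, &count, sizeof(count));

		return count;
	}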
| 430 | 432 | ||
| 431 | A process can enable or disable all the counter groups that are | 433 | A process can enable or disable all the counter groups that are |
| 432 | attached to it, using prctl: | 434 | attached to it, using prctl: |
| 433 | 435 | ||
| 434 | prctl(PR_TASK_PERF_EVENTS_ENABLE); | 436 | prctl(PR_TASK_PERF_EVENTS_ENABLE); |
| 435 | 437 | ||
| 436 | prctl(PR_TASK_PERF_EVENTS_DISABLE); | 438 | prctl(PR_TASK_PERF_EVENTS_DISABLE); |
| 437 | 439 | ||
| 438 | This applies to all counters on the current process, whether created | 440 | This applies to all counters on the current process, whether created |
| 439 | by this process or by another, and doesn't affect any counters that | 441 | by this process or by another, and doesn't affect any counters that |
| 440 | this process has created on other processes. It only enables or | 442 | this process has created on other processes. It only enables or |
| 441 | disables the group leaders, not any other members in the groups. | 443 | disables the group leaders, not any other members in the groups. |
| 442 | 444 | ||
| 443 | 445 | ||
| 444 | Arch requirements | 446 | Arch requirements |
| 445 | ----------------- | 447 | ----------------- |
| 446 | 448 | ||
| 447 | If your architecture does not have hardware performance metrics, you can | 449 | If your architecture does not have hardware performance metrics, you can |
| 448 | still use the generic software counters based on hrtimers for sampling. | 450 | still use the generic software counters based on hrtimers for sampling. |
| 449 | 451 | ||
| 450 | So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you | 452 | So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you |
| 451 | will need at least this: | 453 | will need at least this: |
| 452 | - asm/perf_event.h - a basic stub will suffice at first | 454 | - asm/perf_event.h - a basic stub will suffice at first |
| 453 | - support for atomic64 types (and associated helper functions) | 455 | - support for atomic64 types (and associated helper functions) |
| 454 | - set_perf_event_pending() implemented | 456 | - set_perf_event_pending() implemented |
| 455 | 457 | ||
| 456 | If your architecture does have hardware capabilities, you can override the | 458 | If your architecture does have hardware capabilities, you can override the |
| 457 | weak stub hw_perf_event_init() to register hardware counters. | 459 | weak stub hw_perf_event_init() to register hardware counters. |
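A rough sketch of such an override, assuming the struct pmu callbacks in
this tree (enable, disable, read, unthrottle) and entirely hypothetical
xxx_* helpers:

	static const struct pmu xxx_pmu = {
		.enable		= xxx_pmu_enable,	/* program the counter */
		.disable	= xxx_pmu_disable,	/* stop it again */
		.read		= xxx_pmu_read,		/* sync hw -> event->count */
		.unthrottle	= xxx_pmu_unthrottle,
	};

	const struct pmu *hw_perf_event_init(struct perf_event *event)
	{
		if (!xxx_pmu_supports(&event->attr))	/* hypothetical check */
			return ERR_PTR(-EINVAL);

		return &xxx_pmu;
	}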
| 458 | 460 | ||
| 459 | Architectures that have d-cache aliasing issues, such as Sparc and ARM, | 461 | Architectures that have d-cache aliasing issues, such as Sparc and ARM, |
| 460 | should select PERF_USE_VMALLOC in order to avoid these for perf mmap(). | 462 | should select PERF_USE_VMALLOC in order to avoid these for perf mmap(). |
| 461 | 463 |
tools/perf/perf.c
| 1 | /* | 1 | /* |
| 2 | * perf.c | 2 | * perf.c |
| 3 | * | 3 | * |
| 4 | * Performance analysis utility. | 4 | * Performance analysis utility. |
| 5 | * | 5 | * |
| 6 | * This is the main hub from which the sub-commands (perf stat, | 6 | * This is the main hub from which the sub-commands (perf stat, |
| 7 | * perf top, perf record, perf report, etc.) are started. | 7 | * perf top, perf record, perf report, etc.) are started. |
| 8 | */ | 8 | */ |
| 9 | #include "builtin.h" | 9 | #include "builtin.h" |
| 10 | 10 | ||
| 11 | #include "util/exec_cmd.h" | 11 | #include "util/exec_cmd.h" |
| 12 | #include "util/cache.h" | 12 | #include "util/cache.h" |
| 13 | #include "util/quote.h" | 13 | #include "util/quote.h" |
| 14 | #include "util/run-command.h" | 14 | #include "util/run-command.h" |
| 15 | #include "util/parse-events.h" | 15 | #include "util/parse-events.h" |
| 16 | #include "util/string.h" | 16 | #include "util/string.h" |
| 17 | #include "util/debugfs.h" | 17 | #include "util/debugfs.h" |
| 18 | 18 | ||
| 19 | const char perf_usage_string[] = | 19 | const char perf_usage_string[] = |
| 20 | "perf [--version] [--help] COMMAND [ARGS]"; | 20 | "perf [--version] [--help] COMMAND [ARGS]"; |
| 21 | 21 | ||
| 22 | const char perf_more_info_string[] = | 22 | const char perf_more_info_string[] = |
| 23 | "See 'perf help COMMAND' for more information on a specific command."; | 23 | "See 'perf help COMMAND' for more information on a specific command."; |
| 24 | 24 | ||
| 25 | static int use_pager = -1; | 25 | static int use_pager = -1; |
| 26 | struct pager_config { | 26 | struct pager_config { |
| 27 | const char *cmd; | 27 | const char *cmd; |
| 28 | int val; | 28 | int val; |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | static char debugfs_mntpt[MAXPATHLEN]; | 31 | static char debugfs_mntpt[MAXPATHLEN]; |
| 32 | 32 | ||
| 33 | static int pager_command_config(const char *var, const char *value, void *data) | 33 | static int pager_command_config(const char *var, const char *value, void *data) |
| 34 | { | 34 | { |
| 35 | struct pager_config *c = data; | 35 | struct pager_config *c = data; |
| 36 | if (!prefixcmp(var, "pager.") && !strcmp(var + 6, c->cmd)) | 36 | if (!prefixcmp(var, "pager.") && !strcmp(var + 6, c->cmd)) |
| 37 | c->val = perf_config_bool(var, value); | 37 | c->val = perf_config_bool(var, value); |
| 38 | return 0; | 38 | return 0; |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | /* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */ | 41 | /* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */ |
| 42 | int check_pager_config(const char *cmd) | 42 | int check_pager_config(const char *cmd) |
| 43 | { | 43 | { |
| 44 | struct pager_config c; | 44 | struct pager_config c; |
| 45 | c.cmd = cmd; | 45 | c.cmd = cmd; |
| 46 | c.val = -1; | 46 | c.val = -1; |
| 47 | perf_config(pager_command_config, &c); | 47 | perf_config(pager_command_config, &c); |
| 48 | return c.val; | 48 | return c.val; |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | static void commit_pager_choice(void) { | 51 | static void commit_pager_choice(void) { |
| 52 | switch (use_pager) { | 52 | switch (use_pager) { |
| 53 | case 0: | 53 | case 0: |
| 54 | setenv("PERF_PAGER", "cat", 1); | 54 | setenv("PERF_PAGER", "cat", 1); |
| 55 | break; | 55 | break; |
| 56 | case 1: | 56 | case 1: |
| 57 | /* setup_pager(); */ | 57 | /* setup_pager(); */ |
| 58 | break; | 58 | break; |
| 59 | default: | 59 | default: |
| 60 | break; | 60 | break; |
| 61 | } | 61 | } |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static void set_debugfs_path(void) | 64 | static void set_debugfs_path(void) |
| 65 | { | 65 | { |
| 66 | char *path; | 66 | char *path; |
| 67 | 67 | ||
| 68 | path = getenv(PERF_DEBUGFS_ENVIRONMENT); | 68 | path = getenv(PERF_DEBUGFS_ENVIRONMENT); |
| 69 | snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt, | 69 | snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt, |
| 70 | "tracing/events"); | 70 | "tracing/events"); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static int handle_options(const char*** argv, int* argc, int* envchanged) | 73 | static int handle_options(const char*** argv, int* argc, int* envchanged) |
| 74 | { | 74 | { |
| 75 | int handled = 0; | 75 | int handled = 0; |
| 76 | 76 | ||
| 77 | while (*argc > 0) { | 77 | while (*argc > 0) { |
| 78 | const char *cmd = (*argv)[0]; | 78 | const char *cmd = (*argv)[0]; |
| 79 | if (cmd[0] != '-') | 79 | if (cmd[0] != '-') |
| 80 | break; | 80 | break; |
| 81 | 81 | ||
| 82 | /* | 82 | /* |
| 83 | * For legacy reasons, the "version" and "help" | 83 | * For legacy reasons, the "version" and "help" |
| 84 | * commands can be written with "--" prepended | 84 | * commands can be written with "--" prepended |
| 85 | * to make them look like flags. | 85 | * to make them look like flags. |
| 86 | */ | 86 | */ |
| 87 | if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version")) | 87 | if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version")) |
| 88 | break; | 88 | break; |
| 89 | 89 | ||
| 90 | /* | 90 | /* |
| 91 | * Check remaining flags. | 91 | * Check remaining flags. |
| 92 | */ | 92 | */ |
| 93 | if (!prefixcmp(cmd, CMD_EXEC_PATH)) { | 93 | if (!prefixcmp(cmd, CMD_EXEC_PATH)) { |
| 94 | cmd += strlen(CMD_EXEC_PATH); | 94 | cmd += strlen(CMD_EXEC_PATH); |
| 95 | if (*cmd == '=') | 95 | if (*cmd == '=') |
| 96 | perf_set_argv_exec_path(cmd + 1); | 96 | perf_set_argv_exec_path(cmd + 1); |
| 97 | else { | 97 | else { |
| 98 | puts(perf_exec_path()); | 98 | puts(perf_exec_path()); |
| 99 | exit(0); | 99 | exit(0); |
| 100 | } | 100 | } |
| 101 | } else if (!strcmp(cmd, "--html-path")) { | 101 | } else if (!strcmp(cmd, "--html-path")) { |
| 102 | puts(system_path(PERF_HTML_PATH)); | 102 | puts(system_path(PERF_HTML_PATH)); |
| 103 | exit(0); | 103 | exit(0); |
| 104 | } else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) { | 104 | } else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) { |
| 105 | use_pager = 1; | 105 | use_pager = 1; |
| 106 | } else if (!strcmp(cmd, "--no-pager")) { | 106 | } else if (!strcmp(cmd, "--no-pager")) { |
| 107 | use_pager = 0; | 107 | use_pager = 0; |
| 108 | if (envchanged) | 108 | if (envchanged) |
| 109 | *envchanged = 1; | 109 | *envchanged = 1; |
| 110 | } else if (!strcmp(cmd, "--perf-dir")) { | 110 | } else if (!strcmp(cmd, "--perf-dir")) { |
| 111 | if (*argc < 2) { | 111 | if (*argc < 2) { |
| 112 | fprintf(stderr, "No directory given for --perf-dir.\n" ); | 112 | fprintf(stderr, "No directory given for --perf-dir.\n" ); |
| 113 | usage(perf_usage_string); | 113 | usage(perf_usage_string); |
| 114 | } | 114 | } |
| 115 | setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1); | 115 | setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1); |
| 116 | if (envchanged) | 116 | if (envchanged) |
| 117 | *envchanged = 1; | 117 | *envchanged = 1; |
| 118 | (*argv)++; | 118 | (*argv)++; |
| 119 | (*argc)--; | 119 | (*argc)--; |
| 120 | handled++; | 120 | handled++; |
| 121 | } else if (!prefixcmp(cmd, CMD_PERF_DIR)) { | 121 | } else if (!prefixcmp(cmd, CMD_PERF_DIR)) { |
| 122 | setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1); | 122 | setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1); |
| 123 | if (envchanged) | 123 | if (envchanged) |
| 124 | *envchanged = 1; | 124 | *envchanged = 1; |
| 125 | } else if (!strcmp(cmd, "--work-tree")) { | 125 | } else if (!strcmp(cmd, "--work-tree")) { |
| 126 | if (*argc < 2) { | 126 | if (*argc < 2) { |
| 127 | fprintf(stderr, "No directory given for --work-tree.\n" ); | 127 | fprintf(stderr, "No directory given for --work-tree.\n" ); |
| 128 | usage(perf_usage_string); | 128 | usage(perf_usage_string); |
| 129 | } | 129 | } |
| 130 | setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1); | 130 | setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1); |
| 131 | if (envchanged) | 131 | if (envchanged) |
| 132 | *envchanged = 1; | 132 | *envchanged = 1; |
| 133 | (*argv)++; | 133 | (*argv)++; |
| 134 | (*argc)--; | 134 | (*argc)--; |
| 135 | } else if (!prefixcmp(cmd, CMD_WORK_TREE)) { | 135 | } else if (!prefixcmp(cmd, CMD_WORK_TREE)) { |
| 136 | setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1); | 136 | setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1); |
| 137 | if (envchanged) | 137 | if (envchanged) |
| 138 | *envchanged = 1; | 138 | *envchanged = 1; |
| 139 | } else if (!strcmp(cmd, "--debugfs-dir")) { | 139 | } else if (!strcmp(cmd, "--debugfs-dir")) { |
| 140 | if (*argc < 2) { | 140 | if (*argc < 2) { |
| 141 | fprintf(stderr, "No directory given for --debugfs-dir.\n"); | 141 | fprintf(stderr, "No directory given for --debugfs-dir.\n"); |
| 142 | usage(perf_usage_string); | 142 | usage(perf_usage_string); |
| 143 | } | 143 | } |
| 144 | strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN); | 144 | strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN); |
| 145 | debugfs_mntpt[MAXPATHLEN - 1] = '\0'; | 145 | debugfs_mntpt[MAXPATHLEN - 1] = '\0'; |
| 146 | if (envchanged) | 146 | if (envchanged) |
| 147 | *envchanged = 1; | 147 | *envchanged = 1; |
| 148 | (*argv)++; | 148 | (*argv)++; |
| 149 | (*argc)--; | 149 | (*argc)--; |
| 150 | } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { | 150 | } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { |
| 151 | strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN); | 151 | strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN); |
| 152 | debugfs_mntpt[MAXPATHLEN - 1] = '\0'; | 152 | debugfs_mntpt[MAXPATHLEN - 1] = '\0'; |
| 153 | if (envchanged) | 153 | if (envchanged) |
| 154 | *envchanged = 1; | 154 | *envchanged = 1; |
| 155 | } else { | 155 | } else { |
| 156 | fprintf(stderr, "Unknown option: %s\n", cmd); | 156 | fprintf(stderr, "Unknown option: %s\n", cmd); |
| 157 | usage(perf_usage_string); | 157 | usage(perf_usage_string); |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | (*argv)++; | 160 | (*argv)++; |
| 161 | (*argc)--; | 161 | (*argc)--; |
| 162 | handled++; | 162 | handled++; |
| 163 | } | 163 | } |
| 164 | return handled; | 164 | return handled; |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | static int handle_alias(int *argcp, const char ***argv) | 167 | static int handle_alias(int *argcp, const char ***argv) |
| 168 | { | 168 | { |
| 169 | int envchanged = 0, ret = 0, saved_errno = errno; | 169 | int envchanged = 0, ret = 0, saved_errno = errno; |
| 170 | int count, option_count; | 170 | int count, option_count; |
| 171 | const char** new_argv; | 171 | const char** new_argv; |
| 172 | const char *alias_command; | 172 | const char *alias_command; |
| 173 | char *alias_string; | 173 | char *alias_string; |
| 174 | 174 | ||
| 175 | alias_command = (*argv)[0]; | 175 | alias_command = (*argv)[0]; |
| 176 | alias_string = alias_lookup(alias_command); | 176 | alias_string = alias_lookup(alias_command); |
| 177 | if (alias_string) { | 177 | if (alias_string) { |
| 178 | if (alias_string[0] == '!') { | 178 | if (alias_string[0] == '!') { |
| 179 | if (*argcp > 1) { | 179 | if (*argcp > 1) { |
| 180 | struct strbuf buf; | 180 | struct strbuf buf; |
| 181 | 181 | ||
| 182 | strbuf_init(&buf, PATH_MAX); | 182 | strbuf_init(&buf, PATH_MAX); |
| 183 | strbuf_addstr(&buf, alias_string); | 183 | strbuf_addstr(&buf, alias_string); |
| 184 | sq_quote_argv(&buf, (*argv) + 1, PATH_MAX); | 184 | sq_quote_argv(&buf, (*argv) + 1, PATH_MAX); |
| 185 | free(alias_string); | 185 | free(alias_string); |
| 186 | alias_string = buf.buf; | 186 | alias_string = buf.buf; |
| 187 | } | 187 | } |
| 188 | ret = system(alias_string + 1); | 188 | ret = system(alias_string + 1); |
| 189 | if (ret >= 0 && WIFEXITED(ret) && | 189 | if (ret >= 0 && WIFEXITED(ret) && |
| 190 | WEXITSTATUS(ret) != 127) | 190 | WEXITSTATUS(ret) != 127) |
| 191 | exit(WEXITSTATUS(ret)); | 191 | exit(WEXITSTATUS(ret)); |
| 192 | die("Failed to run '%s' when expanding alias '%s'", | 192 | die("Failed to run '%s' when expanding alias '%s'", |
| 193 | alias_string + 1, alias_command); | 193 | alias_string + 1, alias_command); |
| 194 | } | 194 | } |
| 195 | count = split_cmdline(alias_string, &new_argv); | 195 | count = split_cmdline(alias_string, &new_argv); |
| 196 | if (count < 0) | 196 | if (count < 0) |
| 197 | die("Bad alias.%s string", alias_command); | 197 | die("Bad alias.%s string", alias_command); |
| 198 | option_count = handle_options(&new_argv, &count, &envchanged); | 198 | option_count = handle_options(&new_argv, &count, &envchanged); |
| 199 | if (envchanged) | 199 | if (envchanged) |
| 200 | die("alias '%s' changes environment variables\n" | 200 | die("alias '%s' changes environment variables\n" |
| 201 | "You can use '!perf' in the alias to do this.", | 201 | "You can use '!perf' in the alias to do this.", |
| 202 | alias_command); | 202 | alias_command); |
| 203 | memmove(new_argv - option_count, new_argv, | 203 | memmove(new_argv - option_count, new_argv, |
| 204 | count * sizeof(char *)); | 204 | count * sizeof(char *)); |
| 205 | new_argv -= option_count; | 205 | new_argv -= option_count; |
| 206 | 206 | ||
| 207 | if (count < 1) | 207 | if (count < 1) |
| 208 | die("empty alias for %s", alias_command); | 208 | die("empty alias for %s", alias_command); |
| 209 | 209 | ||
| 210 | if (!strcmp(alias_command, new_argv[0])) | 210 | if (!strcmp(alias_command, new_argv[0])) |
| 211 | die("recursive alias: %s", alias_command); | 211 | die("recursive alias: %s", alias_command); |
| 212 | 212 | ||
| 213 | new_argv = realloc(new_argv, sizeof(char*) * | 213 | new_argv = realloc(new_argv, sizeof(char*) * |
| 214 | (count + *argcp + 1)); | 214 | (count + *argcp + 1)); |
| 215 | /* insert after command name */ | 215 | /* insert after command name */ |
| 216 | memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp); | 216 | memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp); |
| 217 | new_argv[count+*argcp] = NULL; | 217 | new_argv[count+*argcp] = NULL; |
| 218 | 218 | ||
| 219 | *argv = new_argv; | 219 | *argv = new_argv; |
| 220 | *argcp += count - 1; | 220 | *argcp += count - 1; |
| 221 | 221 | ||
| 222 | ret = 1; | 222 | ret = 1; |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | errno = saved_errno; | 225 | errno = saved_errno; |
| 226 | 226 | ||
| 227 | return ret; | 227 | return ret; |
| 228 | } | 228 | } |
| 229 | 229 | ||
| 230 | const char perf_version_string[] = PERF_VERSION; | 230 | const char perf_version_string[] = PERF_VERSION; |
| 231 | 231 | ||
| 232 | #define RUN_SETUP (1<<0) | 232 | #define RUN_SETUP (1<<0) |
| 233 | #define USE_PAGER (1<<1) | 233 | #define USE_PAGER (1<<1) |
| 234 | /* | 234 | /* |
| 235 | * require working tree to be present -- anything that uses this needs | 235 | * require working tree to be present -- anything that uses this needs |
| 236 | * RUN_SETUP for reading from the configuration file. | 236 | * RUN_SETUP for reading from the configuration file. |
| 237 | */ | 237 | */ |
| 238 | #define NEED_WORK_TREE (1<<2) | 238 | #define NEED_WORK_TREE (1<<2) |
| 239 | 239 | ||
| 240 | struct cmd_struct { | 240 | struct cmd_struct { |
| 241 | const char *cmd; | 241 | const char *cmd; |
| 242 | int (*fn)(int, const char **, const char *); | 242 | int (*fn)(int, const char **, const char *); |
| 243 | int option; | 243 | int option; |
| 244 | }; | 244 | }; |
| 245 | 245 | ||
| 246 | static int run_builtin(struct cmd_struct *p, int argc, const char **argv) | 246 | static int run_builtin(struct cmd_struct *p, int argc, const char **argv) |
| 247 | { | 247 | { |
| 248 | int status; | 248 | int status; |
| 249 | struct stat st; | 249 | struct stat st; |
| 250 | const char *prefix; | 250 | const char *prefix; |
| 251 | 251 | ||
| 252 | prefix = NULL; | 252 | prefix = NULL; |
| 253 | if (p->option & RUN_SETUP) | 253 | if (p->option & RUN_SETUP) |
| 254 | prefix = NULL; /* setup_perf_directory(); */ | 254 | prefix = NULL; /* setup_perf_directory(); */ |
| 255 | 255 | ||
| 256 | if (use_pager == -1 && p->option & RUN_SETUP) | 256 | if (use_pager == -1 && p->option & RUN_SETUP) |
| 257 | use_pager = check_pager_config(p->cmd); | 257 | use_pager = check_pager_config(p->cmd); |
| 258 | if (use_pager == -1 && p->option & USE_PAGER) | 258 | if (use_pager == -1 && p->option & USE_PAGER) |
| 259 | use_pager = 1; | 259 | use_pager = 1; |
| 260 | commit_pager_choice(); | 260 | commit_pager_choice(); |
| 261 | set_debugfs_path(); | 261 | set_debugfs_path(); |
| 262 | 262 | ||
| 263 | status = p->fn(argc, argv, prefix); | 263 | status = p->fn(argc, argv, prefix); |
| 264 | if (status) | 264 | if (status) |
| 265 | return status & 0xff; | 265 | return status & 0xff; |
| 266 | 266 | ||
| 267 | /* Somebody closed stdout? */ | 267 | /* Somebody closed stdout? */ |
| 268 | if (fstat(fileno(stdout), &st)) | 268 | if (fstat(fileno(stdout), &st)) |
| 269 | return 0; | 269 | return 0; |
| 270 | /* Ignore write errors for pipes and sockets.. */ | 270 | /* Ignore write errors for pipes and sockets.. */ |
| 271 | if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) | 271 | if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) |
| 272 | return 0; | 272 | return 0; |
| 273 | 273 | ||
| 274 | /* Check for ENOSPC and EIO errors.. */ | 274 | /* Check for ENOSPC and EIO errors.. */ |
| 275 | if (fflush(stdout)) | 275 | if (fflush(stdout)) |
| 276 | die("write failure on standard output: %s", strerror(errno)); | 276 | die("write failure on standard output: %s", strerror(errno)); |
| 277 | if (ferror(stdout)) | 277 | if (ferror(stdout)) |
| 278 | die("unknown write failure on standard output"); | 278 | die("unknown write failure on standard output"); |
| 279 | if (fclose(stdout)) | 279 | if (fclose(stdout)) |
| 280 | die("close failed on standard output: %s", strerror(errno)); | 280 | die("close failed on standard output: %s", strerror(errno)); |
| 281 | return 0; | 281 | return 0; |
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | static void handle_internal_command(int argc, const char **argv) | 284 | static void handle_internal_command(int argc, const char **argv) |
| 285 | { | 285 | { |
| 286 | const char *cmd = argv[0]; | 286 | const char *cmd = argv[0]; |
| 287 | static struct cmd_struct commands[] = { | 287 | static struct cmd_struct commands[] = { |
| 288 | { "help", cmd_help, 0 }, | 288 | { "help", cmd_help, 0 }, |
| 289 | { "list", cmd_list, 0 }, | 289 | { "list", cmd_list, 0 }, |
| 290 | { "record", cmd_record, 0 }, | 290 | { "record", cmd_record, 0 }, |
| 291 | { "report", cmd_report, 0 }, | 291 | { "report", cmd_report, 0 }, |
| 292 | { "bench", cmd_bench, 0 }, | ||
| 292 | { "stat", cmd_stat, 0 }, | 293 | { "stat", cmd_stat, 0 }, |
| 293 | { "timechart", cmd_timechart, 0 }, | 294 | { "timechart", cmd_timechart, 0 }, |
| 294 | { "top", cmd_top, 0 }, | 295 | { "top", cmd_top, 0 }, |
| 295 | { "annotate", cmd_annotate, 0 }, | 296 | { "annotate", cmd_annotate, 0 }, |
| 296 | { "version", cmd_version, 0 }, | 297 | { "version", cmd_version, 0 }, |
| 297 | { "trace", cmd_trace, 0 }, | 298 | { "trace", cmd_trace, 0 }, |
| 298 | { "sched", cmd_sched, 0 }, | 299 | { "sched", cmd_sched, 0 }, |
| 299 | }; | 300 | }; |
| 300 | unsigned int i; | 301 | unsigned int i; |
| 301 | static const char ext[] = STRIP_EXTENSION; | 302 | static const char ext[] = STRIP_EXTENSION; |
| 302 | 303 | ||
| 303 | if (sizeof(ext) > 1) { | 304 | if (sizeof(ext) > 1) { |
| 304 | i = strlen(argv[0]) - strlen(ext); | 305 | i = strlen(argv[0]) - strlen(ext); |
| 305 | if (i > 0 && !strcmp(argv[0] + i, ext)) { | 306 | if (i > 0 && !strcmp(argv[0] + i, ext)) { |
| 306 | char *argv0 = strdup(argv[0]); | 307 | char *argv0 = strdup(argv[0]); |
| 307 | argv[0] = cmd = argv0; | 308 | argv[0] = cmd = argv0; |
| 308 | argv0[i] = '\0'; | 309 | argv0[i] = '\0'; |
| 309 | } | 310 | } |
| 310 | } | 311 | } |
| 311 | 312 | ||
| 312 | /* Turn "perf cmd --help" into "perf help cmd" */ | 313 | /* Turn "perf cmd --help" into "perf help cmd" */ |
| 313 | if (argc > 1 && !strcmp(argv[1], "--help")) { | 314 | if (argc > 1 && !strcmp(argv[1], "--help")) { |
| 314 | argv[1] = argv[0]; | 315 | argv[1] = argv[0]; |
| 315 | argv[0] = cmd = "help"; | 316 | argv[0] = cmd = "help"; |
| 316 | } | 317 | } |
| 317 | 318 | ||
| 318 | for (i = 0; i < ARRAY_SIZE(commands); i++) { | 319 | for (i = 0; i < ARRAY_SIZE(commands); i++) { |
| 319 | struct cmd_struct *p = commands+i; | 320 | struct cmd_struct *p = commands+i; |
| 320 | if (strcmp(p->cmd, cmd)) | 321 | if (strcmp(p->cmd, cmd)) |
| 321 | continue; | 322 | continue; |
| 322 | exit(run_builtin(p, argc, argv)); | 323 | exit(run_builtin(p, argc, argv)); |
| 323 | } | 324 | } |
| 324 | } | 325 | } |
| 325 | 326 | ||
| 326 | static void execv_dashed_external(const char **argv) | 327 | static void execv_dashed_external(const char **argv) |
| 327 | { | 328 | { |
| 328 | struct strbuf cmd = STRBUF_INIT; | 329 | struct strbuf cmd = STRBUF_INIT; |
| 329 | const char *tmp; | 330 | const char *tmp; |
| 330 | int status; | 331 | int status; |
| 331 | 332 | ||
| 332 | strbuf_addf(&cmd, "perf-%s", argv[0]); | 333 | strbuf_addf(&cmd, "perf-%s", argv[0]); |
| 333 | 334 | ||
| 334 | /* | 335 | /* |
| 335 | * argv[0] must be the perf command, but the argv array | 336 | * argv[0] must be the perf command, but the argv array |
| 336 | * belongs to the caller, and may be reused in | 337 | * belongs to the caller, and may be reused in |
| 337 | * subsequent loop iterations. Save argv[0] and | 338 | * subsequent loop iterations. Save argv[0] and |
| 338 | * restore it on error. | 339 | * restore it on error. |
| 339 | */ | 340 | */ |
| 340 | tmp = argv[0]; | 341 | tmp = argv[0]; |
| 341 | argv[0] = cmd.buf; | 342 | argv[0] = cmd.buf; |
| 342 | 343 | ||
| 343 | /* | 344 | /* |
| 344 | * if we fail because the command is not found, it is | 345 | * if we fail because the command is not found, it is |
| 345 | * OK to return. Otherwise, we just pass along the status code. | 346 | * OK to return. Otherwise, we just pass along the status code. |
| 346 | */ | 347 | */ |
| 347 | status = run_command_v_opt(argv, 0); | 348 | status = run_command_v_opt(argv, 0); |
| 348 | if (status != -ERR_RUN_COMMAND_EXEC) { | 349 | if (status != -ERR_RUN_COMMAND_EXEC) { |
| 349 | if (IS_RUN_COMMAND_ERR(status)) | 350 | if (IS_RUN_COMMAND_ERR(status)) |
| 350 | die("unable to run '%s'", argv[0]); | 351 | die("unable to run '%s'", argv[0]); |
| 351 | exit(-status); | 352 | exit(-status); |
| 352 | } | 353 | } |
| 353 | errno = ENOENT; /* as if we called execvp */ | 354 | errno = ENOENT; /* as if we called execvp */ |
| 354 | 355 | ||
| 355 | argv[0] = tmp; | 356 | argv[0] = tmp; |
| 356 | 357 | ||
| 357 | strbuf_release(&cmd); | 358 | strbuf_release(&cmd); |
| 358 | } | 359 | } |
| 359 | 360 | ||
| 360 | static int run_argv(int *argcp, const char ***argv) | 361 | static int run_argv(int *argcp, const char ***argv) |
| 361 | { | 362 | { |
| 362 | int done_alias = 0; | 363 | int done_alias = 0; |
| 363 | 364 | ||
| 364 | while (1) { | 365 | while (1) { |
| 365 | /* See if it's an internal command */ | 366 | /* See if it's an internal command */ |
| 366 | handle_internal_command(*argcp, *argv); | 367 | handle_internal_command(*argcp, *argv); |
| 367 | 368 | ||
| 368 | /* .. then try the external ones */ | 369 | /* .. then try the external ones */ |
| 369 | execv_dashed_external(*argv); | 370 | execv_dashed_external(*argv); |
| 370 | 371 | ||
| 371 | /* It could be an alias -- this works around the insanity | 372 | /* It could be an alias -- this works around the insanity |
| 372 | * of overriding "perf log" with "perf show" by having | 373 | * of overriding "perf log" with "perf show" by having |
| 373 | * alias.log = show | 374 | * alias.log = show |
| 374 | */ | 375 | */ |
| 375 | if (done_alias || !handle_alias(argcp, argv)) | 376 | if (done_alias || !handle_alias(argcp, argv)) |
| 376 | break; | 377 | break; |
| 377 | done_alias = 1; | 378 | done_alias = 1; |
| 378 | } | 379 | } |
| 379 | 380 | ||
| 380 | return done_alias; | 381 | return done_alias; |
| 381 | } | 382 | } |
| 382 | 383 | ||
| 383 | /* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ | 384 | /* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ |
| 384 | static void get_debugfs_mntpt(void) | 385 | static void get_debugfs_mntpt(void) |
| 385 | { | 386 | { |
| 386 | const char *path = debugfs_find_mountpoint(); | 387 | const char *path = debugfs_find_mountpoint(); |
| 387 | 388 | ||
| 388 | if (path) | 389 | if (path) |
| 389 | strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); | 390 | strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); |
| 390 | else | 391 | else |
| 391 | debugfs_mntpt[0] = '\0'; | 392 | debugfs_mntpt[0] = '\0'; |
| 392 | } | 393 | } |
| 393 | 394 | ||
| 394 | int main(int argc, const char **argv) | 395 | int main(int argc, const char **argv) |
| 395 | { | 396 | { |
| 396 | const char *cmd; | 397 | const char *cmd; |
| 397 | 398 | ||
| 398 | cmd = perf_extract_argv0_path(argv[0]); | 399 | cmd = perf_extract_argv0_path(argv[0]); |
| 399 | if (!cmd) | 400 | if (!cmd) |
| 400 | cmd = "perf-help"; | 401 | cmd = "perf-help"; |
| 401 | /* get debugfs mount point from /proc/mounts */ | 402 | /* get debugfs mount point from /proc/mounts */ |
| 402 | get_debugfs_mntpt(); | 403 | get_debugfs_mntpt(); |
| 403 | /* | 404 | /* |
| 404 | * "perf-xxxx" is the same as "perf xxxx", but we obviously: | 405 | * "perf-xxxx" is the same as "perf xxxx", but we obviously: |
| 405 | * | 406 | * |
| 406 | * - cannot take flags in between the "perf" and the "xxxx". | 407 | * - cannot take flags in between the "perf" and the "xxxx". |
| 407 | * - cannot execute it externally (since it would just do | 408 | * - cannot execute it externally (since it would just do |
| 408 | * the same thing over again) | 409 | * the same thing over again) |
| 409 | * | 410 | * |
| 410 | * So we just directly call the internal command handler, and | 411 | * So we just directly call the internal command handler, and |
| 411 | * die if that one cannot handle it. | 412 | * die if that one cannot handle it. |
| 412 | */ | 413 | */ |
| 413 | if (!prefixcmp(cmd, "perf-")) { | 414 | if (!prefixcmp(cmd, "perf-")) { |
| 414 | cmd += 5; | 415 | cmd += 5; |
| 415 | argv[0] = cmd; | 416 | argv[0] = cmd; |
| 416 | handle_internal_command(argc, argv); | 417 | handle_internal_command(argc, argv); |
| 417 | die("cannot handle %s internally", cmd); | 418 | die("cannot handle %s internally", cmd); |
| 418 | } | 419 | } |
| 419 | 420 | ||
| 420 | /* Look for flags.. */ | 421 | /* Look for flags.. */ |
| 421 | argv++; | 422 | argv++; |
| 422 | argc--; | 423 | argc--; |
| 423 | handle_options(&argv, &argc, NULL); | 424 | handle_options(&argv, &argc, NULL); |
| 424 | commit_pager_choice(); | 425 | commit_pager_choice(); |
| 425 | set_debugfs_path(); | 426 | set_debugfs_path(); |
| 426 | if (argc > 0) { | 427 | if (argc > 0) { |
| 427 | if (!prefixcmp(argv[0], "--")) | 428 | if (!prefixcmp(argv[0], "--")) |
| 428 | argv[0] += 2; | 429 | argv[0] += 2; |
| 429 | } else { | 430 | } else { |
| 430 | /* The user didn't specify a command; give them help */ | 431 | /* The user didn't specify a command; give them help */ |
| 431 | printf("\n usage: %s\n\n", perf_usage_string); | 432 | printf("\n usage: %s\n\n", perf_usage_string); |
| 432 | list_common_cmds_help(); | 433 | list_common_cmds_help(); |
| 433 | printf("\n %s\n\n", perf_more_info_string); | 434 | printf("\n %s\n\n", perf_more_info_string); |
| 434 | exit(1); | 435 | exit(1); |
| 435 | } | 436 | } |
| 436 | cmd = argv[0]; | 437 | cmd = argv[0]; |
| 437 | 438 | ||
| 438 | /* | 439 | /* |
| 439 | * We use PATH to find perf commands, but we prepend some higher | 440 | * We use PATH to find perf commands, but we prepend some higher |
| 440 | * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH | 441 | * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH |
| 441 | * environment, and the $(perfexecdir) from the Makefile at build | 442 | * environment, and the $(perfexecdir) from the Makefile at build |
| 442 | * time. | 443 | * time. |
| 443 | */ | 444 | */ |
| 444 | setup_path(); | 445 | setup_path(); |
| 445 | 446 | ||
| 446 | while (1) { | 447 | while (1) { |
| 447 | static int done_help = 0; | 448 | static int done_help = 0; |
| 448 | static int was_alias = 0; | 449 | static int was_alias = 0; |
| 449 | 450 | ||
| 450 | was_alias = run_argv(&argc, &argv); | 451 | was_alias = run_argv(&argc, &argv); |
| 451 | if (errno != ENOENT) | 452 | if (errno != ENOENT) |
| 452 | break; | 453 | break; |
| 453 | 454 | ||
| 454 | if (was_alias) { | 455 | if (was_alias) { |
| 455 | fprintf(stderr, "Expansion of alias '%s' failed; " | 456 | fprintf(stderr, "Expansion of alias '%s' failed; " |
| 456 | "'%s' is not a perf-command\n", | 457 | "'%s' is not a perf-command\n", |
| 457 | cmd, argv[0]); | 458 | cmd, argv[0]); |
| 458 | exit(1); | 459 | exit(1); |
| 459 | } | 460 | } |
| 460 | if (!done_help) { | 461 | if (!done_help) { |
| 461 | cmd = argv[0] = help_unknown_cmd(cmd); | 462 | cmd = argv[0] = help_unknown_cmd(cmd); |
| 462 | done_help = 1; | 463 | done_help = 1; |
| 463 | } else | 464 | } else |
| 464 | break; | 465 | break; |
| 465 | } | 466 | } |
| 466 | 467 | ||
| 467 | fprintf(stderr, "Failed to run command '%s': %s\n", | 468 | fprintf(stderr, "Failed to run command '%s': %s\n", |
| 468 | cmd, strerror(errno)); | 469 | cmd, strerror(errno)); |
| 469 | 470 | ||
| 470 | return 1; | 471 | return 1; |
| 471 | } | 472 | } |
| 472 | 473 |
tools/perf/util/parse-events.c
| 1 | 1 | ||
| 2 | #include "util.h" | 2 | #include "util.h" |
| 3 | #include "../perf.h" | 3 | #include "../perf.h" |
| 4 | #include "parse-options.h" | 4 | #include "parse-options.h" |
| 5 | #include "parse-events.h" | 5 | #include "parse-events.h" |
| 6 | #include "exec_cmd.h" | 6 | #include "exec_cmd.h" |
| 7 | #include "string.h" | 7 | #include "string.h" |
| 8 | #include "cache.h" | 8 | #include "cache.h" |
| 9 | #include "header.h" | 9 | #include "header.h" |
| 10 | #include "debugfs.h" | 10 | #include "debugfs.h" |
| 11 | 11 | ||
| 12 | int nr_counters; | 12 | int nr_counters; |
| 13 | 13 | ||
| 14 | struct perf_event_attr attrs[MAX_COUNTERS]; | 14 | struct perf_event_attr attrs[MAX_COUNTERS]; |
| 15 | char *filters[MAX_COUNTERS]; | 15 | char *filters[MAX_COUNTERS]; |
| 16 | 16 | ||
| 17 | struct event_symbol { | 17 | struct event_symbol { |
| 18 | u8 type; | 18 | u8 type; |
| 19 | u64 config; | 19 | u64 config; |
| 20 | const char *symbol; | 20 | const char *symbol; |
| 21 | const char *alias; | 21 | const char *alias; |
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | enum event_result { | 24 | enum event_result { |
| 25 | EVT_FAILED, | 25 | EVT_FAILED, |
| 26 | EVT_HANDLED, | 26 | EVT_HANDLED, |
| 27 | EVT_HANDLED_ALL | 27 | EVT_HANDLED_ALL |
| 28 | }; | 28 | }; |
| 29 | 29 | ||
| 30 | char debugfs_path[MAXPATHLEN]; | 30 | char debugfs_path[MAXPATHLEN]; |
| 31 | 31 | ||
| 32 | #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x | 32 | #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x |
| 33 | #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x | 33 | #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x |
| 34 | 34 | ||
| 35 | static struct event_symbol event_symbols[] = { | 35 | static struct event_symbol event_symbols[] = { |
| 36 | { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, | 36 | { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, |
| 37 | { CHW(INSTRUCTIONS), "instructions", "" }, | 37 | { CHW(INSTRUCTIONS), "instructions", "" }, |
| 38 | { CHW(CACHE_REFERENCES), "cache-references", "" }, | 38 | { CHW(CACHE_REFERENCES), "cache-references", "" }, |
| 39 | { CHW(CACHE_MISSES), "cache-misses", "" }, | 39 | { CHW(CACHE_MISSES), "cache-misses", "" }, |
| 40 | { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, | 40 | { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, |
| 41 | { CHW(BRANCH_MISSES), "branch-misses", "" }, | 41 | { CHW(BRANCH_MISSES), "branch-misses", "" }, |
| 42 | { CHW(BUS_CYCLES), "bus-cycles", "" }, | 42 | { CHW(BUS_CYCLES), "bus-cycles", "" }, |
| 43 | 43 | ||
| 44 | { CSW(CPU_CLOCK), "cpu-clock", "" }, | 44 | { CSW(CPU_CLOCK), "cpu-clock", "" }, |
| 45 | { CSW(TASK_CLOCK), "task-clock", "" }, | 45 | { CSW(TASK_CLOCK), "task-clock", "" }, |
| 46 | { CSW(PAGE_FAULTS), "page-faults", "faults" }, | 46 | { CSW(PAGE_FAULTS), "page-faults", "faults" }, |
| 47 | { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, | 47 | { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, |
| 48 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, | 48 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, |
| 49 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, | 49 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, |
| 50 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, | 50 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, |
| 51 | { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, | ||
| 52 | { CSW(EMULATION_FAULTS), "emulation-faults", "" }, | ||
| 51 | }; | 53 | }; |
| 52 | 54 | ||
| 53 | #define __PERF_EVENT_FIELD(config, name) \ | 55 | #define __PERF_EVENT_FIELD(config, name) \ |
| 54 | ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) | 56 | ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) |
| 55 | 57 | ||
| 56 | #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) | 58 | #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) |
| 57 | #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) | 59 | #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) |
| 58 | #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) | 60 | #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) |
| 59 | #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) | 61 | #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) |
| 60 | 62 | ||
| 61 | static const char *hw_event_names[] = { | 63 | static const char *hw_event_names[] = { |
| 62 | "cycles", | 64 | "cycles", |
| 63 | "instructions", | 65 | "instructions", |
| 64 | "cache-references", | 66 | "cache-references", |
| 65 | "cache-misses", | 67 | "cache-misses", |
| 66 | "branches", | 68 | "branches", |
| 67 | "branch-misses", | 69 | "branch-misses", |
| 68 | "bus-cycles", | 70 | "bus-cycles", |
| 69 | }; | 71 | }; |
| 70 | 72 | ||
| 71 | static const char *sw_event_names[] = { | 73 | static const char *sw_event_names[] = { |
| 72 | "cpu-clock-msecs", | 74 | "cpu-clock-msecs", |
| 73 | "task-clock-msecs", | 75 | "task-clock-msecs", |
| 74 | "page-faults", | 76 | "page-faults", |
| 75 | "context-switches", | 77 | "context-switches", |
| 76 | "CPU-migrations", | 78 | "CPU-migrations", |
| 77 | "minor-faults", | 79 | "minor-faults", |
| 78 | "major-faults", | 80 | "major-faults", |
| 81 | "alignment-faults", | ||
| 82 | "emulation-faults", | ||
| 79 | }; | 83 | }; |
| 80 | 84 | ||
| 81 | #define MAX_ALIASES 8 | 85 | #define MAX_ALIASES 8 |
| 82 | 86 | ||
| 83 | static const char *hw_cache[][MAX_ALIASES] = { | 87 | static const char *hw_cache[][MAX_ALIASES] = { |
| 84 | { "L1-dcache", "l1-d", "l1d", "L1-data", }, | 88 | { "L1-dcache", "l1-d", "l1d", "L1-data", }, |
| 85 | { "L1-icache", "l1-i", "l1i", "L1-instruction", }, | 89 | { "L1-icache", "l1-i", "l1i", "L1-instruction", }, |
| 86 | { "LLC", "L2" }, | 90 | { "LLC", "L2" }, |
| 87 | { "dTLB", "d-tlb", "Data-TLB", }, | 91 | { "dTLB", "d-tlb", "Data-TLB", }, |
| 88 | { "iTLB", "i-tlb", "Instruction-TLB", }, | 92 | { "iTLB", "i-tlb", "Instruction-TLB", }, |
| 89 | { "branch", "branches", "bpu", "btb", "bpc", }, | 93 | { "branch", "branches", "bpu", "btb", "bpc", }, |
| 90 | }; | 94 | }; |
| 91 | 95 | ||
| 92 | static const char *hw_cache_op[][MAX_ALIASES] = { | 96 | static const char *hw_cache_op[][MAX_ALIASES] = { |
| 93 | { "load", "loads", "read", }, | 97 | { "load", "loads", "read", }, |
| 94 | { "store", "stores", "write", }, | 98 | { "store", "stores", "write", }, |
| 95 | { "prefetch", "prefetches", "speculative-read", "speculative-load", }, | 99 | { "prefetch", "prefetches", "speculative-read", "speculative-load", }, |
| 96 | }; | 100 | }; |
| 97 | 101 | ||
| 98 | static const char *hw_cache_result[][MAX_ALIASES] = { | 102 | static const char *hw_cache_result[][MAX_ALIASES] = { |
| 99 | { "refs", "Reference", "ops", "access", }, | 103 | { "refs", "Reference", "ops", "access", }, |
| 100 | { "misses", "miss", }, | 104 | { "misses", "miss", }, |
| 101 | }; | 105 | }; |
| 102 | 106 | ||
| 103 | #define C(x) PERF_COUNT_HW_CACHE_##x | 107 | #define C(x) PERF_COUNT_HW_CACHE_##x |
| 104 | #define CACHE_READ (1 << C(OP_READ)) | 108 | #define CACHE_READ (1 << C(OP_READ)) |
| 105 | #define CACHE_WRITE (1 << C(OP_WRITE)) | 109 | #define CACHE_WRITE (1 << C(OP_WRITE)) |
| 106 | #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) | 110 | #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) |
| 107 | #define COP(x) (1 << x) | 111 | #define COP(x) (1 << x) |
| 108 | 112 | ||
| 109 | /* | 113 | /* |
| 110 | * cache operation stat | 114 | * cache operation stat |
| 111 | * L1I : Read and prefetch only | 115 | * L1I : Read and prefetch only |
| 112 | * ITLB and BPU : Read-only | 116 | * ITLB and BPU : Read-only |
| 113 | */ | 117 | */ |
| 114 | static unsigned long hw_cache_stat[C(MAX)] = { | 118 | static unsigned long hw_cache_stat[C(MAX)] = { |
| 115 | [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | 119 | [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), |
| 116 | [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), | 120 | [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), |
| 117 | [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | 121 | [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), |
| 118 | [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), | 122 | [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), |
| 119 | [C(ITLB)] = (CACHE_READ), | 123 | [C(ITLB)] = (CACHE_READ), |
| 120 | [C(BPU)] = (CACHE_READ), | 124 | [C(BPU)] = (CACHE_READ), |
| 121 | }; | 125 | }; |
| 122 | 126 | ||
| 123 | #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ | 127 | #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ |
| 124 | while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ | 128 | while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ |
| 125 | if (sys_dirent.d_type == DT_DIR && \ | 129 | if (sys_dirent.d_type == DT_DIR && \ |
| 126 | (strcmp(sys_dirent.d_name, ".")) && \ | 130 | (strcmp(sys_dirent.d_name, ".")) && \ |
| 127 | (strcmp(sys_dirent.d_name, ".."))) | 131 | (strcmp(sys_dirent.d_name, ".."))) |
| 128 | 132 | ||
| 129 | static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) | 133 | static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) |
| 130 | { | 134 | { |
| 131 | char evt_path[MAXPATHLEN]; | 135 | char evt_path[MAXPATHLEN]; |
| 132 | int fd; | 136 | int fd; |
| 133 | 137 | ||
| 134 | snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, | 138 | snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, |
| 135 | sys_dir->d_name, evt_dir->d_name); | 139 | sys_dir->d_name, evt_dir->d_name); |
| 136 | fd = open(evt_path, O_RDONLY); | 140 | fd = open(evt_path, O_RDONLY); |
| 137 | if (fd < 0) | 141 | if (fd < 0) |
| 138 | return -EINVAL; | 142 | return -EINVAL; |
| 139 | close(fd); | 143 | close(fd); |
| 140 | 144 | ||
| 141 | return 0; | 145 | return 0; |
| 142 | } | 146 | } |
| 143 | 147 | ||
| 144 | #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ | 148 | #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ |
| 145 | while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ | 149 | while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ |
| 146 | if (evt_dirent.d_type == DT_DIR && \ | 150 | if (evt_dirent.d_type == DT_DIR && \ |
| 147 | (strcmp(evt_dirent.d_name, ".")) && \ | 151 | (strcmp(evt_dirent.d_name, ".")) && \ |
| 148 | (strcmp(evt_dirent.d_name, "..")) && \ | 152 | (strcmp(evt_dirent.d_name, "..")) && \ |
| 149 | (!tp_event_has_id(&sys_dirent, &evt_dirent))) | 153 | (!tp_event_has_id(&sys_dirent, &evt_dirent))) |
| 150 | 154 | ||
| 151 | #define MAX_EVENT_LENGTH 512 | 155 | #define MAX_EVENT_LENGTH 512 |
| 152 | 156 | ||
| 153 | 157 | ||
| 154 | struct tracepoint_path *tracepoint_id_to_path(u64 config) | 158 | struct tracepoint_path *tracepoint_id_to_path(u64 config) |
| 155 | { | 159 | { |
| 156 | struct tracepoint_path *path = NULL; | 160 | struct tracepoint_path *path = NULL; |
| 157 | DIR *sys_dir, *evt_dir; | 161 | DIR *sys_dir, *evt_dir; |
| 158 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; | 162 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; |
| 159 | char id_buf[4]; | 163 | char id_buf[4]; |
| 160 | int fd; | 164 | int fd; |
| 161 | u64 id; | 165 | u64 id; |
| 162 | char evt_path[MAXPATHLEN]; | 166 | char evt_path[MAXPATHLEN]; |
| 163 | char dir_path[MAXPATHLEN]; | 167 | char dir_path[MAXPATHLEN]; |
| 164 | 168 | ||
| 165 | if (debugfs_valid_mountpoint(debugfs_path)) | 169 | if (debugfs_valid_mountpoint(debugfs_path)) |
| 166 | return NULL; | 170 | return NULL; |
| 167 | 171 | ||
| 168 | sys_dir = opendir(debugfs_path); | 172 | sys_dir = opendir(debugfs_path); |
| 169 | if (!sys_dir) | 173 | if (!sys_dir) |
| 170 | return NULL; | 174 | return NULL; |
| 171 | 175 | ||
| 172 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { | 176 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { |
| 173 | 177 | ||
| 174 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, | 178 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, |
| 175 | sys_dirent.d_name); | 179 | sys_dirent.d_name); |
| 176 | evt_dir = opendir(dir_path); | 180 | evt_dir = opendir(dir_path); |
| 177 | if (!evt_dir) | 181 | if (!evt_dir) |
| 178 | continue; | 182 | continue; |
| 179 | 183 | ||
| 180 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { | 184 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { |
| 181 | 185 | ||
| 182 | snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, | 186 | snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, |
| 183 | evt_dirent.d_name); | 187 | evt_dirent.d_name); |
| 184 | fd = open(evt_path, O_RDONLY); | 188 | fd = open(evt_path, O_RDONLY); |
| 185 | if (fd < 0) | 189 | if (fd < 0) |
| 186 | continue; | 190 | continue; |
| 187 | if (read(fd, id_buf, sizeof(id_buf)) < 0) { | 191 | if (read(fd, id_buf, sizeof(id_buf)) < 0) { |
| 188 | close(fd); | 192 | close(fd); |
| 189 | continue; | 193 | continue; |
| 190 | } | 194 | } |
| 191 | close(fd); | 195 | close(fd); |
| 192 | id = atoll(id_buf); | 196 | id = atoll(id_buf); |
| 193 | if (id == config) { | 197 | if (id == config) { |
| 194 | closedir(evt_dir); | 198 | closedir(evt_dir); |
| 195 | closedir(sys_dir); | 199 | closedir(sys_dir); |
| 196 | path = calloc(1, sizeof(*path)); | 200 | path = calloc(1, sizeof(*path)); |
| 197 | path->system = malloc(MAX_EVENT_LENGTH); | 201 | path->system = malloc(MAX_EVENT_LENGTH); |
| 198 | if (!path->system) { | 202 | if (!path->system) { |
| 199 | free(path); | 203 | free(path); |
| 200 | return NULL; | 204 | return NULL; |
| 201 | } | 205 | } |
| 202 | path->name = malloc(MAX_EVENT_LENGTH); | 206 | path->name = malloc(MAX_EVENT_LENGTH); |
| 203 | if (!path->name) { | 207 | if (!path->name) { |
| 204 | free(path->system); | 208 | free(path->system); |
| 205 | free(path); | 209 | free(path); |
| 206 | return NULL; | 210 | return NULL; |
| 207 | } | 211 | } |
| 208 | strncpy(path->system, sys_dirent.d_name, | 212 | strncpy(path->system, sys_dirent.d_name, |
| 209 | MAX_EVENT_LENGTH); | 213 | MAX_EVENT_LENGTH); |
| 210 | strncpy(path->name, evt_dirent.d_name, | 214 | strncpy(path->name, evt_dirent.d_name, |
| 211 | MAX_EVENT_LENGTH); | 215 | MAX_EVENT_LENGTH); |
| 212 | return path; | 216 | return path; |
| 213 | } | 217 | } |
| 214 | } | 218 | } |
| 215 | closedir(evt_dir); | 219 | closedir(evt_dir); |
| 216 | } | 220 | } |
| 217 | 221 | ||
| 218 | closedir(sys_dir); | 222 | closedir(sys_dir); |
| 219 | return NULL; | 223 | return NULL; |
| 220 | } | 224 | } |
| 221 | 225 | ||
| 222 | #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1) | 226 | #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1) |
| 223 | static const char *tracepoint_id_to_name(u64 config) | 227 | static const char *tracepoint_id_to_name(u64 config) |
| 224 | { | 228 | { |
| 225 | static char buf[TP_PATH_LEN]; | 229 | static char buf[TP_PATH_LEN]; |
| 226 | struct tracepoint_path *path; | 230 | struct tracepoint_path *path; |
| 227 | 231 | ||
| 228 | path = tracepoint_id_to_path(config); | 232 | path = tracepoint_id_to_path(config); |
| 229 | if (path) { | 233 | if (path) { |
| 230 | snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name); | 234 | snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name); |
| 231 | free(path->name); | 235 | free(path->name); |
| 232 | free(path->system); | 236 | free(path->system); |
| 233 | free(path); | 237 | free(path); |
| 234 | } else | 238 | } else |
| 235 | snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown"); | 239 | snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown"); |
| 236 | 240 | ||
| 237 | return buf; | 241 | return buf; |
| 238 | } | 242 | } |
| 239 | 243 | ||
| 240 | static int is_cache_op_valid(u8 cache_type, u8 cache_op) | 244 | static int is_cache_op_valid(u8 cache_type, u8 cache_op) |
| 241 | { | 245 | { |
| 242 | if (hw_cache_stat[cache_type] & COP(cache_op)) | 246 | if (hw_cache_stat[cache_type] & COP(cache_op)) |
| 243 | return 1; /* valid */ | 247 | return 1; /* valid */ |
| 244 | else | 248 | else |
| 245 | return 0; /* invalid */ | 249 | return 0; /* invalid */ |
| 246 | } | 250 | } |
| 247 | 251 | ||
| 248 | static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result) | 252 | static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result) |
| 249 | { | 253 | { |
| 250 | static char name[50]; | 254 | static char name[50]; |
| 251 | 255 | ||
| 252 | if (cache_result) { | 256 | if (cache_result) { |
| 253 | sprintf(name, "%s-%s-%s", hw_cache[cache_type][0], | 257 | sprintf(name, "%s-%s-%s", hw_cache[cache_type][0], |
| 254 | hw_cache_op[cache_op][0], | 258 | hw_cache_op[cache_op][0], |
| 255 | hw_cache_result[cache_result][0]); | 259 | hw_cache_result[cache_result][0]); |
| 256 | } else { | 260 | } else { |
| 257 | sprintf(name, "%s-%s", hw_cache[cache_type][0], | 261 | sprintf(name, "%s-%s", hw_cache[cache_type][0], |
| 258 | hw_cache_op[cache_op][1]); | 262 | hw_cache_op[cache_op][1]); |
| 259 | } | 263 | } |
| 260 | 264 | ||
| 261 | return name; | 265 | return name; |
| 262 | } | 266 | } |
| 263 | 267 | ||
| 264 | const char *event_name(int counter) | 268 | const char *event_name(int counter) |
| 265 | { | 269 | { |
| 266 | u64 config = attrs[counter].config; | 270 | u64 config = attrs[counter].config; |
| 267 | int type = attrs[counter].type; | 271 | int type = attrs[counter].type; |
| 268 | 272 | ||
| 269 | return __event_name(type, config); | 273 | return __event_name(type, config); |
| 270 | } | 274 | } |
| 271 | 275 | ||
| 272 | const char *__event_name(int type, u64 config) | 276 | const char *__event_name(int type, u64 config) |
| 273 | { | 277 | { |
| 274 | static char buf[32]; | 278 | static char buf[32]; |
| 275 | 279 | ||
| 276 | if (type == PERF_TYPE_RAW) { | 280 | if (type == PERF_TYPE_RAW) { |
| 277 | sprintf(buf, "raw 0x%llx", config); | 281 | sprintf(buf, "raw 0x%llx", config); |
| 278 | return buf; | 282 | return buf; |
| 279 | } | 283 | } |
| 280 | 284 | ||
| 281 | switch (type) { | 285 | switch (type) { |
| 282 | case PERF_TYPE_HARDWARE: | 286 | case PERF_TYPE_HARDWARE: |
| 283 | if (config < PERF_COUNT_HW_MAX) | 287 | if (config < PERF_COUNT_HW_MAX) |
| 284 | return hw_event_names[config]; | 288 | return hw_event_names[config]; |
| 285 | return "unknown-hardware"; | 289 | return "unknown-hardware"; |
| 286 | 290 | ||
| 287 | case PERF_TYPE_HW_CACHE: { | 291 | case PERF_TYPE_HW_CACHE: { |
| 288 | u8 cache_type, cache_op, cache_result; | 292 | u8 cache_type, cache_op, cache_result; |
| 289 | 293 | ||
| 290 | cache_type = (config >> 0) & 0xff; | 294 | cache_type = (config >> 0) & 0xff; |
| 291 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | 295 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) |
| 292 | return "unknown-ext-hardware-cache-type"; | 296 | return "unknown-ext-hardware-cache-type"; |
| 293 | 297 | ||
| 294 | cache_op = (config >> 8) & 0xff; | 298 | cache_op = (config >> 8) & 0xff; |
| 295 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | 299 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) |
| 296 | return "unknown-ext-hardware-cache-op"; | 300 | return "unknown-ext-hardware-cache-op"; |
| 297 | 301 | ||
| 298 | cache_result = (config >> 16) & 0xff; | 302 | cache_result = (config >> 16) & 0xff; |
| 299 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | 303 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) |
| 300 | return "unknown-ext-hardware-cache-result"; | 304 | return "unknown-ext-hardware-cache-result"; |
| 301 | 305 | ||
| 302 | if (!is_cache_op_valid(cache_type, cache_op)) | 306 | if (!is_cache_op_valid(cache_type, cache_op)) |
| 303 | return "invalid-cache"; | 307 | return "invalid-cache"; |
| 304 | 308 | ||
| 305 | return event_cache_name(cache_type, cache_op, cache_result); | 309 | return event_cache_name(cache_type, cache_op, cache_result); |
| 306 | } | 310 | } |
| 307 | 311 | ||
| 308 | case PERF_TYPE_SOFTWARE: | 312 | case PERF_TYPE_SOFTWARE: |
| 309 | if (config < PERF_COUNT_SW_MAX) | 313 | if (config < PERF_COUNT_SW_MAX) |
| 310 | return sw_event_names[config]; | 314 | return sw_event_names[config]; |
| 311 | return "unknown-software"; | 315 | return "unknown-software"; |
| 312 | 316 | ||
| 313 | case PERF_TYPE_TRACEPOINT: | 317 | case PERF_TYPE_TRACEPOINT: |
| 314 | return tracepoint_id_to_name(config); | 318 | return tracepoint_id_to_name(config); |
| 315 | 319 | ||
| 316 | default: | 320 | default: |
| 317 | break; | 321 | break; |
| 318 | } | 322 | } |
| 319 | 323 | ||
| 320 | return "unknown"; | 324 | return "unknown"; |
| 321 | } | 325 | } |
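
Note: the shifts and 0xff masks above define the PERF_TYPE_HW_CACHE config layout: byte 0 is the cache type, byte 1 the operation, byte 2 the result. A worked decode, assuming the PERF_COUNT_HW_CACHE_* numbering from the kernel headers (L1D = 0, OP_READ = 0, RESULT_MISS = 1):

#include <stdio.h>

int main(void)
{
	unsigned long long config = 0x10000;	/* L1D | READ<<8 | MISS<<16 */

	printf("type=%llu op=%llu result=%llu\n",
	       (config >>  0) & 0xff,	/* 0 -> "L1-dcache" */
	       (config >>  8) & 0xff,	/* 0 -> "load"      */
	       (config >> 16) & 0xff);	/* 1 -> "misses"    */
	/* __event_name(PERF_TYPE_HW_CACHE, 0x10000) would therefore
	 * produce "L1-dcache-load-misses". */
	return 0;
}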
| 322 | 326 | ||
| 323 | static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size) | 327 | static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size) |
| 324 | { | 328 | { |
| 325 | int i, j; | 329 | int i, j; |
| 326 | int n, longest = -1; | 330 | int n, longest = -1; |
| 327 | 331 | ||
| 328 | for (i = 0; i < size; i++) { | 332 | for (i = 0; i < size; i++) { |
| 329 | for (j = 0; j < MAX_ALIASES && names[i][j]; j++) { | 333 | for (j = 0; j < MAX_ALIASES && names[i][j]; j++) { |
| 330 | n = strlen(names[i][j]); | 334 | n = strlen(names[i][j]); |
| 331 | if (n > longest && !strncasecmp(*str, names[i][j], n)) | 335 | if (n > longest && !strncasecmp(*str, names[i][j], n)) |
| 332 | longest = n; | 336 | longest = n; |
| 333 | } | 337 | } |
| 334 | if (longest > 0) { | 338 | if (longest > 0) { |
| 335 | *str += longest; | 339 | *str += longest; |
| 336 | return i; | 340 | return i; |
| 337 | } | 341 | } |
| 338 | } | 342 | } |
| 339 | 343 | ||
| 340 | return -1; | 344 | return -1; |
| 341 | } | 345 | } |
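
Note: parse_aliases() is a case-insensitive longest-prefix match: every alias in a row is tried against the front of the string, and the longest hit is consumed before the row index is returned. A trimmed, standalone sketch (only two rows of the table, otherwise the same logic):

#include <stdio.h>
#include <string.h>
#include <strings.h>

#define MAX_ALIASES 8

static const char *demo_cache[][MAX_ALIASES] = {
	{ "L1-dcache", "l1-d", "l1d", "L1-data", },
	{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
};

static int demo_parse_aliases(const char **str)
{
	int i, j, n, longest = -1;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < MAX_ALIASES && demo_cache[i][j]; j++) {
			n = strlen(demo_cache[i][j]);
			if (n > longest &&
			    !strncasecmp(*str, demo_cache[i][j], n))
				longest = n;
		}
		if (longest > 0) {
			*str += longest;	/* consume the alias */
			return i;
		}
	}
	return -1;
}

int main(void)
{
	const char *s = "l1d-load-misses";
	int row = demo_parse_aliases(&s);

	printf("row=%d rest=\"%s\"\n", row, s);	/* row=0 rest="-load-misses" */
	return 0;
}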
| 342 | 346 | ||
| 343 | static enum event_result | 347 | static enum event_result |
| 344 | parse_generic_hw_event(const char **str, struct perf_event_attr *attr) | 348 | parse_generic_hw_event(const char **str, struct perf_event_attr *attr) |
| 345 | { | 349 | { |
| 346 | const char *s = *str; | 350 | const char *s = *str; |
| 347 | int cache_type = -1, cache_op = -1, cache_result = -1; | 351 | int cache_type = -1, cache_op = -1, cache_result = -1; |
| 348 | 352 | ||
| 349 | cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX); | 353 | cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX); |
| 350 | /* | 354 | /* |
| 351 | * No fallback - if we cannot get a clear cache type | 355 | * No fallback - if we cannot get a clear cache type |
| 352 | * then bail out: | 356 | * then bail out: |
| 353 | */ | 357 | */ |
| 354 | if (cache_type == -1) | 358 | if (cache_type == -1) |
| 355 | return EVT_FAILED; | 359 | return EVT_FAILED; |
| 356 | 360 | ||
| 357 | while ((cache_op == -1 || cache_result == -1) && *s == '-') { | 361 | while ((cache_op == -1 || cache_result == -1) && *s == '-') { |
| 358 | ++s; | 362 | ++s; |
| 359 | 363 | ||
| 360 | if (cache_op == -1) { | 364 | if (cache_op == -1) { |
| 361 | cache_op = parse_aliases(&s, hw_cache_op, | 365 | cache_op = parse_aliases(&s, hw_cache_op, |
| 362 | PERF_COUNT_HW_CACHE_OP_MAX); | 366 | PERF_COUNT_HW_CACHE_OP_MAX); |
| 363 | if (cache_op >= 0) { | 367 | if (cache_op >= 0) { |
| 364 | if (!is_cache_op_valid(cache_type, cache_op)) | 368 | if (!is_cache_op_valid(cache_type, cache_op)) |
| 365 | return 0; | 369 | return 0; |
| 366 | continue; | 370 | continue; |
| 367 | } | 371 | } |
| 368 | } | 372 | } |
| 369 | 373 | ||
| 370 | if (cache_result == -1) { | 374 | if (cache_result == -1) { |
| 371 | cache_result = parse_aliases(&s, hw_cache_result, | 375 | cache_result = parse_aliases(&s, hw_cache_result, |
| 372 | PERF_COUNT_HW_CACHE_RESULT_MAX); | 376 | PERF_COUNT_HW_CACHE_RESULT_MAX); |
| 373 | if (cache_result >= 0) | 377 | if (cache_result >= 0) |
| 374 | continue; | 378 | continue; |
| 375 | } | 379 | } |
| 376 | 380 | ||
| 377 | /* | 381 | /* |
| 378 | * Can't parse this as a cache op or result, so back up | 382 | * Can't parse this as a cache op or result, so back up |
| 379 | * to the '-'. | 383 | * to the '-'. |
| 380 | */ | 384 | */ |
| 381 | --s; | 385 | --s; |
| 382 | break; | 386 | break; |
| 383 | } | 387 | } |
| 384 | 388 | ||
| 385 | /* | 389 | /* |
| 386 | * Fall back to reads: | 390 | * Fall back to reads: |
| 387 | */ | 391 | */ |
| 388 | if (cache_op == -1) | 392 | if (cache_op == -1) |
| 389 | cache_op = PERF_COUNT_HW_CACHE_OP_READ; | 393 | cache_op = PERF_COUNT_HW_CACHE_OP_READ; |
| 390 | 394 | ||
| 391 | /* | 395 | /* |
| 392 | * Fall back to accesses: | 396 | * Fall back to accesses: |
| 393 | */ | 397 | */ |
| 394 | if (cache_result == -1) | 398 | if (cache_result == -1) |
| 395 | cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; | 399 | cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; |
| 396 | 400 | ||
| 397 | attr->config = cache_type | (cache_op << 8) | (cache_result << 16); | 401 | attr->config = cache_type | (cache_op << 8) | (cache_result << 16); |
| 398 | attr->type = PERF_TYPE_HW_CACHE; | 402 | attr->type = PERF_TYPE_HW_CACHE; |
| 399 | 403 | ||
| 400 | *str = s; | 404 | *str = s; |
| 401 | return EVT_HANDLED; | 405 | return EVT_HANDLED; |
| 402 | } | 406 | } |
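
Note: parse_generic_hw_event() lets the operation and result default to read/access, so "L1-dcache" alone is a valid spec. A sketch of the configs this encoding yields, again assuming the kernel's PERF_COUNT_HW_CACHE_* numbering (L1D = 0, DTLB = 3, OP_WRITE = 1, RESULT_MISS = 1):

#include <stdio.h>

/* Trimmed mirrors of the kernel's enum values (an assumption to be
 * checked against include/linux/perf_event.h). */
enum { L1D, L1I, LL, DTLB, ITLB, BPU };
enum { OP_READ, OP_WRITE, OP_PREFETCH };
enum { RES_ACCESS, RES_MISS };

static unsigned long long cache_config(int type, int op, int res)
{
	return type | (op << 8) | (res << 16);
}

int main(void)
{
	printf("L1-dcache             -> 0x%llx\n",
	       cache_config(L1D, OP_READ, RES_ACCESS));	/* 0x0, via fallbacks */
	printf("L1-dcache-load-misses -> 0x%llx\n",
	       cache_config(L1D, OP_READ, RES_MISS));	/* 0x10000 */
	printf("dTLB-store-misses     -> 0x%llx\n",
	       cache_config(DTLB, OP_WRITE, RES_MISS));	/* 0x10103 */
	return 0;
}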
| 403 | 407 | ||
| 404 | static enum event_result | 408 | static enum event_result |
| 405 | parse_single_tracepoint_event(char *sys_name, | 409 | parse_single_tracepoint_event(char *sys_name, |
| 406 | const char *evt_name, | 410 | const char *evt_name, |
| 407 | unsigned int evt_length, | 411 | unsigned int evt_length, |
| 408 | char *flags, | 412 | char *flags, |
| 409 | struct perf_event_attr *attr, | 413 | struct perf_event_attr *attr, |
| 410 | const char **strp) | 414 | const char **strp) |
| 411 | { | 415 | { |
| 412 | char evt_path[MAXPATHLEN]; | 416 | char evt_path[MAXPATHLEN]; |
| 413 | char id_buf[4]; | 417 | char id_buf[4]; |
| 414 | u64 id; | 418 | u64 id; |
| 415 | int fd; | 419 | int fd; |
| 416 | 420 | ||
| 417 | if (flags) { | 421 | if (flags) { |
| 418 | if (!strncmp(flags, "record", strlen(flags))) { | 422 | if (!strncmp(flags, "record", strlen(flags))) { |
| 419 | attr->sample_type |= PERF_SAMPLE_RAW; | 423 | attr->sample_type |= PERF_SAMPLE_RAW; |
| 420 | attr->sample_type |= PERF_SAMPLE_TIME; | 424 | attr->sample_type |= PERF_SAMPLE_TIME; |
| 421 | attr->sample_type |= PERF_SAMPLE_CPU; | 425 | attr->sample_type |= PERF_SAMPLE_CPU; |
| 422 | } | 426 | } |
| 423 | } | 427 | } |
| 424 | 428 | ||
| 425 | snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, | 429 | snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, |
| 426 | sys_name, evt_name); | 430 | sys_name, evt_name); |
| 427 | 431 | ||
| 428 | fd = open(evt_path, O_RDONLY); | 432 | fd = open(evt_path, O_RDONLY); |
| 429 | if (fd < 0) | 433 | if (fd < 0) |
| 430 | return EVT_FAILED; | 434 | return EVT_FAILED; |
| 431 | 435 | ||
| 432 | if (read(fd, id_buf, sizeof(id_buf)) < 0) { | 436 | if (read(fd, id_buf, sizeof(id_buf)) < 0) { |
| 433 | close(fd); | 437 | close(fd); |
| 434 | return EVT_FAILED; | 438 | return EVT_FAILED; |
| 435 | } | 439 | } |
| 436 | 440 | ||
| 437 | close(fd); | 441 | close(fd); |
| 438 | id = atoll(id_buf); | 442 | id = atoll(id_buf); |
| 439 | attr->config = id; | 443 | attr->config = id; |
| 440 | attr->type = PERF_TYPE_TRACEPOINT; | 444 | attr->type = PERF_TYPE_TRACEPOINT; |
| 441 | *strp = evt_name + evt_length; | 445 | *strp = evt_name + evt_length; |
| 442 | 446 | ||
| 443 | return EVT_HANDLED; | 447 | return EVT_HANDLED; |
| 444 | } | 448 | } |
| 445 | 449 | ||
| 446 | /* sys + ':' + event + ':' + flags */ | 450 | /* sys + ':' + event + ':' + flags */ |
| 447 | #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) | 451 | #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) |
| 448 | static enum event_result | 452 | static enum event_result |
| 449 | parse_subsystem_tracepoint_event(char *sys_name, char *flags) | 453 | parse_subsystem_tracepoint_event(char *sys_name, char *flags) |
| 450 | { | 454 | { |
| 451 | char evt_path[MAXPATHLEN]; | 455 | char evt_path[MAXPATHLEN]; |
| 452 | struct dirent *evt_ent; | 456 | struct dirent *evt_ent; |
| 453 | DIR *evt_dir; | 457 | DIR *evt_dir; |
| 454 | 458 | ||
| 455 | snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name); | 459 | snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name); |
| 456 | evt_dir = opendir(evt_path); | 460 | evt_dir = opendir(evt_path); |
| 457 | 461 | ||
| 458 | if (!evt_dir) { | 462 | if (!evt_dir) { |
| 459 | perror("Can't open event dir"); | 463 | perror("Can't open event dir"); |
| 460 | return EVT_FAILED; | 464 | return EVT_FAILED; |
| 461 | } | 465 | } |
| 462 | 466 | ||
| 463 | while ((evt_ent = readdir(evt_dir))) { | 467 | while ((evt_ent = readdir(evt_dir))) { |
| 464 | char event_opt[MAX_EVOPT_LEN + 1]; | 468 | char event_opt[MAX_EVOPT_LEN + 1]; |
| 465 | int len; | 469 | int len; |
| 466 | unsigned int rem = MAX_EVOPT_LEN; | 470 | unsigned int rem = MAX_EVOPT_LEN; |
| 467 | 471 | ||
| 468 | if (!strcmp(evt_ent->d_name, ".") | 472 | if (!strcmp(evt_ent->d_name, ".") |
| 469 | || !strcmp(evt_ent->d_name, "..") | 473 | || !strcmp(evt_ent->d_name, "..") |
| 470 | || !strcmp(evt_ent->d_name, "enable") | 474 | || !strcmp(evt_ent->d_name, "enable") |
| 471 | || !strcmp(evt_ent->d_name, "filter")) | 475 | || !strcmp(evt_ent->d_name, "filter")) |
| 472 | continue; | 476 | continue; |
| 473 | 477 | ||
| 474 | len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name, | 478 | len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name, |
| 475 | evt_ent->d_name); | 479 | evt_ent->d_name); |
| 476 | if (len < 0) | 480 | if (len < 0) |
| 477 | return EVT_FAILED; | 481 | return EVT_FAILED; |
| 478 | 482 | ||
| 479 | rem -= len; | 483 | rem -= len; |
| 480 | if (flags) { | 484 | if (flags) { |
| 481 | if (rem < strlen(flags) + 1) | 485 | if (rem < strlen(flags) + 1) |
| 482 | return EVT_FAILED; | 486 | return EVT_FAILED; |
| 483 | 487 | ||
| 484 | strcat(event_opt, ":"); | 488 | strcat(event_opt, ":"); |
| 485 | strcat(event_opt, flags); | 489 | strcat(event_opt, flags); |
| 486 | } | 490 | } |
| 487 | 491 | ||
| 488 | if (parse_events(NULL, event_opt, 0)) | 492 | if (parse_events(NULL, event_opt, 0)) |
| 489 | return EVT_FAILED; | 493 | return EVT_FAILED; |
| 490 | } | 494 | } |
| 491 | 495 | ||
| 492 | return EVT_HANDLED_ALL; | 496 | return EVT_HANDLED_ALL; |
| 493 | } | 497 | } |
| 494 | 498 | ||
| 495 | 499 | ||
| 496 | static enum event_result parse_tracepoint_event(const char **strp, | 500 | static enum event_result parse_tracepoint_event(const char **strp, |
| 497 | struct perf_event_attr *attr) | 501 | struct perf_event_attr *attr) |
| 498 | { | 502 | { |
| 499 | const char *evt_name; | 503 | const char *evt_name; |
| 500 | char *flags; | 504 | char *flags; |
| 501 | char sys_name[MAX_EVENT_LENGTH]; | 505 | char sys_name[MAX_EVENT_LENGTH]; |
| 502 | unsigned int sys_length, evt_length; | 506 | unsigned int sys_length, evt_length; |
| 503 | 507 | ||
| 504 | if (debugfs_valid_mountpoint(debugfs_path)) | 508 | if (debugfs_valid_mountpoint(debugfs_path)) |
| 505 | return 0; | 509 | return 0; |
| 506 | 510 | ||
| 507 | evt_name = strchr(*strp, ':'); | 511 | evt_name = strchr(*strp, ':'); |
| 508 | if (!evt_name) | 512 | if (!evt_name) |
| 509 | return EVT_FAILED; | 513 | return EVT_FAILED; |
| 510 | 514 | ||
| 511 | sys_length = evt_name - *strp; | 515 | sys_length = evt_name - *strp; |
| 512 | if (sys_length >= MAX_EVENT_LENGTH) | 516 | if (sys_length >= MAX_EVENT_LENGTH) |
| 513 | return 0; | 517 | return 0; |
| 514 | 518 | ||
| 515 | strncpy(sys_name, *strp, sys_length); | 519 | strncpy(sys_name, *strp, sys_length); |
| 516 | sys_name[sys_length] = '\0'; | 520 | sys_name[sys_length] = '\0'; |
| 517 | evt_name = evt_name + 1; | 521 | evt_name = evt_name + 1; |
| 518 | 522 | ||
| 519 | flags = strchr(evt_name, ':'); | 523 | flags = strchr(evt_name, ':'); |
| 520 | if (flags) { | 524 | if (flags) { |
| 521 | /* split it out: */ | 525 | /* split it out: */ |
| 522 | evt_name = strndup(evt_name, flags - evt_name); | 526 | evt_name = strndup(evt_name, flags - evt_name); |
| 523 | flags++; | 527 | flags++; |
| 524 | } | 528 | } |
| 525 | 529 | ||
| 526 | evt_length = strlen(evt_name); | 530 | evt_length = strlen(evt_name); |
| 527 | if (evt_length >= MAX_EVENT_LENGTH) | 531 | if (evt_length >= MAX_EVENT_LENGTH) |
| 528 | return EVT_FAILED; | 532 | return EVT_FAILED; |
| 529 | 533 | ||
| 530 | if (!strcmp(evt_name, "*")) { | 534 | if (!strcmp(evt_name, "*")) { |
| 531 | *strp = evt_name + evt_length; | 535 | *strp = evt_name + evt_length; |
| 532 | return parse_subsystem_tracepoint_event(sys_name, flags); | 536 | return parse_subsystem_tracepoint_event(sys_name, flags); |
| 533 | } else | 537 | } else |
| 534 | return parse_single_tracepoint_event(sys_name, evt_name, | 538 | return parse_single_tracepoint_event(sys_name, evt_name, |
| 535 | evt_length, flags, | 539 | evt_length, flags, |
| 536 | attr, strp); | 540 | attr, strp); |
| 537 | } | 541 | } |
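
Note: a tracepoint spec is "sys:event" with an optional ":flags" tail, and "*" as the event name fans out to the whole subsystem. A standalone sketch of the split done above with strchr() (printf field widths stand in for the strndup() call, which the original never frees):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *spec = "sched:sched_switch:record";
	const char *evt = strchr(spec, ':') + 1;	/* input has a ':' */
	const char *flags = strchr(evt, ':');
	int sys_len = (int)(evt - spec) - 1;
	int evt_len = flags ? (int)(flags - evt) : (int)strlen(evt);

	printf("sys=%.*s event=%.*s flags=%s\n",
	       sys_len, spec, evt_len, evt,
	       flags ? flags + 1 : "(none)");
	/* prints: sys=sched event=sched_switch flags=record */
	return 0;
}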
| 538 | 542 | ||
| 539 | static int check_events(const char *str, unsigned int i) | 543 | static int check_events(const char *str, unsigned int i) |
| 540 | { | 544 | { |
| 541 | int n; | 545 | int n; |
| 542 | 546 | ||
| 543 | n = strlen(event_symbols[i].symbol); | 547 | n = strlen(event_symbols[i].symbol); |
| 544 | if (!strncmp(str, event_symbols[i].symbol, n)) | 548 | if (!strncmp(str, event_symbols[i].symbol, n)) |
| 545 | return n; | 549 | return n; |
| 546 | 550 | ||
| 547 | n = strlen(event_symbols[i].alias); | 551 | n = strlen(event_symbols[i].alias); |
| 548 | if (n) | 552 | if (n) |
| 549 | if (!strncmp(str, event_symbols[i].alias, n)) | 553 | if (!strncmp(str, event_symbols[i].alias, n)) |
| 550 | return n; | 554 | return n; |
| 551 | return 0; | 555 | return 0; |
| 552 | } | 556 | } |
| 553 | 557 | ||
| 554 | static enum event_result | 558 | static enum event_result |
| 555 | parse_symbolic_event(const char **strp, struct perf_event_attr *attr) | 559 | parse_symbolic_event(const char **strp, struct perf_event_attr *attr) |
| 556 | { | 560 | { |
| 557 | const char *str = *strp; | 561 | const char *str = *strp; |
| 558 | unsigned int i; | 562 | unsigned int i; |
| 559 | int n; | 563 | int n; |
| 560 | 564 | ||
| 561 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { | 565 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { |
| 562 | n = check_events(str, i); | 566 | n = check_events(str, i); |
| 563 | if (n > 0) { | 567 | if (n > 0) { |
| 564 | attr->type = event_symbols[i].type; | 568 | attr->type = event_symbols[i].type; |
| 565 | attr->config = event_symbols[i].config; | 569 | attr->config = event_symbols[i].config; |
| 566 | *strp = str + n; | 570 | *strp = str + n; |
| 567 | return EVT_HANDLED; | 571 | return EVT_HANDLED; |
| 568 | } | 572 | } |
| 569 | } | 573 | } |
| 570 | return EVT_FAILED; | 574 | return EVT_FAILED; |
| 571 | } | 575 | } |
| 572 | 576 | ||
| 573 | static enum event_result | 577 | static enum event_result |
| 574 | parse_raw_event(const char **strp, struct perf_event_attr *attr) | 578 | parse_raw_event(const char **strp, struct perf_event_attr *attr) |
| 575 | { | 579 | { |
| 576 | const char *str = *strp; | 580 | const char *str = *strp; |
| 577 | u64 config; | 581 | u64 config; |
| 578 | int n; | 582 | int n; |
| 579 | 583 | ||
| 580 | if (*str != 'r') | 584 | if (*str != 'r') |
| 581 | return EVT_FAILED; | 585 | return EVT_FAILED; |
| 582 | n = hex2u64(str + 1, &config); | 586 | n = hex2u64(str + 1, &config); |
| 583 | if (n > 0) { | 587 | if (n > 0) { |
| 584 | *strp = str + n + 1; | 588 | *strp = str + n + 1; |
| 585 | attr->type = PERF_TYPE_RAW; | 589 | attr->type = PERF_TYPE_RAW; |
| 586 | attr->config = config; | 590 | attr->config = config; |
| 587 | return EVT_HANDLED; | 591 | return EVT_HANDLED; |
| 588 | } | 592 | } |
| 589 | return EVT_FAILED; | 593 | return EVT_FAILED; |
| 590 | } | 594 | } |
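
Note: raw events are 'r' followed by a hex config. A sketch using strtoull() in place of the tool's hex2u64() helper:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *str = "r1a8";	/* as in: perf stat -e r1a8 */
	char *end;
	unsigned long long config = strtoull(str + 1, &end, 16);

	if (end > str + 1)		/* at least one hex digit parsed */
		printf("PERF_TYPE_RAW, config=0x%llx\n", config);
	return 0;
}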
| 591 | 595 | ||
| 592 | static enum event_result | 596 | static enum event_result |
| 593 | parse_numeric_event(const char **strp, struct perf_event_attr *attr) | 597 | parse_numeric_event(const char **strp, struct perf_event_attr *attr) |
| 594 | { | 598 | { |
| 595 | const char *str = *strp; | 599 | const char *str = *strp; |
| 596 | char *endp; | 600 | char *endp; |
| 597 | unsigned long type; | 601 | unsigned long type; |
| 598 | u64 config; | 602 | u64 config; |
| 599 | 603 | ||
| 600 | type = strtoul(str, &endp, 0); | 604 | type = strtoul(str, &endp, 0); |
| 601 | if (endp > str && type < PERF_TYPE_MAX && *endp == ':') { | 605 | if (endp > str && type < PERF_TYPE_MAX && *endp == ':') { |
| 602 | str = endp + 1; | 606 | str = endp + 1; |
| 603 | config = strtoul(str, &endp, 0); | 607 | config = strtoul(str, &endp, 0); |
| 604 | if (endp > str) { | 608 | if (endp > str) { |
| 605 | attr->type = type; | 609 | attr->type = type; |
| 606 | attr->config = config; | 610 | attr->config = config; |
| 607 | *strp = endp; | 611 | *strp = endp; |
| 608 | return EVT_HANDLED; | 612 | return EVT_HANDLED; |
| 609 | } | 613 | } |
| 610 | } | 614 | } |
| 611 | return EVT_FAILED; | 615 | return EVT_FAILED; |
| 612 | } | 616 | } |
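
Note: the numeric form is "<type>:<config>" with both numbers in any base strtoul() accepts. Assuming the kernel's perf_type_id numbering (PERF_TYPE_SOFTWARE = 1, PERF_COUNT_SW_CPU_CLOCK = 0), "1:0" selects the software cpu-clock event:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *str = "1:0";	/* <type>:<config> */
	char *endp;
	unsigned long type = strtoul(str, &endp, 0);
	unsigned long long config = strtoull(endp + 1, &endp, 0);

	printf("type=%lu config=%llu\n", type, config);
	return 0;
}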
| 613 | 617 | ||
| 614 | static enum event_result | 618 | static enum event_result |
| 615 | parse_event_modifier(const char **strp, struct perf_event_attr *attr) | 619 | parse_event_modifier(const char **strp, struct perf_event_attr *attr) |
| 616 | { | 620 | { |
| 617 | const char *str = *strp; | 621 | const char *str = *strp; |
| 618 | int eu = 1, ek = 1, eh = 1; | 622 | int eu = 1, ek = 1, eh = 1; |
| 619 | 623 | ||
| 620 | if (*str++ != ':') | 624 | if (*str++ != ':') |
| 621 | return 0; | 625 | return 0; |
| 622 | while (*str) { | 626 | while (*str) { |
| 623 | if (*str == 'u') | 627 | if (*str == 'u') |
| 624 | eu = 0; | 628 | eu = 0; |
| 625 | else if (*str == 'k') | 629 | else if (*str == 'k') |
| 626 | ek = 0; | 630 | ek = 0; |
| 627 | else if (*str == 'h') | 631 | else if (*str == 'h') |
| 628 | eh = 0; | 632 | eh = 0; |
| 629 | else | 633 | else |
| 630 | break; | 634 | break; |
| 631 | ++str; | 635 | ++str; |
| 632 | } | 636 | } |
| 633 | if (str >= *strp + 2) { | 637 | if (str >= *strp + 2) { |
| 634 | *strp = str; | 638 | *strp = str; |
| 635 | attr->exclude_user = eu; | 639 | attr->exclude_user = eu; |
| 636 | attr->exclude_kernel = ek; | 640 | attr->exclude_kernel = ek; |
| 637 | attr->exclude_hv = eh; | 641 | attr->exclude_hv = eh; |
| 638 | return 1; | 642 | return 1; |
| 639 | } | 643 | } |
| 640 | return 0; | 644 | return 0; |
| 641 | } | 645 | } |
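
Note: modifiers start from everything excluded and each listed letter re-enables one privilege level, so ":u" means user space only, not "also user space". A standalone sketch of the loop:

#include <stdio.h>

int main(void)
{
	const char *mod = ":u";		/* as in: perf stat -e cycles:u */
	int eu = 1, ek = 1, eh = 1;	/* start with everything excluded */

	for (mod++; *mod; mod++) {
		if (*mod == 'u')
			eu = 0;		/* count user space */
		else if (*mod == 'k')
			ek = 0;		/* count kernel */
		else if (*mod == 'h')
			eh = 0;		/* count hypervisor */
		else
			break;
	}
	printf("exclude_user=%d exclude_kernel=%d exclude_hv=%d\n",
	       eu, ek, eh);		/* 0 1 1 for ":u" */
	return 0;
}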
| 642 | 646 | ||
| 643 | /* | 647 | /* |
| 644 | * Each event can have multiple symbolic names. | 648 | * Each event can have multiple symbolic names. |
| 645 | * Symbolic names are (almost) exactly matched. | 649 | * Symbolic names are (almost) exactly matched. |
| 646 | */ | 650 | */ |
| 647 | static enum event_result | 651 | static enum event_result |
| 648 | parse_event_symbols(const char **str, struct perf_event_attr *attr) | 652 | parse_event_symbols(const char **str, struct perf_event_attr *attr) |
| 649 | { | 653 | { |
| 650 | enum event_result ret; | 654 | enum event_result ret; |
| 651 | 655 | ||
| 652 | ret = parse_tracepoint_event(str, attr); | 656 | ret = parse_tracepoint_event(str, attr); |
| 653 | if (ret != EVT_FAILED) | 657 | if (ret != EVT_FAILED) |
| 654 | goto modifier; | 658 | goto modifier; |
| 655 | 659 | ||
| 656 | ret = parse_raw_event(str, attr); | 660 | ret = parse_raw_event(str, attr); |
| 657 | if (ret != EVT_FAILED) | 661 | if (ret != EVT_FAILED) |
| 658 | goto modifier; | 662 | goto modifier; |
| 659 | 663 | ||
| 660 | ret = parse_numeric_event(str, attr); | 664 | ret = parse_numeric_event(str, attr); |
| 661 | if (ret != EVT_FAILED) | 665 | if (ret != EVT_FAILED) |
| 662 | goto modifier; | 666 | goto modifier; |
| 663 | 667 | ||
| 664 | ret = parse_symbolic_event(str, attr); | 668 | ret = parse_symbolic_event(str, attr); |
| 665 | if (ret != EVT_FAILED) | 669 | if (ret != EVT_FAILED) |
| 666 | goto modifier; | 670 | goto modifier; |
| 667 | 671 | ||
| 668 | ret = parse_generic_hw_event(str, attr); | 672 | ret = parse_generic_hw_event(str, attr); |
| 669 | if (ret != EVT_FAILED) | 673 | if (ret != EVT_FAILED) |
| 670 | goto modifier; | 674 | goto modifier; |
| 671 | 675 | ||
| 672 | fprintf(stderr, "invalid or unsupported event: '%s'\n", *str); | 676 | fprintf(stderr, "invalid or unsupported event: '%s'\n", *str); |
| 673 | fprintf(stderr, "Run 'perf list' for a list of valid events\n"); | 677 | fprintf(stderr, "Run 'perf list' for a list of valid events\n"); |
| 674 | return EVT_FAILED; | 678 | return EVT_FAILED; |
| 675 | 679 | ||
| 676 | modifier: | 680 | modifier: |
| 677 | parse_event_modifier(str, attr); | 681 | parse_event_modifier(str, attr); |
| 678 | 682 | ||
| 679 | return ret; | 683 | return ret; |
| 680 | } | 684 | } |
| 681 | 685 | ||
| 682 | static void store_event_type(const char *orgname) | 686 | static void store_event_type(const char *orgname) |
| 683 | { | 687 | { |
| 684 | char filename[PATH_MAX], *c; | 688 | char filename[PATH_MAX], *c; |
| 685 | FILE *file; | 689 | FILE *file; |
| 686 | int id; | 690 | int id; |
| 687 | 691 | ||
| 688 | sprintf(filename, "%s/", debugfs_path); | 692 | sprintf(filename, "%s/", debugfs_path); |
| 689 | strncat(filename, orgname, strlen(orgname)); | 693 | strncat(filename, orgname, strlen(orgname)); |
| 690 | strcat(filename, "/id"); | 694 | strcat(filename, "/id"); |
| 691 | 695 | ||
| 692 | c = strchr(filename, ':'); | 696 | c = strchr(filename, ':'); |
| 693 | if (c) | 697 | if (c) |
| 694 | *c = '/'; | 698 | *c = '/'; |
| 695 | 699 | ||
| 696 | file = fopen(filename, "r"); | 700 | file = fopen(filename, "r"); |
| 697 | if (!file) | 701 | if (!file) |
| 698 | return; | 702 | return; |
| 699 | if (fscanf(file, "%i", &id) < 1) | 703 | if (fscanf(file, "%i", &id) < 1) |
| 700 | die("cannot store event ID"); | 704 | die("cannot store event ID"); |
| 701 | fclose(file); | 705 | fclose(file); |
| 702 | perf_header__push_event(id, orgname); | 706 | perf_header__push_event(id, orgname); |
| 703 | } | 707 | } |
| 704 | 708 | ||
| 705 | int parse_events(const struct option *opt __used, const char *str, int unset __used) | 709 | int parse_events(const struct option *opt __used, const char *str, int unset __used) |
| 706 | { | 710 | { |
| 707 | struct perf_event_attr attr; | 711 | struct perf_event_attr attr; |
| 708 | enum event_result ret; | 712 | enum event_result ret; |
| 709 | 713 | ||
| 710 | if (strchr(str, ':')) | 714 | if (strchr(str, ':')) |
| 711 | store_event_type(str); | 715 | store_event_type(str); |
| 712 | 716 | ||
| 713 | for (;;) { | 717 | for (;;) { |
| 714 | if (nr_counters == MAX_COUNTERS) | 718 | if (nr_counters == MAX_COUNTERS) |
| 715 | return -1; | 719 | return -1; |
| 716 | 720 | ||
| 717 | memset(&attr, 0, sizeof(attr)); | 721 | memset(&attr, 0, sizeof(attr)); |
| 718 | ret = parse_event_symbols(&str, &attr); | 722 | ret = parse_event_symbols(&str, &attr); |
| 719 | if (ret == EVT_FAILED) | 723 | if (ret == EVT_FAILED) |
| 720 | return -1; | 724 | return -1; |
| 721 | 725 | ||
| 722 | if (!(*str == 0 || *str == ',' || isspace(*str))) | 726 | if (!(*str == 0 || *str == ',' || isspace(*str))) |
| 723 | return -1; | 727 | return -1; |
| 724 | 728 | ||
| 725 | if (ret != EVT_HANDLED_ALL) { | 729 | if (ret != EVT_HANDLED_ALL) { |
| 726 | attrs[nr_counters] = attr; | 730 | attrs[nr_counters] = attr; |
| 727 | nr_counters++; | 731 | nr_counters++; |
| 728 | } | 732 | } |
| 729 | 733 | ||
| 730 | if (*str == 0) | 734 | if (*str == 0) |
| 731 | break; | 735 | break; |
| 732 | if (*str == ',') | 736 | if (*str == ',') |
| 733 | ++str; | 737 | ++str; |
| 734 | while (isspace(*str)) | 738 | while (isspace(*str)) |
| 735 | ++str; | 739 | ++str; |
| 736 | } | 740 | } |
| 737 | 741 | ||
| 738 | return 0; | 742 | return 0; |
| 739 | } | 743 | } |
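
Note: one -e argument may carry a comma-separated (and whitespace-padded) list; the loop above parses one element per iteration into its own attrs[] slot, trying tracepoint, raw, numeric, symbolic, and generic-cache syntax in that order. A sketch of just the tokenisation:

#include <ctype.h>
#include <stdio.h>

int main(void)
{
	const char *str = "cycles, instructions,r1a8";

	while (*str) {
		int n = 0;

		while (str[n] && str[n] != ',' &&
		       !isspace((unsigned char)str[n]))
			n++;
		printf("event: %.*s\n", n, str);	/* one attrs[] slot each */
		str += n;
		if (*str == ',')
			++str;
		while (isspace((unsigned char)*str))
			++str;
	}
	return 0;
}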
| 740 | 744 | ||
| 741 | int parse_filter(const struct option *opt __used, const char *str, | 745 | int parse_filter(const struct option *opt __used, const char *str, |
| 742 | int unset __used) | 746 | int unset __used) |
| 743 | { | 747 | { |
| 744 | int i = nr_counters - 1; | 748 | int i = nr_counters - 1; |
| 745 | int len = strlen(str); | 749 | int len = strlen(str); |
| 746 | 750 | ||
| 747 | if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { | 751 | if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { |
| 748 | fprintf(stderr, | 752 | fprintf(stderr, |
| 749 | "-F option should follow a -e tracepoint option\n"); | 753 | "-F option should follow a -e tracepoint option\n"); |
| 750 | return -1; | 754 | return -1; |
| 751 | } | 755 | } |
| 752 | 756 | ||
| 753 | filters[i] = malloc(len + 1); | 757 | filters[i] = malloc(len + 1); |
| 754 | if (!filters[i]) { | 758 | if (!filters[i]) { |
| 755 | fprintf(stderr, "not enough memory to hold filter string\n"); | 759 | fprintf(stderr, "not enough memory to hold filter string\n"); |
| 756 | return -1; | 760 | return -1; |
| 757 | } | 761 | } |
| 758 | strcpy(filters[i], str); | 762 | strcpy(filters[i], str); |
| 759 | 763 | ||
| 760 | return 0; | 764 | return 0; |
| 761 | } | 765 | } |
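
Note: -F attaches a filter to the most recently added counter, so ordering matters on the command line, e.g. perf record -e sched:sched_switch -F 'next_comm == "bash"' (next_comm is an example field taken from the sched_switch format file). The string is only stored here; the expression syntax is the kernel's tracepoint filter language and is evaluated kernel-side.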
| 762 | 766 | ||
| 763 | static const char * const event_type_descriptors[] = { | 767 | static const char * const event_type_descriptors[] = { |
| 764 | "", | 768 | "", |
| 765 | "Hardware event", | 769 | "Hardware event", |
| 766 | "Software event", | 770 | "Software event", |
| 767 | "Tracepoint event", | 771 | "Tracepoint event", |
| 768 | "Hardware cache event", | 772 | "Hardware cache event", |
| 769 | }; | 773 | }; |
| 770 | 774 | ||
| 771 | /* | 775 | /* |
| 772 | * Print the events from <debugfs_mount_point>/tracing/events | 776 | * Print the events from <debugfs_mount_point>/tracing/events |
| 773 | */ | 777 | */ |
| 774 | 778 | ||
| 775 | static void print_tracepoint_events(void) | 779 | static void print_tracepoint_events(void) |
| 776 | { | 780 | { |
| 777 | DIR *sys_dir, *evt_dir; | 781 | DIR *sys_dir, *evt_dir; |
| 778 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; | 782 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; |
| 779 | char evt_path[MAXPATHLEN]; | 783 | char evt_path[MAXPATHLEN]; |
| 780 | char dir_path[MAXPATHLEN]; | 784 | char dir_path[MAXPATHLEN]; |
| 781 | 785 | ||
| 782 | if (debugfs_valid_mountpoint(debugfs_path)) | 786 | if (debugfs_valid_mountpoint(debugfs_path)) |
| 783 | return; | 787 | return; |
| 784 | 788 | ||
| 785 | sys_dir = opendir(debugfs_path); | 789 | sys_dir = opendir(debugfs_path); |
| 786 | if (!sys_dir) | 790 | if (!sys_dir) |
| 787 | return; | 791 | return; |
| 788 | 792 | ||
| 789 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { | 793 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { |
| 790 | 794 | ||
| 791 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, | 795 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, |
| 792 | sys_dirent.d_name); | 796 | sys_dirent.d_name); |
| 793 | evt_dir = opendir(dir_path); | 797 | evt_dir = opendir(dir_path); |
| 794 | if (!evt_dir) | 798 | if (!evt_dir) |
| 795 | continue; | 799 | continue; |
| 796 | 800 | ||
| 797 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { | 801 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { |
| 798 | snprintf(evt_path, MAXPATHLEN, "%s:%s", | 802 | snprintf(evt_path, MAXPATHLEN, "%s:%s", |
| 799 | sys_dirent.d_name, evt_dirent.d_name); | 803 | sys_dirent.d_name, evt_dirent.d_name); |
| 800 | printf(" %-42s [%s]\n", evt_path, | 804 | printf(" %-42s [%s]\n", evt_path, |
| 801 | event_type_descriptors[PERF_TYPE_TRACEPOINT+1]); | 805 | event_type_descriptors[PERF_TYPE_TRACEPOINT+1]); |
| 802 | } | 806 | } |
| 803 | closedir(evt_dir); | 807 | closedir(evt_dir); |
| 804 | } | 808 | } |
| 805 | closedir(sys_dir); | 809 | closedir(sys_dir); |
| 806 | } | 810 | } |
| 807 | 811 | ||
| 808 | /* | 812 | /* |
| 809 | * Print the help text for the event symbols: | 813 | * Print the help text for the event symbols: |
| 810 | */ | 814 | */ |
| 811 | void print_events(void) | 815 | void print_events(void) |
| 812 | { | 816 | { |
| 813 | struct event_symbol *syms = event_symbols; | 817 | struct event_symbol *syms = event_symbols; |
| 814 | unsigned int i, type, op, prev_type = -1; | 818 | unsigned int i, type, op, prev_type = -1; |
| 815 | char name[40]; | 819 | char name[40]; |
| 816 | 820 | ||
| 817 | printf("\n"); | 821 | printf("\n"); |
| 818 | printf("List of pre-defined events (to be used in -e):\n"); | 822 | printf("List of pre-defined events (to be used in -e):\n"); |
| 819 | 823 | ||
| 820 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { | 824 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { |
| 821 | type = syms->type + 1; | 825 | type = syms->type + 1; |
| 822 | if (type >= ARRAY_SIZE(event_type_descriptors)) | 826 | if (type >= ARRAY_SIZE(event_type_descriptors)) |
| 823 | type = 0; | 827 | type = 0; |
| 824 | 828 | ||
| 825 | if (type != prev_type) | 829 | if (type != prev_type) |
| 826 | printf("\n"); | 830 | printf("\n"); |
| 827 | 831 | ||
| 828 | if (strlen(syms->alias)) | 832 | if (strlen(syms->alias)) |
| 829 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); | 833 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); |
| 830 | else | 834 | else |
| 831 | strcpy(name, syms->symbol); | 835 | strcpy(name, syms->symbol); |
| 832 | printf(" %-42s [%s]\n", name, | 836 | printf(" %-42s [%s]\n", name, |
| 833 | event_type_descriptors[type]); | 837 | event_type_descriptors[type]); |
| 834 | 838 | ||
| 835 | prev_type = type; | 839 | prev_type = type; |
| 836 | } | 840 | } |
| 837 | 841 | ||
| 838 | printf("\n"); | 842 | printf("\n"); |
| 839 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | 843 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { |
| 840 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | 844 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { |
| 841 | /* skip invalid cache type */ | 845 | /* skip invalid cache type */ |
| 842 | if (!is_cache_op_valid(type, op)) | 846 | if (!is_cache_op_valid(type, op)) |
| 843 | continue; | 847 | continue; |
| 844 | 848 | ||
| 845 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | 849 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { |
| 846 | printf(" %-42s [%s]\n", | 850 | printf(" %-42s [%s]\n", |
| 847 | event_cache_name(type, op, i), | 851 | event_cache_name(type, op, i), |
| 848 | event_type_descriptors[4]); | 852 | event_type_descriptors[4]); |
| 849 | } | 853 | } |
| 850 | } | 854 | } |
| 851 | } | 855 | } |
| 852 | 856 | ||
| 853 | printf("\n"); | 857 | printf("\n"); |
| 854 | printf(" %-42s [raw hardware event descriptor]\n", | 858 | printf(" %-42s [raw hardware event descriptor]\n", |
| 855 | "rNNN"); | 859 | "rNNN"); |
| 856 | printf("\n"); | 860 | printf("\n"); |
| 857 | 861 | ||
| 858 | print_tracepoint_events(); | 862 | print_tracepoint_events(); |
| 859 | 863 | ||
| 860 | exit(129); | 864 | exit(129); |
| 861 | } | 865 | } |
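
Note: with the " %-42s [%s]\n" format used throughout, perf list output takes roughly this shape (entries vary by CPU, kernel, and mounted debugfs; the descriptor strings are indexed type+1 into event_type_descriptors[] because slot 0 is the empty fallback):

 cpu-cycles OR cycles                       [Hardware event]
 instructions                               [Hardware event]

 page-faults OR faults                      [Software event]
 context-switches OR cs                     [Software event]

 L1-dcache-loads                            [Hardware cache event]
 L1-dcache-load-misses                      [Hardware cache event]

 rNNN                                       [raw hardware event descriptor]

 sched:sched_switch                         [Tracepoint event]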
| 862 | 866 |