Commit 79c4581262e225a7c96d88b632b05ab3b5e9a52c
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (92 commits)
  powerpc: Remove unused 'protect4gb' boot parameter
  powerpc: Build-in e1000e for pseries & ppc64_defconfig
  powerpc/pseries: Make request_ras_irqs() available to other pseries code
  powerpc/numa: Use ibm,architecture-vec-5 to detect form 1 affinity
  powerpc/numa: Set a smaller value for RECLAIM_DISTANCE to enable zone reclaim
  powerpc: Use smt_snooze_delay=-1 to always busy loop
  powerpc: Remove check of ibm,smt-snooze-delay OF property
  powerpc/kdump: Fix race in kdump shutdown
  powerpc/kexec: Fix race in kexec shutdown
  powerpc/kexec: Speedup kexec hash PTE tear down
  powerpc/pseries: Add hcall to read 4 ptes at a time in real mode
  powerpc: Use more accurate limit for first segment memory allocations
  powerpc/kdump: Use chip->shutdown to disable IRQs
  powerpc/kdump: CPUs assume the context of the oopsing CPU
  powerpc/crashdump: Do not fail on NULL pointer dereferencing
  powerpc/eeh: Fix oops when probing in early boot
  powerpc/pci: Check devices status property when scanning OF tree
  powerpc/vio: Switch VIO Bus PM to use generic helpers
  powerpc: Avoid bad relocations in iSeries code
  powerpc: Use common cpu_die (fixes SMP+SUSPEND build)
  ...
Showing 118 changed files
- Documentation/powerpc/dts-bindings/4xx/reboot.txt
- Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
- arch/powerpc/Kconfig
- arch/powerpc/Kconfig.debug
- arch/powerpc/boot/Makefile
- arch/powerpc/boot/dts/iss4xx-mpic.dts
- arch/powerpc/boot/dts/iss4xx.dts
- arch/powerpc/boot/dts/mpc8315erdb.dts
- arch/powerpc/boot/dts/mpc8377_rdb.dts
- arch/powerpc/boot/dts/mpc8378_rdb.dts
- arch/powerpc/boot/dts/mpc8379_rdb.dts
- arch/powerpc/boot/dts/p1020rdb.dts
- arch/powerpc/boot/treeboot-iss4xx.c
- arch/powerpc/boot/wrapper
- arch/powerpc/configs/44x/iss476-smp_defconfig
- arch/powerpc/configs/ppc64_defconfig
- arch/powerpc/configs/pseries_defconfig
- arch/powerpc/include/asm/cache.h
- arch/powerpc/include/asm/cputable.h
- arch/powerpc/include/asm/hvcall.h
- arch/powerpc/include/asm/kexec.h
- arch/powerpc/include/asm/mmu-44x.h
- arch/powerpc/include/asm/mmu.h
- arch/powerpc/include/asm/mmzone.h
- arch/powerpc/include/asm/mpic.h
- arch/powerpc/include/asm/paca.h
- arch/powerpc/include/asm/parport.h
- arch/powerpc/include/asm/pgalloc-64.h
- arch/powerpc/include/asm/pgtable-ppc32.h
- arch/powerpc/include/asm/ptrace.h
- arch/powerpc/include/asm/reg.h
- arch/powerpc/include/asm/reg_booke.h
- arch/powerpc/include/asm/smp.h
- arch/powerpc/include/asm/topology.h
- arch/powerpc/kernel/asm-offsets.c
- arch/powerpc/kernel/cputable.c
- arch/powerpc/kernel/crash.c
- arch/powerpc/kernel/entry_32.S
- arch/powerpc/kernel/exceptions-64s.S
- arch/powerpc/kernel/head_44x.S
- arch/powerpc/kernel/head_8xx.S
- arch/powerpc/kernel/head_booke.h
- arch/powerpc/kernel/head_fsl_booke.S
- arch/powerpc/kernel/iommu.c
- arch/powerpc/kernel/irq.c
- arch/powerpc/kernel/kprobes.c
- arch/powerpc/kernel/lparcfg.c
- arch/powerpc/kernel/machine_kexec_64.c
- arch/powerpc/kernel/misc_32.S
- arch/powerpc/kernel/misc_64.S
- arch/powerpc/kernel/paca.c
- arch/powerpc/kernel/pci_of_scan.c
- arch/powerpc/kernel/process.c
- arch/powerpc/kernel/ptrace.c
- arch/powerpc/kernel/rtas.c
- arch/powerpc/kernel/rtasd.c
- arch/powerpc/kernel/setup-common.c
- arch/powerpc/kernel/setup_64.c
- arch/powerpc/kernel/smp.c
- arch/powerpc/kernel/sysfs.c
- arch/powerpc/kernel/traps.c
- arch/powerpc/kernel/vio.c
- arch/powerpc/lib/string.S
- arch/powerpc/mm/44x_mmu.c
- arch/powerpc/mm/fault.c
- arch/powerpc/mm/fsl_booke_mmu.c
- arch/powerpc/mm/init_64.c
- arch/powerpc/mm/mmu_context_nohash.c
- arch/powerpc/mm/mmu_decl.h
- arch/powerpc/mm/numa.c
- arch/powerpc/mm/pgtable_32.c
- arch/powerpc/mm/pgtable_64.c
- arch/powerpc/mm/tlb_nohash_low.S
- arch/powerpc/platforms/44x/Kconfig
- arch/powerpc/platforms/44x/Makefile
- arch/powerpc/platforms/44x/iss4xx.c
- arch/powerpc/platforms/83xx/mpc831x_rdb.c
- arch/powerpc/platforms/83xx/mpc837x_rdb.c
- arch/powerpc/platforms/86xx/mpc8610_hpcd.c
- arch/powerpc/platforms/Kconfig.cputype
- arch/powerpc/platforms/cell/cbe_cpufreq.c
- arch/powerpc/platforms/iseries/exception.S
- arch/powerpc/platforms/iseries/pci.c
- arch/powerpc/platforms/iseries/smp.c
- arch/powerpc/platforms/pasemi/cpufreq.c
- arch/powerpc/platforms/powermac/cpufreq_64.c
- arch/powerpc/platforms/powermac/low_i2c.c
- arch/powerpc/platforms/powermac/pmac.h
- arch/powerpc/platforms/powermac/setup.c
- arch/powerpc/platforms/powermac/smp.c
- arch/powerpc/platforms/pseries/Makefile
- arch/powerpc/platforms/pseries/dlpar.c
- arch/powerpc/platforms/pseries/eeh.c
- arch/powerpc/platforms/pseries/event_sources.c
- arch/powerpc/platforms/pseries/hotplug-cpu.c
- arch/powerpc/platforms/pseries/hvCall.S
- arch/powerpc/platforms/pseries/lpar.c
- arch/powerpc/platforms/pseries/plpar_wrappers.h
- arch/powerpc/platforms/pseries/pseries.h
- arch/powerpc/platforms/pseries/ras.c
- arch/powerpc/platforms/pseries/setup.c
- arch/powerpc/platforms/pseries/smp.c
- arch/powerpc/platforms/pseries/xics.c
- arch/powerpc/sysdev/mpc8xxx_gpio.c
- arch/powerpc/sysdev/mpic.c
- arch/powerpc/sysdev/ppc4xx_soc.c
- drivers/macintosh/macio-adb.c
- drivers/macintosh/smu.c
- drivers/macintosh/therm_adt746x.c
- drivers/macintosh/windfarm_pm81.c
- drivers/macintosh/windfarm_pm91.c
- drivers/misc/Makefile
- drivers/misc/hdpuftrs/Makefile
- drivers/misc/hdpuftrs/hdpu_cpustate.c
- drivers/misc/hdpuftrs/hdpu_nexus.c
- drivers/serial/mpsc.c
- include/linux/hdpu_features.h
- sound/aoa/core/gpio-pmf.c
Documentation/powerpc/dts-bindings/4xx/reboot.txt
1 | +Reboot property to control system reboot on PPC4xx systems: | |
2 | + | |
3 | +By setting "reset-type" to one of the following values, the default | |
4 | +software reset mechanism may be overridden. Here are the possible values of | |
5 | +"reset-type": | |
6 | + | |
7 | + 1 - PPC4xx core reset | |
8 | + 2 - PPC4xx chip reset | |
9 | + 3 - PPC4xx system reset (default) | |
10 | + | |
11 | +Example: | |
12 | + | |
13 | + cpu@0 { | |
14 | + device_type = "cpu"; | |
15 | + model = "PowerPC,440SPe"; | |
16 | + ... | |
17 | + reset-type = <2>; /* Use chip-reset */ | |
18 | + }; |
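For context, platform reset code can pick this property up from the CPU node; the fragment below is only a minimal sketch of that idea (the helper name ppc4xx_get_reset_type() and the fallback handling are illustrative assumptions, not code from this commit):

    #include <linux/of.h>

    /* Illustrative only: read the optional "reset-type" property from the
     * CPU node, falling back to the default system reset (3) when it is
     * missing or out of range. */
    static u32 ppc4xx_get_reset_type(void)
    {
            struct device_node *np;
            const u32 *prop;
            u32 reset_type = 3;     /* PPC4xx system reset (default) */

            np = of_find_node_by_type(NULL, "cpu");
            if (np) {
                    prop = of_get_property(np, "reset-type", NULL);
                    if (prop && *prop >= 1 && *prop <= 3)
                            reset_type = *prop;
                    of_node_put(np);
            }
            return reset_type;
    }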
Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
... | ... | @@ -11,7 +11,7 @@ |
11 | 11 | 83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx. |
12 | 12 | - #gpio-cells : Should be two. The first cell is the pin number and the |
13 | 13 | second cell is used to specify optional parameters (currently unused). |
14 | - - interrupts : Interrupt mapping for GPIO IRQ (currently unused). | |
14 | + - interrupts : Interrupt mapping for GPIO IRQ. | |
15 | 15 | - interrupt-parent : Phandle for the interrupt controller that |
16 | 16 | services interrupts for this device. |
17 | 17 | - gpio-controller : Marks the port as GPIO controller. |
... | ... | @@ -38,4 +38,24 @@ |
38 | 38 | |
39 | 39 | See booting-without-of.txt for details of how to specify GPIO |
40 | 40 | information for devices. |
41 | + | |
42 | +To use GPIO pins as interrupt sources for peripherals, specify the | |
43 | +GPIO controller as the interrupt parent and define GPIO number + | |
44 | +trigger mode using the interrupts property, which is defined like | |
45 | +this: | |
46 | + | |
47 | +interrupts = <number trigger>, where: | |
48 | + - number: GPIO pin (0..31) | |
49 | + - trigger: trigger mode: | |
50 | + 2 = trigger on falling edge | |
51 | + 3 = trigger on both edges | |
52 | + | |
53 | +Example of device using this is: | |
54 | + | |
55 | + funkyfpga@0 { | |
56 | + compatible = "funky-fpga"; | |
57 | + ... | |
58 | + interrupts = <4 3>; | |
59 | + interrupt-parent = <&gpio1>; | |
60 | + }; |
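A peripheral driver bound to a node like the funkyfpga example above would then map and request its GPIO-routed interrupt in the usual way; the sketch below is a hedged illustration (the funky_fpga_* names are invented for this example and are not part of this commit):

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/of.h>
    #include <linux/of_irq.h>

    /* Illustrative only: translate the node's interrupts/interrupt-parent
     * pair into a Linux IRQ number and install a handler on it. */
    static irqreturn_t funky_fpga_isr(int irq, void *dev_id)
    {
            /* acknowledge and handle the FPGA event here */
            return IRQ_HANDLED;
    }

    static int funky_fpga_setup_irq(struct device_node *np)
    {
            unsigned int irq = irq_of_parse_and_map(np, 0);

            if (!irq)
                    return -EINVAL;
            return request_irq(irq, funky_fpga_isr, 0, "funky-fpga", NULL);
    }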
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
... | ... | @@ -44,6 +44,18 @@ |
44 | 44 | |
45 | 45 | This option will slow down process creation somewhat. |
46 | 46 | |
47 | +config DEBUG_PER_CPU_MAPS | |
48 | + bool "Debug access to per_cpu maps" | |
49 | + depends on DEBUG_KERNEL | |
50 | + depends on SMP | |
51 | + default n | |
52 | + ---help--- | |
53 | + Say Y to verify that the per_cpu map being accessed has | |
54 | + been setup. Adds a fair amount of code to kernel memory | |
55 | + and decreases performance. | |
56 | + | |
57 | + Say N if unsure. | |
58 | + | |
47 | 59 | config HCALL_STATS |
48 | 60 | bool "Hypervisor call instrumentation" |
49 | 61 | depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS |
arch/powerpc/boot/Makefile
... | ... | @@ -44,6 +44,7 @@ |
44 | 44 | $(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405 |
45 | 45 | $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405 |
46 | 46 | $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405 |
47 | +$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405 | |
47 | 48 | $(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405 |
48 | 49 | |
49 | 50 | |
... | ... | @@ -77,7 +78,7 @@ |
77 | 78 | cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \ |
78 | 79 | virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \ |
79 | 80 | cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \ |
80 | - gamecube-head.S gamecube.c wii-head.S wii.c | |
81 | + gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c | |
81 | 82 | src-boot := $(src-wlib) $(src-plat) empty.c |
82 | 83 | |
83 | 84 | src-boot := $(addprefix $(obj)/, $(src-boot)) |
... | ... | @@ -169,7 +170,7 @@ |
169 | 170 | $(if $3, -s $3)$(if $4, -d $4)$(if $5, -i $5) vmlinux |
170 | 171 | |
171 | 172 | image-$(CONFIG_PPC_PSERIES) += zImage.pseries |
172 | -image-$(CONFIG_PPC_MAPLE) += zImage.pseries | |
173 | +image-$(CONFIG_PPC_MAPLE) += zImage.maple | |
173 | 174 | image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries |
174 | 175 | image-$(CONFIG_PPC_PS3) += dtbImage.ps3 |
175 | 176 | image-$(CONFIG_PPC_CELLEB) += zImage.pseries |
... | ... | @@ -206,6 +207,8 @@ |
206 | 207 | image-$(CONFIG_KATMAI) += cuImage.katmai |
207 | 208 | image-$(CONFIG_WARP) += cuImage.warp |
208 | 209 | image-$(CONFIG_YOSEMITE) += cuImage.yosemite |
210 | +image-$(CONFIG_ISS4xx) += treeImage.iss4xx \ | |
211 | + treeImage.iss4xx-mpic | |
209 | 212 | |
210 | 213 | # Board ports in arch/powerpc/platform/8xx/Kconfig |
211 | 214 | image-$(CONFIG_MPC86XADS) += cuImage.mpc866ads |
... | ... | @@ -351,7 +354,7 @@ |
351 | 354 | clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \ |
352 | 355 | zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \ |
353 | 356 | zImage.iseries zImage.miboot zImage.pmac zImage.pseries \ |
354 | - simpleImage.* otheros.bld *.dtb | |
357 | + zImage.maple simpleImage.* otheros.bld *.dtb | |
355 | 358 | |
356 | 359 | # clean up files cached by wrapper |
357 | 360 | clean-kernel := vmlinux.strip vmlinux.bin |
arch/powerpc/boot/dts/iss4xx-mpic.dts
1 | +/* | |
2 | + * Device Tree Source for IBM Embedded PPC 476 Platform | |
3 | + * | |
4 | + * Copyright 2010 Torez Smith, IBM Corporation. | |
5 | + * | |
6 | + * Based on earlier code: | |
7 | + * Copyright (c) 2006, 2007 IBM Corp. | |
8 | + * Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com> | |
9 | + * | |
10 | + * This file is licensed under the terms of the GNU General Public | |
11 | + * License version 2. This program is licensed "as is" without | |
12 | + * any warranty of any kind, whether express or implied. | |
13 | + */ | |
14 | + | |
15 | +/dts-v1/; | |
16 | + | |
17 | +/memreserve/ 0x01f00000 0x00100000; | |
18 | + | |
19 | +/ { | |
20 | + #address-cells = <2>; | |
21 | + #size-cells = <1>; | |
22 | + model = "ibm,iss-4xx"; | |
23 | + compatible = "ibm,iss-4xx"; | |
24 | + dcr-parent = <&{/cpus/cpu@0}>; | |
25 | + | |
26 | + aliases { | |
27 | + serial0 = &UART0; | |
28 | + }; | |
29 | + | |
30 | + cpus { | |
31 | + #address-cells = <1>; | |
32 | + #size-cells = <0>; | |
33 | + | |
34 | + cpu@0 { | |
35 | + device_type = "cpu"; | |
36 | + model = "PowerPC,4xx"; // real CPU changed in sim | |
37 | + reg = <0>; | |
38 | + clock-frequency = <100000000>; // 100Mhz :-) | |
39 | + timebase-frequency = <100000000>; | |
40 | + i-cache-line-size = <32>; | |
41 | + d-cache-line-size = <32>; | |
42 | + i-cache-size = <32768>; | |
43 | + d-cache-size = <32768>; | |
44 | + dcr-controller; | |
45 | + dcr-access-method = "native"; | |
46 | + status = "ok"; | |
47 | + }; | |
48 | + cpu@1 { | |
49 | + device_type = "cpu"; | |
50 | + model = "PowerPC,4xx"; // real CPU changed in sim | |
51 | + reg = <1>; | |
52 | + clock-frequency = <100000000>; // 100Mhz :-) | |
53 | + timebase-frequency = <100000000>; | |
54 | + i-cache-line-size = <32>; | |
55 | + d-cache-line-size = <32>; | |
56 | + i-cache-size = <32768>; | |
57 | + d-cache-size = <32768>; | |
58 | + dcr-controller; | |
59 | + dcr-access-method = "native"; | |
60 | + status = "disabled"; | |
61 | + enable-method = "spin-table"; | |
62 | + cpu-release-addr = <0 0x01f00100>; | |
63 | + }; | |
64 | + cpu@2 { | |
65 | + device_type = "cpu"; | |
66 | + model = "PowerPC,4xx"; // real CPU changed in sim | |
67 | + reg = <2>; | |
68 | + clock-frequency = <100000000>; // 100Mhz :-) | |
69 | + timebase-frequency = <100000000>; | |
70 | + i-cache-line-size = <32>; | |
71 | + d-cache-line-size = <32>; | |
72 | + i-cache-size = <32768>; | |
73 | + d-cache-size = <32768>; | |
74 | + dcr-controller; | |
75 | + dcr-access-method = "native"; | |
76 | + status = "disabled"; | |
77 | + enable-method = "spin-table"; | |
78 | + cpu-release-addr = <0 0x01f00200>; | |
79 | + }; | |
80 | + cpu@3 { | |
81 | + device_type = "cpu"; | |
82 | + model = "PowerPC,4xx"; // real CPU changed in sim | |
83 | + reg = <3>; | |
84 | + clock-frequency = <100000000>; // 100Mhz :-) | |
85 | + timebase-frequency = <100000000>; | |
86 | + i-cache-line-size = <32>; | |
87 | + d-cache-line-size = <32>; | |
88 | + i-cache-size = <32768>; | |
89 | + d-cache-size = <32768>; | |
90 | + dcr-controller; | |
91 | + dcr-access-method = "native"; | |
92 | + status = "disabled"; | |
93 | + enable-method = "spin-table"; | |
94 | + cpu-release-addr = <0 0x01f00300>; | |
95 | + }; | |
96 | + }; | |
97 | + | |
98 | + memory { | |
99 | + device_type = "memory"; | |
100 | + reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage | |
101 | + | |
102 | + }; | |
103 | + | |
104 | + MPIC: interrupt-controller { | |
105 | + compatible = "chrp,open-pic"; | |
106 | + interrupt-controller; | |
107 | + dcr-reg = <0xffc00000 0x00030000>; | |
108 | + #address-cells = <0>; | |
109 | + #size-cells = <0>; | |
110 | + #interrupt-cells = <2>; | |
111 | + | |
112 | + }; | |
113 | + | |
114 | + plb { | |
115 | + compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */ | |
116 | + #address-cells = <2>; | |
117 | + #size-cells = <1>; | |
118 | + ranges; | |
119 | + clock-frequency = <0>; // Filled in by zImage | |
120 | + | |
121 | + POB0: opb { | |
122 | + compatible = "ibm,opb-4xx", "ibm,opb"; | |
123 | + #address-cells = <1>; | |
124 | + #size-cells = <1>; | |
125 | + /* Wish there was a nicer way of specifying a full 32-bit | |
126 | + range */ | |
127 | + ranges = <0x00000000 0x00000001 0x00000000 0x80000000 | |
128 | + 0x80000000 0x00000001 0x80000000 0x80000000>; | |
129 | + clock-frequency = <0>; // Filled in by zImage | |
130 | + UART0: serial@40000200 { | |
131 | + device_type = "serial"; | |
132 | + compatible = "ns16550a"; | |
133 | + reg = <0x40000200 0x00000008>; | |
134 | + virtual-reg = <0xe0000200>; | |
135 | + clock-frequency = <11059200>; | |
136 | + current-speed = <115200>; | |
137 | + interrupt-parent = <&MPIC>; | |
138 | + interrupts = <0x0 0x2>; | |
139 | + }; | |
140 | + }; | |
141 | + }; | |
142 | + | |
143 | + nvrtc { | |
144 | + compatible = "ds1743-nvram", "ds1743", "rtc-ds1743"; | |
145 | + reg = <0 0xEF703000 0x2000>; | |
146 | + }; | |
147 | + iss-block { | |
148 | + compatible = "ibm,iss-sim-block-device"; | |
149 | + reg = <0 0xEF701000 0x1000>; | |
150 | + }; | |
151 | + | |
152 | + chosen { | |
153 | + linux,stdout-path = "/plb/opb/serial@40000200"; | |
154 | + }; | |
155 | +}; |
arch/powerpc/boot/dts/iss4xx.dts
1 | +/* | |
2 | + * Device Tree Source for IBM Embedded PPC 476 Platform | |
3 | + * | |
4 | + * Copyright 2010 Torez Smith, IBM Corporation. | |
5 | + * | |
6 | + * Based on earlier code: | |
7 | + * Copyright (c) 2006, 2007 IBM Corp. | |
8 | + * Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com> | |
9 | + * | |
10 | + * This file is licensed under the terms of the GNU General Public | |
11 | + * License version 2. This program is licensed "as is" without | |
12 | + * any warranty of any kind, whether express or implied. | |
13 | + */ | |
14 | + | |
15 | +/dts-v1/; | |
16 | + | |
17 | +/ { | |
18 | + #address-cells = <2>; | |
19 | + #size-cells = <1>; | |
20 | + model = "ibm,iss-4xx"; | |
21 | + compatible = "ibm,iss-4xx"; | |
22 | + dcr-parent = <&{/cpus/cpu@0}>; | |
23 | + | |
24 | + aliases { | |
25 | + serial0 = &UART0; | |
26 | + }; | |
27 | + | |
28 | + cpus { | |
29 | + #address-cells = <1>; | |
30 | + #size-cells = <0>; | |
31 | + | |
32 | + cpu@0 { | |
33 | + device_type = "cpu"; | |
34 | + model = "PowerPC,4xx"; // real CPU changed in sim | |
35 | + reg = <0x00000000>; | |
36 | + clock-frequency = <100000000>; // 100Mhz :-) | |
37 | + timebase-frequency = <100000000>; | |
38 | + i-cache-line-size = <32>; // may need fixup in sim | |
39 | + d-cache-line-size = <32>; // may need fixup in sim | |
40 | + i-cache-size = <32768>; /* may need fixup in sim */ | |
41 | + d-cache-size = <32768>; /* may need fixup in sim */ | |
42 | + dcr-controller; | |
43 | + dcr-access-method = "native"; | |
44 | + }; | |
45 | + }; | |
46 | + | |
47 | + memory { | |
48 | + device_type = "memory"; | |
49 | + reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage | |
50 | + }; | |
51 | + | |
52 | + UIC0: interrupt-controller0 { | |
53 | + compatible = "ibm,uic-4xx", "ibm,uic"; | |
54 | + interrupt-controller; | |
55 | + cell-index = <0>; | |
56 | + dcr-reg = <0x0c0 0x009>; | |
57 | + #address-cells = <0>; | |
58 | + #size-cells = <0>; | |
59 | + #interrupt-cells = <2>; | |
60 | + | |
61 | + }; | |
62 | + | |
63 | + UIC1: interrupt-controller1 { | |
64 | + compatible = "ibm,uic-4xx", "ibm,uic"; | |
65 | + interrupt-controller; | |
66 | + cell-index = <1>; | |
67 | + dcr-reg = <0x0d0 0x009>; | |
68 | + #address-cells = <0>; | |
69 | + #size-cells = <0>; | |
70 | + #interrupt-cells = <2>; | |
71 | + interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ | |
72 | + interrupt-parent = <&UIC0>; | |
73 | + }; | |
74 | + | |
75 | + plb { | |
76 | + compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */ | |
77 | + #address-cells = <2>; | |
78 | + #size-cells = <1>; | |
79 | + ranges; | |
80 | + clock-frequency = <0>; // Filled in by zImage | |
81 | + | |
82 | + POB0: opb { | |
83 | + compatible = "ibm,opb-4xx", "ibm,opb"; | |
84 | + #address-cells = <1>; | |
85 | + #size-cells = <1>; | |
86 | + /* Wish there was a nicer way of specifying a full 32-bit | |
87 | + range */ | |
88 | + ranges = <0x00000000 0x00000001 0x00000000 0x80000000 | |
89 | + 0x80000000 0x00000001 0x80000000 0x80000000>; | |
90 | + clock-frequency = <0>; // Filled in by zImage | |
91 | + UART0: serial@40000200 { | |
92 | + device_type = "serial"; | |
93 | + compatible = "ns16550a"; | |
94 | + reg = <0x40000200 0x00000008>; | |
95 | + virtual-reg = <0xe0000200>; | |
96 | + clock-frequency = <11059200>; | |
97 | + current-speed = <115200>; | |
98 | + interrupt-parent = <&UIC0>; | |
99 | + interrupts = <0x0 0x4>; | |
100 | + }; | |
101 | + }; | |
102 | + }; | |
103 | + | |
104 | + nvrtc { | |
105 | + compatible = "ds1743-nvram", "ds1743", "rtc-ds1743"; | |
106 | + reg = <0 0xEF703000 0x2000>; | |
107 | + }; | |
108 | + iss-block { | |
109 | + compatible = "ibm,iss-sim-block-device"; | |
110 | + reg = <0 0xEF701000 0x1000>; | |
111 | + }; | |
112 | + | |
113 | + chosen { | |
114 | + linux,stdout-path = "/plb/opb/serial@40000200"; | |
115 | + }; | |
116 | +}; |
arch/powerpc/boot/dts/mpc8315erdb.dts
... | ... | @@ -292,7 +292,7 @@ |
292 | 292 | fsl,num-channels = <4>; |
293 | 293 | fsl,channel-fifo-len = <24>; |
294 | 294 | fsl,exec-units-mask = <0x97c>; |
295 | - fsl,descriptor-types-mask = <0x3ab0abf>; | |
295 | + fsl,descriptor-types-mask = <0x3a30abf>; | |
296 | 296 | }; |
297 | 297 | |
298 | 298 | sata@18000 { |
... | ... | @@ -461,6 +461,20 @@ |
461 | 461 | 0x01000000 0 0x00000000 |
462 | 462 | 0x01000000 0 0x00000000 |
463 | 463 | 0 0x00800000>; |
464 | + }; | |
465 | + }; | |
466 | + | |
467 | + leds { | |
468 | + compatible = "gpio-leds"; | |
469 | + | |
470 | + pwr { | |
471 | + gpios = <&mcu_pio 0 0>; | |
472 | + default-state = "on"; | |
473 | + }; | |
474 | + | |
475 | + hdd { | |
476 | + gpios = <&mcu_pio 1 0>; | |
477 | + linux,default-trigger = "ide-disk"; | |
464 | 478 | }; |
465 | 479 | }; |
466 | 480 | }; |
arch/powerpc/boot/dts/mpc8377_rdb.dts
... | ... | @@ -486,5 +486,19 @@ |
486 | 486 | 0 0x00800000>; |
487 | 487 | }; |
488 | 488 | }; |
489 | + | |
490 | + leds { | |
491 | + compatible = "gpio-leds"; | |
492 | + | |
493 | + pwr { | |
494 | + gpios = <&mcu_pio 0 0>; | |
495 | + default-state = "on"; | |
496 | + }; | |
497 | + | |
498 | + hdd { | |
499 | + gpios = <&mcu_pio 1 0>; | |
500 | + linux,default-trigger = "ide-disk"; | |
501 | + }; | |
502 | + }; | |
489 | 503 | }; |
arch/powerpc/boot/dts/mpc8378_rdb.dts
... | ... | @@ -470,5 +470,19 @@ |
470 | 470 | 0 0x00800000>; |
471 | 471 | }; |
472 | 472 | }; |
473 | + | |
474 | + leds { | |
475 | + compatible = "gpio-leds"; | |
476 | + | |
477 | + pwr { | |
478 | + gpios = <&mcu_pio 0 0>; | |
479 | + default-state = "on"; | |
480 | + }; | |
481 | + | |
482 | + hdd { | |
483 | + gpios = <&mcu_pio 1 0>; | |
484 | + linux,default-trigger = "ide-disk"; | |
485 | + }; | |
486 | + }; | |
473 | 487 | }; |
arch/powerpc/boot/dts/mpc8379_rdb.dts
... | ... | @@ -436,5 +436,19 @@ |
436 | 436 | compatible = "fsl,mpc8349-pci"; |
437 | 437 | device_type = "pci"; |
438 | 438 | }; |
439 | + | |
440 | + leds { | |
441 | + compatible = "gpio-leds"; | |
442 | + | |
443 | + pwr { | |
444 | + gpios = <&mcu_pio 0 0>; | |
445 | + default-state = "on"; | |
446 | + }; | |
447 | + | |
448 | + hdd { | |
449 | + gpios = <&mcu_pio 1 0>; | |
450 | + linux,default-trigger = "ide-disk"; | |
451 | + }; | |
452 | + }; | |
439 | 453 | }; |
arch/powerpc/boot/dts/p1020rdb.dts
... | ... | @@ -19,6 +19,9 @@ |
19 | 19 | aliases { |
20 | 20 | serial0 = &serial0; |
21 | 21 | serial1 = &serial1; |
22 | + ethernet0 = &enet0; | |
23 | + ethernet1 = &enet1; | |
24 | + ethernet2 = &enet2; | |
22 | 25 | pci0 = &pci0; |
23 | 26 | pci1 = &pci1; |
24 | 27 | }; |
... | ... | @@ -346,6 +349,122 @@ |
346 | 349 | }; |
347 | 350 | }; |
348 | 351 | |
352 | + mdio@24000 { | |
353 | + #address-cells = <1>; | |
354 | + #size-cells = <0>; | |
355 | + compatible = "fsl,etsec2-mdio"; | |
356 | + reg = <0x24000 0x1000 0xb0030 0x4>; | |
357 | + | |
358 | + phy0: ethernet-phy@0 { | |
359 | + interrupt-parent = <&mpic>; | |
360 | + interrupts = <3 1>; | |
361 | + reg = <0x0>; | |
362 | + }; | |
363 | + | |
364 | + phy1: ethernet-phy@1 { | |
365 | + interrupt-parent = <&mpic>; | |
366 | + interrupts = <2 1>; | |
367 | + reg = <0x1>; | |
368 | + }; | |
369 | + }; | |
370 | + | |
371 | + mdio@25000 { | |
372 | + #address-cells = <1>; | |
373 | + #size-cells = <0>; | |
374 | + compatible = "fsl,etsec2-tbi"; | |
375 | + reg = <0x25000 0x1000 0xb1030 0x4>; | |
376 | + | |
377 | + tbi0: tbi-phy@11 { | |
378 | + reg = <0x11>; | |
379 | + device_type = "tbi-phy"; | |
380 | + }; | |
381 | + }; | |
382 | + | |
383 | + enet0: ethernet@b0000 { | |
384 | + #address-cells = <1>; | |
385 | + #size-cells = <1>; | |
386 | + device_type = "network"; | |
387 | + model = "eTSEC"; | |
388 | + compatible = "fsl,etsec2"; | |
389 | + fsl,num_rx_queues = <0x8>; | |
390 | + fsl,num_tx_queues = <0x8>; | |
391 | + local-mac-address = [ 00 00 00 00 00 00 ]; | |
392 | + interrupt-parent = <&mpic>; | |
393 | + fixed-link = <1 1 1000 0 0>; | |
394 | + phy-connection-type = "rgmii-id"; | |
395 | + | |
396 | + queue-group@0 { | |
397 | + #address-cells = <1>; | |
398 | + #size-cells = <1>; | |
399 | + reg = <0xb0000 0x1000>; | |
400 | + interrupts = <29 2 30 2 34 2>; | |
401 | + }; | |
402 | + | |
403 | + queue-group@1 { | |
404 | + #address-cells = <1>; | |
405 | + #size-cells = <1>; | |
406 | + reg = <0xb4000 0x1000>; | |
407 | + interrupts = <17 2 18 2 24 2>; | |
408 | + }; | |
409 | + }; | |
410 | + | |
411 | + enet1: ethernet@b1000 { | |
412 | + #address-cells = <1>; | |
413 | + #size-cells = <1>; | |
414 | + device_type = "network"; | |
415 | + model = "eTSEC"; | |
416 | + compatible = "fsl,etsec2"; | |
417 | + fsl,num_rx_queues = <0x8>; | |
418 | + fsl,num_tx_queues = <0x8>; | |
419 | + local-mac-address = [ 00 00 00 00 00 00 ]; | |
420 | + interrupt-parent = <&mpic>; | |
421 | + phy-handle = <&phy0>; | |
422 | + tbi-handle = <&tbi0>; | |
423 | + phy-connection-type = "sgmii"; | |
424 | + | |
425 | + queue-group@0 { | |
426 | + #address-cells = <1>; | |
427 | + #size-cells = <1>; | |
428 | + reg = <0xb1000 0x1000>; | |
429 | + interrupts = <35 2 36 2 40 2>; | |
430 | + }; | |
431 | + | |
432 | + queue-group@1 { | |
433 | + #address-cells = <1>; | |
434 | + #size-cells = <1>; | |
435 | + reg = <0xb5000 0x1000>; | |
436 | + interrupts = <51 2 52 2 67 2>; | |
437 | + }; | |
438 | + }; | |
439 | + | |
440 | + enet2: ethernet@b2000 { | |
441 | + #address-cells = <1>; | |
442 | + #size-cells = <1>; | |
443 | + device_type = "network"; | |
444 | + model = "eTSEC"; | |
445 | + compatible = "fsl,etsec2"; | |
446 | + fsl,num_rx_queues = <0x8>; | |
447 | + fsl,num_tx_queues = <0x8>; | |
448 | + local-mac-address = [ 00 00 00 00 00 00 ]; | |
449 | + interrupt-parent = <&mpic>; | |
450 | + phy-handle = <&phy1>; | |
451 | + phy-connection-type = "rgmii-id"; | |
452 | + | |
453 | + queue-group@0 { | |
454 | + #address-cells = <1>; | |
455 | + #size-cells = <1>; | |
456 | + reg = <0xb2000 0x1000>; | |
457 | + interrupts = <31 2 32 2 33 2>; | |
458 | + }; | |
459 | + | |
460 | + queue-group@1 { | |
461 | + #address-cells = <1>; | |
462 | + #size-cells = <1>; | |
463 | + reg = <0xb6000 0x1000>; | |
464 | + interrupts = <25 2 26 2 27 2>; | |
465 | + }; | |
466 | + }; | |
467 | + | |
349 | 468 | usb@22000 { |
350 | 469 | #address-cells = <1>; |
351 | 470 | #size-cells = <0>; |
... | ... | @@ -356,6 +475,11 @@ |
356 | 475 | phy_type = "ulpi"; |
357 | 476 | }; |
358 | 477 | |
478 | + /* USB2 is shared with localbus, so it must be disabled | |
479 | + by default. We can't put 'status = "disabled";' here | |
480 | + since U-Boot doesn't clear the status property when | |
481 | + it enables USB2. OTOH, U-Boot does create a new node | |
482 | + when there isn't any. So, just comment it out. | |
359 | 483 | usb@23000 { |
360 | 484 | #address-cells = <1>; |
361 | 485 | #size-cells = <0>; |
... | ... | @@ -365,6 +489,7 @@ |
365 | 489 | interrupts = <46 0x2>; |
366 | 490 | phy_type = "ulpi"; |
367 | 491 | }; |
492 | + */ | |
368 | 493 | |
369 | 494 | sdhci@2e000 { |
370 | 495 | compatible = "fsl,p1020-esdhc", "fsl,esdhc"; |
arch/powerpc/boot/treeboot-iss4xx.c
1 | +/* | |
2 | + * Copyright 2010 Ben. Herrenschmidt, IBM Corporation. | |
3 | + * | |
4 | + * Based on earlier code: | |
5 | + * Copyright (C) Paul Mackerras 1997. | |
6 | + * | |
7 | + * Matt Porter <mporter@kernel.crashing.org> | |
8 | + * Copyright 2002-2005 MontaVista Software Inc. | |
9 | + * | |
10 | + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> | |
11 | + * Copyright (c) 2003, 2004 Zultys Technologies | |
12 | + * | |
13 | + * Copyright 2007 David Gibson, IBM Corporation. | |
14 | + * | |
15 | + * This program is free software; you can redistribute it and/or | |
16 | + * modify it under the terms of the GNU General Public License | |
17 | + * as published by the Free Software Foundation; either version | |
18 | + * 2 of the License, or (at your option) any later version. | |
19 | + */ | |
20 | +#include <stdarg.h> | |
21 | +#include <stddef.h> | |
22 | +#include "types.h" | |
23 | +#include "elf.h" | |
24 | +#include "string.h" | |
25 | +#include "stdio.h" | |
26 | +#include "page.h" | |
27 | +#include "ops.h" | |
28 | +#include "reg.h" | |
29 | +#include "io.h" | |
30 | +#include "dcr.h" | |
31 | +#include "4xx.h" | |
32 | +#include "44x.h" | |
33 | +#include "libfdt.h" | |
34 | + | |
35 | +BSS_STACK(4096); | |
36 | + | |
37 | +static void iss_4xx_fixups(void) | |
38 | +{ | |
39 | + ibm4xx_sdram_fixup_memsize(); | |
40 | +} | |
41 | + | |
42 | +#define SPRN_PIR 0x11E /* Processor Identification Register */ | |
43 | +void platform_init(void) | |
44 | +{ | |
45 | + unsigned long end_of_ram = 0x08000000; | |
46 | + unsigned long avail_ram = end_of_ram - (unsigned long)_end; | |
47 | + u32 pir_reg; | |
48 | + | |
49 | + simple_alloc_init(_end, avail_ram, 128, 64); | |
50 | + platform_ops.fixups = iss_4xx_fixups; | |
51 | + platform_ops.exit = ibm44x_dbcr_reset; | |
52 | + pir_reg = mfspr(SPRN_PIR); | |
53 | + fdt_set_boot_cpuid_phys(_dtb_start, pir_reg); | |
54 | + fdt_init(_dtb_start); | |
55 | + serial_console_init(); | |
56 | +} |
arch/powerpc/boot/wrapper
... | ... | @@ -149,6 +149,10 @@ |
149 | 149 | platformo=$object/of.o |
150 | 150 | link_address='0x4000000' |
151 | 151 | ;; |
152 | +maple) | |
153 | + platformo=$object/of.o | |
154 | + link_address='0x400000' | |
155 | + ;; | |
152 | 156 | pmac|chrp) |
153 | 157 | platformo=$object/of.o |
154 | 158 | ;; |
... | ... | @@ -237,6 +241,9 @@ |
237 | 241 | link_address='0x600000' |
238 | 242 | platformo="$object/$platform-head.o $object/$platform.o" |
239 | 243 | ;; |
244 | +treeboot-iss4xx-mpic) | |
245 | + platformo="$object/treeboot-iss4xx.o" | |
246 | + ;; | |
240 | 247 | esac |
241 | 248 | |
242 | 249 | vmz="$tmpdir/`basename \"$kernel\"`.$ext" |
... | ... | @@ -321,7 +328,7 @@ |
321 | 328 | |
322 | 329 | # post-processing needed for some platforms |
323 | 330 | case "$platform" in |
324 | -pseries|chrp) | |
331 | +pseries|chrp|maple) | |
325 | 332 | $objbin/addnote "$ofile" |
326 | 333 | ;; |
327 | 334 | coff) |
arch/powerpc/configs/44x/iss476-smp_defconfig
1 | +# | |
2 | +# Automatically generated make config: don't edit | |
3 | +# Linux kernel version: 2.6.33 | |
4 | +# Thu Mar 4 11:50:12 2010 | |
5 | +# | |
6 | +# CONFIG_PPC64 is not set | |
7 | + | |
8 | +# | |
9 | +# Processor support | |
10 | +# | |
11 | +# CONFIG_PPC_BOOK3S_32 is not set | |
12 | +# CONFIG_PPC_85xx is not set | |
13 | +# CONFIG_PPC_8xx is not set | |
14 | +# CONFIG_40x is not set | |
15 | +CONFIG_44x=y | |
16 | +# CONFIG_E200 is not set | |
17 | +CONFIG_PPC_FPU=y | |
18 | +CONFIG_4xx=y | |
19 | +CONFIG_BOOKE=y | |
20 | +CONFIG_PTE_64BIT=y | |
21 | +CONFIG_PHYS_64BIT=y | |
22 | +CONFIG_PPC_MMU_NOHASH=y | |
23 | +CONFIG_PPC_MMU_NOHASH_32=y | |
24 | +# CONFIG_PPC_MM_SLICES is not set | |
25 | +CONFIG_SMP=y | |
26 | +CONFIG_NR_CPUS=4 | |
27 | +# CONFIG_NOT_COHERENT_CACHE is not set | |
28 | +CONFIG_PPC32=y | |
29 | +CONFIG_WORD_SIZE=32 | |
30 | +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y | |
31 | +CONFIG_MMU=y | |
32 | +CONFIG_GENERIC_CMOS_UPDATE=y | |
33 | +CONFIG_GENERIC_TIME=y | |
34 | +CONFIG_GENERIC_TIME_VSYSCALL=y | |
35 | +CONFIG_GENERIC_CLOCKEVENTS=y | |
36 | +CONFIG_GENERIC_HARDIRQS=y | |
37 | +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | |
38 | +# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set | |
39 | +# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set | |
40 | +CONFIG_IRQ_PER_CPU=y | |
41 | +CONFIG_NR_IRQS=512 | |
42 | +CONFIG_STACKTRACE_SUPPORT=y | |
43 | +CONFIG_HAVE_LATENCYTOP_SUPPORT=y | |
44 | +CONFIG_TRACE_IRQFLAGS_SUPPORT=y | |
45 | +CONFIG_LOCKDEP_SUPPORT=y | |
46 | +CONFIG_RWSEM_XCHGADD_ALGORITHM=y | |
47 | +CONFIG_ARCH_HAS_ILOG2_U32=y | |
48 | +CONFIG_GENERIC_HWEIGHT=y | |
49 | +CONFIG_GENERIC_FIND_NEXT_BIT=y | |
50 | +# CONFIG_ARCH_NO_VIRT_TO_BUS is not set | |
51 | +CONFIG_PPC=y | |
52 | +CONFIG_EARLY_PRINTK=y | |
53 | +CONFIG_GENERIC_NVRAM=y | |
54 | +CONFIG_SCHED_OMIT_FRAME_POINTER=y | |
55 | +CONFIG_ARCH_MAY_HAVE_PC_FDC=y | |
56 | +CONFIG_PPC_OF=y | |
57 | +CONFIG_OF=y | |
58 | +CONFIG_PPC_UDBG_16550=y | |
59 | +CONFIG_GENERIC_TBSYNC=y | |
60 | +CONFIG_AUDIT_ARCH=y | |
61 | +CONFIG_GENERIC_BUG=y | |
62 | +CONFIG_DTC=y | |
63 | +# CONFIG_DEFAULT_UIMAGE is not set | |
64 | +CONFIG_ARCH_HIBERNATION_POSSIBLE=y | |
65 | +CONFIG_PPC_DCR_NATIVE=y | |
66 | +# CONFIG_PPC_DCR_MMIO is not set | |
67 | +CONFIG_PPC_DCR=y | |
68 | +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y | |
69 | +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | |
70 | +CONFIG_CONSTRUCTORS=y | |
71 | + | |
72 | +# | |
73 | +# General setup | |
74 | +# | |
75 | +CONFIG_EXPERIMENTAL=y | |
76 | +CONFIG_LOCK_KERNEL=y | |
77 | +CONFIG_INIT_ENV_ARG_LIMIT=32 | |
78 | +CONFIG_LOCALVERSION="" | |
79 | +CONFIG_LOCALVERSION_AUTO=y | |
80 | +CONFIG_SWAP=y | |
81 | +CONFIG_SYSVIPC=y | |
82 | +CONFIG_SYSVIPC_SYSCTL=y | |
83 | +CONFIG_POSIX_MQUEUE=y | |
84 | +CONFIG_POSIX_MQUEUE_SYSCTL=y | |
85 | +# CONFIG_BSD_PROCESS_ACCT is not set | |
86 | +# CONFIG_TASKSTATS is not set | |
87 | +# CONFIG_AUDIT is not set | |
88 | + | |
89 | +# | |
90 | +# RCU Subsystem | |
91 | +# | |
92 | +CONFIG_TREE_RCU=y | |
93 | +# CONFIG_TREE_PREEMPT_RCU is not set | |
94 | +# CONFIG_TINY_RCU is not set | |
95 | +# CONFIG_RCU_TRACE is not set | |
96 | +CONFIG_RCU_FANOUT=32 | |
97 | +# CONFIG_RCU_FANOUT_EXACT is not set | |
98 | +# CONFIG_TREE_RCU_TRACE is not set | |
99 | +# CONFIG_IKCONFIG is not set | |
100 | +CONFIG_LOG_BUF_SHIFT=14 | |
101 | +CONFIG_GROUP_SCHED=y | |
102 | +CONFIG_FAIR_GROUP_SCHED=y | |
103 | +# CONFIG_RT_GROUP_SCHED is not set | |
104 | +CONFIG_USER_SCHED=y | |
105 | +# CONFIG_CGROUP_SCHED is not set | |
106 | +# CONFIG_CGROUPS is not set | |
107 | +CONFIG_SYSFS_DEPRECATED=y | |
108 | +CONFIG_SYSFS_DEPRECATED_V2=y | |
109 | +# CONFIG_RELAY is not set | |
110 | +# CONFIG_NAMESPACES is not set | |
111 | +CONFIG_BLK_DEV_INITRD=y | |
112 | +CONFIG_INITRAMFS_SOURCE="" | |
113 | +CONFIG_RD_GZIP=y | |
114 | +# CONFIG_RD_BZIP2 is not set | |
115 | +# CONFIG_RD_LZMA is not set | |
116 | +# CONFIG_RD_LZO is not set | |
117 | +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | |
118 | +CONFIG_SYSCTL=y | |
119 | +CONFIG_ANON_INODES=y | |
120 | +CONFIG_EMBEDDED=y | |
121 | +CONFIG_SYSCTL_SYSCALL=y | |
122 | +CONFIG_KALLSYMS=y | |
123 | +CONFIG_KALLSYMS_ALL=y | |
124 | +CONFIG_KALLSYMS_EXTRA_PASS=y | |
125 | +CONFIG_HOTPLUG=y | |
126 | +CONFIG_PRINTK=y | |
127 | +CONFIG_BUG=y | |
128 | +CONFIG_ELF_CORE=y | |
129 | +CONFIG_BASE_FULL=y | |
130 | +CONFIG_FUTEX=y | |
131 | +CONFIG_EPOLL=y | |
132 | +CONFIG_SIGNALFD=y | |
133 | +CONFIG_TIMERFD=y | |
134 | +CONFIG_EVENTFD=y | |
135 | +CONFIG_SHMEM=y | |
136 | +CONFIG_AIO=y | |
137 | +CONFIG_HAVE_PERF_EVENTS=y | |
138 | + | |
139 | +# | |
140 | +# Kernel Performance Events And Counters | |
141 | +# | |
142 | +CONFIG_PERF_EVENTS=y | |
143 | +CONFIG_EVENT_PROFILE=y | |
144 | +# CONFIG_PERF_COUNTERS is not set | |
145 | +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set | |
146 | +CONFIG_VM_EVENT_COUNTERS=y | |
147 | +CONFIG_SLUB_DEBUG=y | |
148 | +CONFIG_COMPAT_BRK=y | |
149 | +# CONFIG_SLAB is not set | |
150 | +CONFIG_SLUB=y | |
151 | +# CONFIG_SLOB is not set | |
152 | +CONFIG_PROFILING=y | |
153 | +CONFIG_TRACEPOINTS=y | |
154 | +CONFIG_OPROFILE=y | |
155 | +CONFIG_HAVE_OPROFILE=y | |
156 | +# CONFIG_KPROBES is not set | |
157 | +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | |
158 | +CONFIG_HAVE_IOREMAP_PROT=y | |
159 | +CONFIG_HAVE_KPROBES=y | |
160 | +CONFIG_HAVE_KRETPROBES=y | |
161 | +CONFIG_HAVE_ARCH_TRACEHOOK=y | |
162 | +CONFIG_HAVE_DMA_ATTRS=y | |
163 | +CONFIG_USE_GENERIC_SMP_HELPERS=y | |
164 | +CONFIG_HAVE_DMA_API_DEBUG=y | |
165 | + | |
166 | +# | |
167 | +# GCOV-based kernel profiling | |
168 | +# | |
169 | +# CONFIG_GCOV_KERNEL is not set | |
170 | +# CONFIG_SLOW_WORK is not set | |
171 | +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | |
172 | +CONFIG_SLABINFO=y | |
173 | +CONFIG_RT_MUTEXES=y | |
174 | +CONFIG_BASE_SMALL=0 | |
175 | +CONFIG_MODULES=y | |
176 | +# CONFIG_MODULE_FORCE_LOAD is not set | |
177 | +CONFIG_MODULE_UNLOAD=y | |
178 | +# CONFIG_MODULE_FORCE_UNLOAD is not set | |
179 | +# CONFIG_MODVERSIONS is not set | |
180 | +# CONFIG_MODULE_SRCVERSION_ALL is not set | |
181 | +CONFIG_STOP_MACHINE=y | |
182 | +CONFIG_BLOCK=y | |
183 | +CONFIG_LBDAF=y | |
184 | +# CONFIG_BLK_DEV_BSG is not set | |
185 | +# CONFIG_BLK_DEV_INTEGRITY is not set | |
186 | + | |
187 | +# | |
188 | +# IO Schedulers | |
189 | +# | |
190 | +CONFIG_IOSCHED_NOOP=y | |
191 | +CONFIG_IOSCHED_DEADLINE=y | |
192 | +CONFIG_IOSCHED_CFQ=y | |
193 | +# CONFIG_DEFAULT_DEADLINE is not set | |
194 | +CONFIG_DEFAULT_CFQ=y | |
195 | +# CONFIG_DEFAULT_NOOP is not set | |
196 | +CONFIG_DEFAULT_IOSCHED="cfq" | |
197 | +# CONFIG_INLINE_SPIN_TRYLOCK is not set | |
198 | +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set | |
199 | +# CONFIG_INLINE_SPIN_LOCK is not set | |
200 | +# CONFIG_INLINE_SPIN_LOCK_BH is not set | |
201 | +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set | |
202 | +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set | |
203 | +CONFIG_INLINE_SPIN_UNLOCK=y | |
204 | +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set | |
205 | +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y | |
206 | +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set | |
207 | +# CONFIG_INLINE_READ_TRYLOCK is not set | |
208 | +# CONFIG_INLINE_READ_LOCK is not set | |
209 | +# CONFIG_INLINE_READ_LOCK_BH is not set | |
210 | +# CONFIG_INLINE_READ_LOCK_IRQ is not set | |
211 | +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set | |
212 | +CONFIG_INLINE_READ_UNLOCK=y | |
213 | +# CONFIG_INLINE_READ_UNLOCK_BH is not set | |
214 | +CONFIG_INLINE_READ_UNLOCK_IRQ=y | |
215 | +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set | |
216 | +# CONFIG_INLINE_WRITE_TRYLOCK is not set | |
217 | +# CONFIG_INLINE_WRITE_LOCK is not set | |
218 | +# CONFIG_INLINE_WRITE_LOCK_BH is not set | |
219 | +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set | |
220 | +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set | |
221 | +CONFIG_INLINE_WRITE_UNLOCK=y | |
222 | +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set | |
223 | +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y | |
224 | +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set | |
225 | +CONFIG_MUTEX_SPIN_ON_OWNER=y | |
226 | +# CONFIG_FREEZER is not set | |
227 | + | |
228 | +# | |
229 | +# Platform support | |
230 | +# | |
231 | +# CONFIG_PPC_CELL is not set | |
232 | +# CONFIG_PPC_CELL_NATIVE is not set | |
233 | +# CONFIG_PQ2ADS is not set | |
234 | +CONFIG_PPC_47x=y | |
235 | +# CONFIG_BAMBOO is not set | |
236 | +# CONFIG_EBONY is not set | |
237 | +# CONFIG_SAM440EP is not set | |
238 | +# CONFIG_SEQUOIA is not set | |
239 | +# CONFIG_TAISHAN is not set | |
240 | +# CONFIG_KATMAI is not set | |
241 | +# CONFIG_RAINIER is not set | |
242 | +# CONFIG_WARP is not set | |
243 | +# CONFIG_ARCHES is not set | |
244 | +# CONFIG_CANYONLANDS is not set | |
245 | +# CONFIG_GLACIER is not set | |
246 | +# CONFIG_REDWOOD is not set | |
247 | +# CONFIG_EIGER is not set | |
248 | +# CONFIG_YOSEMITE is not set | |
249 | +CONFIG_ISS4xx=y | |
250 | +# CONFIG_XILINX_VIRTEX440_GENERIC_BOARD is not set | |
251 | +# CONFIG_PPC44x_SIMPLE is not set | |
252 | +# CONFIG_PPC4xx_GPIO is not set | |
253 | +# CONFIG_IPIC is not set | |
254 | +CONFIG_MPIC=y | |
255 | +# CONFIG_MPIC_WEIRD is not set | |
256 | +# CONFIG_PPC_I8259 is not set | |
257 | +# CONFIG_PPC_RTAS is not set | |
258 | +# CONFIG_MMIO_NVRAM is not set | |
259 | +# CONFIG_PPC_MPC106 is not set | |
260 | +# CONFIG_PPC_970_NAP is not set | |
261 | +# CONFIG_PPC_INDIRECT_IO is not set | |
262 | +# CONFIG_GENERIC_IOMAP is not set | |
263 | +# CONFIG_CPU_FREQ is not set | |
264 | +# CONFIG_FSL_ULI1575 is not set | |
265 | +CONFIG_OF_RTC=y | |
266 | +# CONFIG_SIMPLE_GPIO is not set | |
267 | + | |
268 | +# | |
269 | +# Kernel options | |
270 | +# | |
271 | +# CONFIG_HIGHMEM is not set | |
272 | +# CONFIG_NO_HZ is not set | |
273 | +# CONFIG_HIGH_RES_TIMERS is not set | |
274 | +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | |
275 | +CONFIG_HZ_100=y | |
276 | +# CONFIG_HZ_250 is not set | |
277 | +# CONFIG_HZ_300 is not set | |
278 | +# CONFIG_HZ_1000 is not set | |
279 | +CONFIG_HZ=100 | |
280 | +# CONFIG_SCHED_HRTICK is not set | |
281 | +CONFIG_PREEMPT_NONE=y | |
282 | +# CONFIG_PREEMPT_VOLUNTARY is not set | |
283 | +# CONFIG_PREEMPT is not set | |
284 | +CONFIG_BINFMT_ELF=y | |
285 | +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | |
286 | +# CONFIG_HAVE_AOUT is not set | |
287 | +# CONFIG_BINFMT_MISC is not set | |
288 | +CONFIG_MATH_EMULATION=y | |
289 | +# CONFIG_IOMMU_HELPER is not set | |
290 | +# CONFIG_SWIOTLB is not set | |
291 | +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | |
292 | +CONFIG_ARCH_HAS_WALK_MEMORY=y | |
293 | +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y | |
294 | +CONFIG_IRQ_ALL_CPUS=y | |
295 | +CONFIG_SPARSE_IRQ=y | |
296 | +CONFIG_MAX_ACTIVE_REGIONS=32 | |
297 | +CONFIG_ARCH_FLATMEM_ENABLE=y | |
298 | +CONFIG_ARCH_POPULATES_NODE_MAP=y | |
299 | +CONFIG_SELECT_MEMORY_MODEL=y | |
300 | +CONFIG_FLATMEM_MANUAL=y | |
301 | +# CONFIG_DISCONTIGMEM_MANUAL is not set | |
302 | +# CONFIG_SPARSEMEM_MANUAL is not set | |
303 | +CONFIG_FLATMEM=y | |
304 | +CONFIG_FLAT_NODE_MEM_MAP=y | |
305 | +CONFIG_PAGEFLAGS_EXTENDED=y | |
306 | +CONFIG_SPLIT_PTLOCK_CPUS=4 | |
307 | +CONFIG_MIGRATION=y | |
308 | +CONFIG_PHYS_ADDR_T_64BIT=y | |
309 | +CONFIG_ZONE_DMA_FLAG=1 | |
310 | +CONFIG_BOUNCE=y | |
311 | +CONFIG_VIRT_TO_BUS=y | |
312 | +# CONFIG_KSM is not set | |
313 | +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | |
314 | +CONFIG_STDBINUTILS=y | |
315 | +CONFIG_PPC_4K_PAGES=y | |
316 | +# CONFIG_PPC_16K_PAGES is not set | |
317 | +# CONFIG_PPC_64K_PAGES is not set | |
318 | +# CONFIG_PPC_256K_PAGES is not set | |
319 | +CONFIG_FORCE_MAX_ZONEORDER=11 | |
320 | +CONFIG_PROC_DEVICETREE=y | |
321 | +CONFIG_CMDLINE_BOOL=y | |
322 | +CONFIG_CMDLINE="root=/dev/issblk0" | |
323 | +CONFIG_EXTRA_TARGETS="" | |
324 | +CONFIG_SECCOMP=y | |
325 | +CONFIG_ISA_DMA_API=y | |
326 | + | |
327 | +# | |
328 | +# Bus options | |
329 | +# | |
330 | +CONFIG_ZONE_DMA=y | |
331 | +CONFIG_4xx_SOC=y | |
332 | +CONFIG_PPC_PCI_CHOICE=y | |
333 | +# CONFIG_PCI is not set | |
334 | +# CONFIG_PCI_DOMAINS is not set | |
335 | +# CONFIG_PCI_SYSCALL is not set | |
336 | +# CONFIG_ARCH_SUPPORTS_MSI is not set | |
337 | +# CONFIG_PCCARD is not set | |
338 | +# CONFIG_HAS_RAPIDIO is not set | |
339 | + | |
340 | +# | |
341 | +# Advanced setup | |
342 | +# | |
343 | +# CONFIG_ADVANCED_OPTIONS is not set | |
344 | + | |
345 | +# | |
346 | +# Default settings for advanced configuration options are used | |
347 | +# | |
348 | +CONFIG_LOWMEM_SIZE=0x30000000 | |
349 | +CONFIG_PAGE_OFFSET=0xc0000000 | |
350 | +CONFIG_KERNEL_START=0xc0000000 | |
351 | +CONFIG_PHYSICAL_START=0x00000000 | |
352 | +CONFIG_TASK_SIZE=0xc0000000 | |
353 | +CONFIG_NET=y | |
354 | + | |
355 | +# | |
356 | +# Networking options | |
357 | +# | |
358 | +CONFIG_PACKET=y | |
359 | +# CONFIG_PACKET_MMAP is not set | |
360 | +CONFIG_UNIX=y | |
361 | +# CONFIG_NET_KEY is not set | |
362 | +CONFIG_INET=y | |
363 | +# CONFIG_IP_MULTICAST is not set | |
364 | +# CONFIG_IP_ADVANCED_ROUTER is not set | |
365 | +CONFIG_IP_FIB_HASH=y | |
366 | +CONFIG_IP_PNP=y | |
367 | +CONFIG_IP_PNP_DHCP=y | |
368 | +CONFIG_IP_PNP_BOOTP=y | |
369 | +# CONFIG_IP_PNP_RARP is not set | |
370 | +# CONFIG_NET_IPIP is not set | |
371 | +# CONFIG_NET_IPGRE is not set | |
372 | +# CONFIG_ARPD is not set | |
373 | +# CONFIG_SYN_COOKIES is not set | |
374 | +# CONFIG_INET_AH is not set | |
375 | +# CONFIG_INET_ESP is not set | |
376 | +# CONFIG_INET_IPCOMP is not set | |
377 | +# CONFIG_INET_XFRM_TUNNEL is not set | |
378 | +# CONFIG_INET_TUNNEL is not set | |
379 | +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set | |
380 | +# CONFIG_INET_XFRM_MODE_TUNNEL is not set | |
381 | +# CONFIG_INET_XFRM_MODE_BEET is not set | |
382 | +# CONFIG_INET_LRO is not set | |
383 | +CONFIG_INET_DIAG=y | |
384 | +CONFIG_INET_TCP_DIAG=y | |
385 | +# CONFIG_TCP_CONG_ADVANCED is not set | |
386 | +CONFIG_TCP_CONG_CUBIC=y | |
387 | +CONFIG_DEFAULT_TCP_CONG="cubic" | |
388 | +# CONFIG_TCP_MD5SIG is not set | |
389 | +# CONFIG_IPV6 is not set | |
390 | +# CONFIG_NETWORK_SECMARK is not set | |
391 | +# CONFIG_NETFILTER is not set | |
392 | +# CONFIG_IP_DCCP is not set | |
393 | +# CONFIG_IP_SCTP is not set | |
394 | +# CONFIG_RDS is not set | |
395 | +# CONFIG_TIPC is not set | |
396 | +# CONFIG_ATM is not set | |
397 | +# CONFIG_BRIDGE is not set | |
398 | +# CONFIG_NET_DSA is not set | |
399 | +# CONFIG_VLAN_8021Q is not set | |
400 | +# CONFIG_DECNET is not set | |
401 | +# CONFIG_LLC2 is not set | |
402 | +# CONFIG_IPX is not set | |
403 | +# CONFIG_ATALK is not set | |
404 | +# CONFIG_X25 is not set | |
405 | +# CONFIG_LAPB is not set | |
406 | +# CONFIG_ECONET is not set | |
407 | +# CONFIG_WAN_ROUTER is not set | |
408 | +# CONFIG_PHONET is not set | |
409 | +# CONFIG_IEEE802154 is not set | |
410 | +# CONFIG_NET_SCHED is not set | |
411 | +# CONFIG_DCB is not set | |
412 | + | |
413 | +# | |
414 | +# Network testing | |
415 | +# | |
416 | +# CONFIG_NET_PKTGEN is not set | |
417 | +# CONFIG_NET_DROP_MONITOR is not set | |
418 | +# CONFIG_HAMRADIO is not set | |
419 | +# CONFIG_CAN is not set | |
420 | +# CONFIG_IRDA is not set | |
421 | +# CONFIG_BT is not set | |
422 | +# CONFIG_AF_RXRPC is not set | |
423 | +CONFIG_WIRELESS=y | |
424 | +# CONFIG_CFG80211 is not set | |
425 | +# CONFIG_LIB80211 is not set | |
426 | + | |
427 | +# | |
428 | +# CFG80211 needs to be enabled for MAC80211 | |
429 | +# | |
430 | +# CONFIG_WIMAX is not set | |
431 | +# CONFIG_RFKILL is not set | |
432 | +# CONFIG_NET_9P is not set | |
433 | + | |
434 | +# | |
435 | +# Device Drivers | |
436 | +# | |
437 | + | |
438 | +# | |
439 | +# Generic Driver Options | |
440 | +# | |
441 | +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |
442 | +# CONFIG_DEVTMPFS is not set | |
443 | +CONFIG_STANDALONE=y | |
444 | +CONFIG_PREVENT_FIRMWARE_BUILD=y | |
445 | +CONFIG_FW_LOADER=y | |
446 | +CONFIG_FIRMWARE_IN_KERNEL=y | |
447 | +CONFIG_EXTRA_FIRMWARE="" | |
448 | +# CONFIG_DEBUG_DRIVER is not set | |
449 | +# CONFIG_DEBUG_DEVRES is not set | |
450 | +# CONFIG_SYS_HYPERVISOR is not set | |
451 | +CONFIG_CONNECTOR=y | |
452 | +CONFIG_PROC_EVENTS=y | |
453 | +CONFIG_MTD=y | |
454 | +# CONFIG_MTD_DEBUG is not set | |
455 | +# CONFIG_MTD_TESTS is not set | |
456 | +# CONFIG_MTD_CONCAT is not set | |
457 | +CONFIG_MTD_PARTITIONS=y | |
458 | +# CONFIG_MTD_REDBOOT_PARTS is not set | |
459 | +# CONFIG_MTD_CMDLINE_PARTS is not set | |
460 | +CONFIG_MTD_OF_PARTS=y | |
461 | +# CONFIG_MTD_AR7_PARTS is not set | |
462 | + | |
463 | +# | |
464 | +# User Modules And Translation Layers | |
465 | +# | |
466 | +CONFIG_MTD_CHAR=y | |
467 | +CONFIG_MTD_BLKDEVS=y | |
468 | +CONFIG_MTD_BLOCK=y | |
469 | +# CONFIG_FTL is not set | |
470 | +# CONFIG_NFTL is not set | |
471 | +# CONFIG_INFTL is not set | |
472 | +# CONFIG_RFD_FTL is not set | |
473 | +# CONFIG_SSFDC is not set | |
474 | +# CONFIG_MTD_OOPS is not set | |
475 | + | |
476 | +# | |
477 | +# RAM/ROM/Flash chip drivers | |
478 | +# | |
479 | +# CONFIG_MTD_CFI is not set | |
480 | +CONFIG_MTD_JEDECPROBE=y | |
481 | +CONFIG_MTD_GEN_PROBE=y | |
482 | +# CONFIG_MTD_CFI_ADV_OPTIONS is not set | |
483 | +CONFIG_MTD_MAP_BANK_WIDTH_1=y | |
484 | +CONFIG_MTD_MAP_BANK_WIDTH_2=y | |
485 | +CONFIG_MTD_MAP_BANK_WIDTH_4=y | |
486 | +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set | |
487 | +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set | |
488 | +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set | |
489 | +CONFIG_MTD_CFI_I1=y | |
490 | +CONFIG_MTD_CFI_I2=y | |
491 | +# CONFIG_MTD_CFI_I4 is not set | |
492 | +# CONFIG_MTD_CFI_I8 is not set | |
493 | +# CONFIG_MTD_CFI_INTELEXT is not set | |
494 | +CONFIG_MTD_CFI_AMDSTD=y | |
495 | +# CONFIG_MTD_CFI_STAA is not set | |
496 | +CONFIG_MTD_CFI_UTIL=y | |
497 | +# CONFIG_MTD_RAM is not set | |
498 | +# CONFIG_MTD_ROM is not set | |
499 | +# CONFIG_MTD_ABSENT is not set | |
500 | + | |
501 | +# | |
502 | +# Mapping drivers for chip access | |
503 | +# | |
504 | +# CONFIG_MTD_COMPLEX_MAPPINGS is not set | |
505 | +# CONFIG_MTD_PHYSMAP is not set | |
506 | +CONFIG_MTD_PHYSMAP_OF=y | |
507 | +# CONFIG_MTD_PLATRAM is not set | |
508 | + | |
509 | +# | |
510 | +# Self-contained MTD device drivers | |
511 | +# | |
512 | +# CONFIG_MTD_SLRAM is not set | |
513 | +# CONFIG_MTD_PHRAM is not set | |
514 | +# CONFIG_MTD_MTDRAM is not set | |
515 | +# CONFIG_MTD_BLOCK2MTD is not set | |
516 | + | |
517 | +# | |
518 | +# Disk-On-Chip Device Drivers | |
519 | +# | |
520 | +# CONFIG_MTD_DOC2000 is not set | |
521 | +# CONFIG_MTD_DOC2001 is not set | |
522 | +# CONFIG_MTD_DOC2001PLUS is not set | |
523 | +# CONFIG_MTD_NAND is not set | |
524 | +# CONFIG_MTD_ONENAND is not set | |
525 | + | |
526 | +# | |
527 | +# LPDDR flash memory drivers | |
528 | +# | |
529 | +# CONFIG_MTD_LPDDR is not set | |
530 | + | |
531 | +# | |
532 | +# UBI - Unsorted block images | |
533 | +# | |
534 | +# CONFIG_MTD_UBI is not set | |
535 | +CONFIG_OF_DEVICE=y | |
536 | +# CONFIG_PARPORT is not set | |
537 | +CONFIG_BLK_DEV=y | |
538 | +# CONFIG_BLK_DEV_FD is not set | |
539 | +# CONFIG_BLK_DEV_COW_COMMON is not set | |
540 | +# CONFIG_BLK_DEV_LOOP is not set | |
541 | +# CONFIG_BLK_DEV_DRBD is not set | |
542 | +# CONFIG_BLK_DEV_NBD is not set | |
543 | +CONFIG_BLK_DEV_RAM=y | |
544 | +CONFIG_BLK_DEV_RAM_COUNT=16 | |
545 | +CONFIG_BLK_DEV_RAM_SIZE=35000 | |
546 | +# CONFIG_BLK_DEV_XIP is not set | |
547 | +# CONFIG_CDROM_PKTCDVD is not set | |
548 | +# CONFIG_ATA_OVER_ETH is not set | |
549 | +# CONFIG_XILINX_SYSACE is not set | |
550 | +# CONFIG_BLK_DEV_HD is not set | |
551 | +CONFIG_MISC_DEVICES=y | |
552 | +# CONFIG_ENCLOSURE_SERVICES is not set | |
553 | +# CONFIG_C2PORT is not set | |
554 | + | |
555 | +# | |
556 | +# EEPROM support | |
557 | +# | |
558 | +# CONFIG_EEPROM_93CX6 is not set | |
559 | +CONFIG_HAVE_IDE=y | |
560 | +# CONFIG_IDE is not set | |
561 | + | |
562 | +# | |
563 | +# SCSI device support | |
564 | +# | |
565 | +# CONFIG_RAID_ATTRS is not set | |
566 | +# CONFIG_SCSI is not set | |
567 | +# CONFIG_SCSI_DMA is not set | |
568 | +# CONFIG_SCSI_NETLINK is not set | |
569 | +# CONFIG_ATA is not set | |
570 | +# CONFIG_MD is not set | |
571 | +# CONFIG_MACINTOSH_DRIVERS is not set | |
572 | +# CONFIG_NETDEVICES is not set | |
573 | +# CONFIG_ISDN is not set | |
574 | +# CONFIG_PHONE is not set | |
575 | + | |
576 | +# | |
577 | +# Input device support | |
578 | +# | |
579 | +# CONFIG_INPUT is not set | |
580 | + | |
581 | +# | |
582 | +# Hardware I/O ports | |
583 | +# | |
584 | +# CONFIG_SERIO is not set | |
585 | +# CONFIG_GAMEPORT is not set | |
586 | + | |
587 | +# | |
588 | +# Character devices | |
589 | +# | |
590 | +# CONFIG_VT is not set | |
591 | +CONFIG_DEVKMEM=y | |
592 | +# CONFIG_SERIAL_NONSTANDARD is not set | |
593 | + | |
594 | +# | |
595 | +# Serial drivers | |
596 | +# | |
597 | +CONFIG_SERIAL_8250=y | |
598 | +CONFIG_SERIAL_8250_CONSOLE=y | |
599 | +CONFIG_SERIAL_8250_NR_UARTS=4 | |
600 | +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | |
601 | +CONFIG_SERIAL_8250_EXTENDED=y | |
602 | +# CONFIG_SERIAL_8250_MANY_PORTS is not set | |
603 | +CONFIG_SERIAL_8250_SHARE_IRQ=y | |
604 | +# CONFIG_SERIAL_8250_DETECT_IRQ is not set | |
605 | +# CONFIG_SERIAL_8250_RSA is not set | |
606 | + | |
607 | +# | |
608 | +# Non-8250 serial port support | |
609 | +# | |
610 | +# CONFIG_SERIAL_UARTLITE is not set | |
611 | +CONFIG_SERIAL_CORE=y | |
612 | +CONFIG_SERIAL_CORE_CONSOLE=y | |
613 | +CONFIG_SERIAL_OF_PLATFORM=y | |
614 | +# CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL is not set | |
615 | +# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set | |
616 | +CONFIG_UNIX98_PTYS=y | |
617 | +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | |
618 | +CONFIG_LEGACY_PTYS=y | |
619 | +CONFIG_LEGACY_PTY_COUNT=256 | |
620 | +# CONFIG_HVC_UDBG is not set | |
621 | +# CONFIG_IPMI_HANDLER is not set | |
622 | +# CONFIG_HW_RANDOM is not set | |
623 | +# CONFIG_NVRAM is not set | |
624 | +# CONFIG_GEN_RTC is not set | |
625 | +# CONFIG_R3964 is not set | |
626 | +# CONFIG_RAW_DRIVER is not set | |
627 | +# CONFIG_TCG_TPM is not set | |
628 | +# CONFIG_I2C is not set | |
629 | +# CONFIG_SPI is not set | |
630 | + | |
631 | +# | |
632 | +# PPS support | |
633 | +# | |
634 | +# CONFIG_PPS is not set | |
635 | +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | |
636 | +# CONFIG_GPIOLIB is not set | |
637 | +# CONFIG_W1 is not set | |
638 | +# CONFIG_POWER_SUPPLY is not set | |
639 | +# CONFIG_HWMON is not set | |
640 | +CONFIG_THERMAL=y | |
641 | +# CONFIG_WATCHDOG is not set | |
642 | +CONFIG_SSB_POSSIBLE=y | |
643 | + | |
644 | +# | |
645 | +# Sonics Silicon Backplane | |
646 | +# | |
647 | +# CONFIG_SSB is not set | |
648 | + | |
649 | +# | |
650 | +# Multifunction device drivers | |
651 | +# | |
652 | +# CONFIG_MFD_CORE is not set | |
653 | +# CONFIG_MFD_SM501 is not set | |
654 | +# CONFIG_HTC_PASIC3 is not set | |
655 | +# CONFIG_MFD_TMIO is not set | |
656 | +# CONFIG_REGULATOR is not set | |
657 | +# CONFIG_MEDIA_SUPPORT is not set | |
658 | + | |
659 | +# | |
660 | +# Graphics support | |
661 | +# | |
662 | +# CONFIG_VGASTATE is not set | |
663 | +# CONFIG_VIDEO_OUTPUT_CONTROL is not set | |
664 | +# CONFIG_FB is not set | |
665 | +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set | |
666 | + | |
667 | +# | |
668 | +# Display device support | |
669 | +# | |
670 | +# CONFIG_DISPLAY_SUPPORT is not set | |
671 | +# CONFIG_SOUND is not set | |
672 | +# CONFIG_USB_SUPPORT is not set | |
673 | +# CONFIG_MMC is not set | |
674 | +# CONFIG_MEMSTICK is not set | |
675 | +# CONFIG_NEW_LEDS is not set | |
676 | +# CONFIG_ACCESSIBILITY is not set | |
677 | +# CONFIG_EDAC is not set | |
678 | +# CONFIG_RTC_CLASS is not set | |
679 | +# CONFIG_DMADEVICES is not set | |
680 | +# CONFIG_AUXDISPLAY is not set | |
681 | +# CONFIG_UIO is not set | |
682 | + | |
683 | +# | |
684 | +# TI VLYNQ | |
685 | +# | |
686 | +# CONFIG_STAGING is not set | |
687 | + | |
688 | +# | |
689 | +# File systems | |
690 | +# | |
691 | +CONFIG_EXT2_FS=y | |
692 | +# CONFIG_EXT2_FS_XATTR is not set | |
693 | +# CONFIG_EXT2_FS_XIP is not set | |
694 | +CONFIG_EXT3_FS=y | |
695 | +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | |
696 | +CONFIG_EXT3_FS_XATTR=y | |
697 | +CONFIG_EXT3_FS_POSIX_ACL=y | |
698 | +CONFIG_EXT3_FS_SECURITY=y | |
699 | +# CONFIG_EXT4_FS is not set | |
700 | +CONFIG_JBD=y | |
701 | +# CONFIG_JBD_DEBUG is not set | |
702 | +CONFIG_FS_MBCACHE=y | |
703 | +# CONFIG_REISERFS_FS is not set | |
704 | +# CONFIG_JFS_FS is not set | |
705 | +CONFIG_FS_POSIX_ACL=y | |
706 | +# CONFIG_XFS_FS is not set | |
707 | +# CONFIG_GFS2_FS is not set | |
708 | +# CONFIG_OCFS2_FS is not set | |
709 | +# CONFIG_BTRFS_FS is not set | |
710 | +# CONFIG_NILFS2_FS is not set | |
711 | +CONFIG_FILE_LOCKING=y | |
712 | +CONFIG_FSNOTIFY=y | |
713 | +CONFIG_DNOTIFY=y | |
714 | +CONFIG_INOTIFY=y | |
715 | +CONFIG_INOTIFY_USER=y | |
716 | +# CONFIG_QUOTA is not set | |
717 | +# CONFIG_AUTOFS_FS is not set | |
718 | +# CONFIG_AUTOFS4_FS is not set | |
719 | +# CONFIG_FUSE_FS is not set | |
720 | + | |
721 | +# | |
722 | +# Caches | |
723 | +# | |
724 | +# CONFIG_FSCACHE is not set | |
725 | + | |
726 | +# | |
727 | +# CD-ROM/DVD Filesystems | |
728 | +# | |
729 | +# CONFIG_ISO9660_FS is not set | |
730 | +# CONFIG_UDF_FS is not set | |
731 | + | |
732 | +# | |
733 | +# DOS/FAT/NT Filesystems | |
734 | +# | |
735 | +# CONFIG_MSDOS_FS is not set | |
736 | +# CONFIG_VFAT_FS is not set | |
737 | +# CONFIG_NTFS_FS is not set | |
738 | + | |
739 | +# | |
740 | +# Pseudo filesystems | |
741 | +# | |
742 | +CONFIG_PROC_FS=y | |
743 | +CONFIG_PROC_KCORE=y | |
744 | +CONFIG_PROC_SYSCTL=y | |
745 | +CONFIG_PROC_PAGE_MONITOR=y | |
746 | +CONFIG_SYSFS=y | |
747 | +CONFIG_TMPFS=y | |
748 | +# CONFIG_TMPFS_POSIX_ACL is not set | |
749 | +# CONFIG_HUGETLB_PAGE is not set | |
750 | +# CONFIG_CONFIGFS_FS is not set | |
751 | +CONFIG_MISC_FILESYSTEMS=y | |
752 | +# CONFIG_ADFS_FS is not set | |
753 | +# CONFIG_AFFS_FS is not set | |
754 | +# CONFIG_HFS_FS is not set | |
755 | +# CONFIG_HFSPLUS_FS is not set | |
756 | +# CONFIG_BEFS_FS is not set | |
757 | +# CONFIG_BFS_FS is not set | |
758 | +# CONFIG_EFS_FS is not set | |
759 | +# CONFIG_JFFS2_FS is not set | |
760 | +CONFIG_CRAMFS=y | |
761 | +# CONFIG_SQUASHFS is not set | |
762 | +# CONFIG_VXFS_FS is not set | |
763 | +# CONFIG_MINIX_FS is not set | |
764 | +# CONFIG_OMFS_FS is not set | |
765 | +# CONFIG_HPFS_FS is not set | |
766 | +# CONFIG_QNX4FS_FS is not set | |
767 | +# CONFIG_ROMFS_FS is not set | |
768 | +# CONFIG_SYSV_FS is not set | |
769 | +# CONFIG_UFS_FS is not set | |
770 | +# CONFIG_NETWORK_FILESYSTEMS is not set | |
771 | + | |
772 | +# | |
773 | +# Partition Types | |
774 | +# | |
775 | +# CONFIG_PARTITION_ADVANCED is not set | |
776 | +CONFIG_MSDOS_PARTITION=y | |
777 | +# CONFIG_NLS is not set | |
778 | +# CONFIG_DLM is not set | |
779 | +CONFIG_BINARY_PRINTF=y | |
780 | + | |
781 | +# | |
782 | +# Library routines | |
783 | +# | |
784 | +CONFIG_BITREVERSE=y | |
785 | +CONFIG_GENERIC_FIND_LAST_BIT=y | |
786 | +# CONFIG_CRC_CCITT is not set | |
787 | +# CONFIG_CRC16 is not set | |
788 | +# CONFIG_CRC_T10DIF is not set | |
789 | +# CONFIG_CRC_ITU_T is not set | |
790 | +CONFIG_CRC32=y | |
791 | +# CONFIG_CRC7 is not set | |
792 | +# CONFIG_LIBCRC32C is not set | |
793 | +CONFIG_ZLIB_INFLATE=y | |
794 | +CONFIG_DECOMPRESS_GZIP=y | |
795 | +CONFIG_HAS_IOMEM=y | |
796 | +CONFIG_HAS_IOPORT=y | |
797 | +CONFIG_HAS_DMA=y | |
798 | +CONFIG_HAVE_LMB=y | |
799 | +CONFIG_NLATTR=y | |
800 | +CONFIG_GENERIC_ATOMIC64=y | |
801 | + | |
802 | +# | |
803 | +# Kernel hacking | |
804 | +# | |
805 | +# CONFIG_PRINTK_TIME is not set | |
806 | +CONFIG_ENABLE_WARN_DEPRECATED=y | |
807 | +CONFIG_ENABLE_MUST_CHECK=y | |
808 | +CONFIG_FRAME_WARN=1024 | |
809 | +CONFIG_MAGIC_SYSRQ=y | |
810 | +# CONFIG_STRIP_ASM_SYMS is not set | |
811 | +# CONFIG_UNUSED_SYMBOLS is not set | |
812 | +CONFIG_DEBUG_FS=y | |
813 | +# CONFIG_HEADERS_CHECK is not set | |
814 | +CONFIG_DEBUG_KERNEL=y | |
815 | +# CONFIG_DEBUG_SHIRQ is not set | |
816 | +CONFIG_DETECT_SOFTLOCKUP=y | |
817 | +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set | |
818 | +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 | |
819 | +CONFIG_DETECT_HUNG_TASK=y | |
820 | +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set | |
821 | +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 | |
822 | +CONFIG_SCHED_DEBUG=y | |
823 | +# CONFIG_SCHEDSTATS is not set | |
824 | +# CONFIG_TIMER_STATS is not set | |
825 | +# CONFIG_DEBUG_OBJECTS is not set | |
826 | +# CONFIG_SLUB_DEBUG_ON is not set | |
827 | +# CONFIG_SLUB_STATS is not set | |
828 | +# CONFIG_DEBUG_KMEMLEAK is not set | |
829 | +# CONFIG_DEBUG_RT_MUTEXES is not set | |
830 | +# CONFIG_RT_MUTEX_TESTER is not set | |
831 | +# CONFIG_DEBUG_SPINLOCK is not set | |
832 | +# CONFIG_DEBUG_MUTEXES is not set | |
833 | +# CONFIG_DEBUG_LOCK_ALLOC is not set | |
834 | +# CONFIG_PROVE_LOCKING is not set | |
835 | +# CONFIG_LOCK_STAT is not set | |
836 | +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set | |
837 | +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | |
838 | +CONFIG_STACKTRACE=y | |
839 | +# CONFIG_DEBUG_KOBJECT is not set | |
840 | +CONFIG_DEBUG_BUGVERBOSE=y | |
841 | +CONFIG_DEBUG_INFO=y | |
842 | +# CONFIG_DEBUG_VM is not set | |
843 | +# CONFIG_DEBUG_WRITECOUNT is not set | |
844 | +# CONFIG_DEBUG_MEMORY_INIT is not set | |
845 | +# CONFIG_DEBUG_LIST is not set | |
846 | +# CONFIG_DEBUG_SG is not set | |
847 | +# CONFIG_DEBUG_NOTIFIERS is not set | |
848 | +# CONFIG_DEBUG_CREDENTIALS is not set | |
849 | +# CONFIG_RCU_TORTURE_TEST is not set | |
850 | +# CONFIG_RCU_CPU_STALL_DETECTOR is not set | |
851 | +# CONFIG_BACKTRACE_SELF_TEST is not set | |
852 | +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | |
853 | +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set | |
854 | +# CONFIG_FAULT_INJECTION is not set | |
855 | +# CONFIG_LATENCYTOP is not set | |
856 | +CONFIG_SYSCTL_SYSCALL_CHECK=y | |
857 | +# CONFIG_DEBUG_PAGEALLOC is not set | |
858 | +CONFIG_NOP_TRACER=y | |
859 | +CONFIG_HAVE_FUNCTION_TRACER=y | |
860 | +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | |
861 | +CONFIG_HAVE_DYNAMIC_FTRACE=y | |
862 | +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | |
863 | +CONFIG_RING_BUFFER=y | |
864 | +CONFIG_EVENT_TRACING=y | |
865 | +CONFIG_CONTEXT_SWITCH_TRACER=y | |
866 | +CONFIG_RING_BUFFER_ALLOW_SWAP=y | |
867 | +CONFIG_TRACING=y | |
868 | +CONFIG_TRACING_SUPPORT=y | |
869 | +CONFIG_FTRACE=y | |
870 | +# CONFIG_FUNCTION_TRACER is not set | |
871 | +# CONFIG_IRQSOFF_TRACER is not set | |
872 | +# CONFIG_SCHED_TRACER is not set | |
873 | +# CONFIG_ENABLE_DEFAULT_TRACERS is not set | |
874 | +# CONFIG_BOOT_TRACER is not set | |
875 | +CONFIG_BRANCH_PROFILE_NONE=y | |
876 | +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set | |
877 | +# CONFIG_PROFILE_ALL_BRANCHES is not set | |
878 | +# CONFIG_STACK_TRACER is not set | |
879 | +# CONFIG_KMEMTRACE is not set | |
880 | +# CONFIG_WORKQUEUE_TRACER is not set | |
881 | +# CONFIG_BLK_DEV_IO_TRACE is not set | |
882 | +# CONFIG_RING_BUFFER_BENCHMARK is not set | |
883 | +# CONFIG_DYNAMIC_DEBUG is not set | |
884 | +# CONFIG_DMA_API_DEBUG is not set | |
885 | +# CONFIG_SAMPLES is not set | |
886 | +CONFIG_HAVE_ARCH_KGDB=y | |
887 | +# CONFIG_KGDB is not set | |
888 | +# CONFIG_PPC_DISABLE_WERROR is not set | |
889 | +CONFIG_PPC_WERROR=y | |
890 | +CONFIG_PRINT_STACK_DEPTH=64 | |
891 | +# CONFIG_DEBUG_STACKOVERFLOW is not set | |
892 | +# CONFIG_DEBUG_STACK_USAGE is not set | |
893 | +# CONFIG_PPC_EMULATED_STATS is not set | |
894 | +# CONFIG_CODE_PATCHING_SELFTEST is not set | |
895 | +# CONFIG_FTR_FIXUP_SELFTEST is not set | |
896 | +# CONFIG_MSI_BITMAP_SELFTEST is not set | |
897 | +# CONFIG_XMON is not set | |
898 | +# CONFIG_IRQSTACKS is not set | |
899 | +# CONFIG_VIRQ_DEBUG is not set | |
900 | +# CONFIG_BDI_SWITCH is not set | |
901 | +CONFIG_PPC_EARLY_DEBUG=y | |
902 | +# CONFIG_PPC_EARLY_DEBUG_LPAR is not set | |
903 | +# CONFIG_PPC_EARLY_DEBUG_G5 is not set | |
904 | +# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set | |
905 | +# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set | |
906 | +# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set | |
907 | +# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set | |
908 | +# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set | |
909 | +# CONFIG_PPC_EARLY_DEBUG_BEAT is not set | |
910 | +CONFIG_PPC_EARLY_DEBUG_44x=y | |
911 | +# CONFIG_PPC_EARLY_DEBUG_40x is not set | |
912 | +# CONFIG_PPC_EARLY_DEBUG_CPM is not set | |
913 | +# CONFIG_PPC_EARLY_DEBUG_USBGECKO is not set | |
914 | +CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x40000200 | |
915 | +CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x1 | |
916 | + | |
917 | +# | |
918 | +# Security options | |
919 | +# | |
920 | +# CONFIG_KEYS is not set | |
921 | +# CONFIG_SECURITY is not set | |
922 | +# CONFIG_SECURITYFS is not set | |
923 | +# CONFIG_DEFAULT_SECURITY_SELINUX is not set | |
924 | +# CONFIG_DEFAULT_SECURITY_SMACK is not set | |
925 | +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set | |
926 | +CONFIG_DEFAULT_SECURITY_DAC=y | |
927 | +CONFIG_DEFAULT_SECURITY="" | |
928 | +CONFIG_CRYPTO=y | |
929 | + | |
930 | +# | |
931 | +# Crypto core or helper | |
932 | +# | |
933 | +CONFIG_CRYPTO_ALGAPI=y | |
934 | +CONFIG_CRYPTO_ALGAPI2=y | |
935 | +CONFIG_CRYPTO_AEAD2=y | |
936 | +CONFIG_CRYPTO_BLKCIPHER=y | |
937 | +CONFIG_CRYPTO_BLKCIPHER2=y | |
938 | +CONFIG_CRYPTO_HASH=y | |
939 | +CONFIG_CRYPTO_HASH2=y | |
940 | +CONFIG_CRYPTO_RNG2=y | |
941 | +CONFIG_CRYPTO_PCOMP=y | |
942 | +CONFIG_CRYPTO_MANAGER=y | |
943 | +CONFIG_CRYPTO_MANAGER2=y | |
944 | +# CONFIG_CRYPTO_GF128MUL is not set | |
945 | +# CONFIG_CRYPTO_NULL is not set | |
946 | +CONFIG_CRYPTO_WORKQUEUE=y | |
947 | +# CONFIG_CRYPTO_CRYPTD is not set | |
948 | +# CONFIG_CRYPTO_AUTHENC is not set | |
949 | +# CONFIG_CRYPTO_TEST is not set | |
950 | + | |
951 | +# | |
952 | +# Authenticated Encryption with Associated Data | |
953 | +# | |
954 | +# CONFIG_CRYPTO_CCM is not set | |
955 | +# CONFIG_CRYPTO_GCM is not set | |
956 | +# CONFIG_CRYPTO_SEQIV is not set | |
957 | + | |
958 | +# | |
959 | +# Block modes | |
960 | +# | |
961 | +CONFIG_CRYPTO_CBC=y | |
962 | +# CONFIG_CRYPTO_CTR is not set | |
963 | +# CONFIG_CRYPTO_CTS is not set | |
964 | +CONFIG_CRYPTO_ECB=y | |
965 | +# CONFIG_CRYPTO_LRW is not set | |
966 | +CONFIG_CRYPTO_PCBC=y | |
967 | +# CONFIG_CRYPTO_XTS is not set | |
968 | + | |
969 | +# | |
970 | +# Hash modes | |
971 | +# | |
972 | +# CONFIG_CRYPTO_HMAC is not set | |
973 | +# CONFIG_CRYPTO_XCBC is not set | |
974 | +# CONFIG_CRYPTO_VMAC is not set | |
975 | + | |
976 | +# | |
977 | +# Digest | |
978 | +# | |
979 | +# CONFIG_CRYPTO_CRC32C is not set | |
980 | +# CONFIG_CRYPTO_GHASH is not set | |
981 | +# CONFIG_CRYPTO_MD4 is not set | |
982 | +CONFIG_CRYPTO_MD5=y | |
983 | +# CONFIG_CRYPTO_MICHAEL_MIC is not set | |
984 | +# CONFIG_CRYPTO_RMD128 is not set | |
985 | +# CONFIG_CRYPTO_RMD160 is not set | |
986 | +# CONFIG_CRYPTO_RMD256 is not set | |
987 | +# CONFIG_CRYPTO_RMD320 is not set | |
988 | +# CONFIG_CRYPTO_SHA1 is not set | |
989 | +# CONFIG_CRYPTO_SHA256 is not set | |
990 | +# CONFIG_CRYPTO_SHA512 is not set | |
991 | +# CONFIG_CRYPTO_TGR192 is not set | |
992 | +# CONFIG_CRYPTO_WP512 is not set | |
993 | + | |
994 | +# | |
995 | +# Ciphers | |
996 | +# | |
997 | +# CONFIG_CRYPTO_AES is not set | |
998 | +# CONFIG_CRYPTO_ANUBIS is not set | |
999 | +# CONFIG_CRYPTO_ARC4 is not set | |
1000 | +# CONFIG_CRYPTO_BLOWFISH is not set | |
1001 | +# CONFIG_CRYPTO_CAMELLIA is not set | |
1002 | +# CONFIG_CRYPTO_CAST5 is not set | |
1003 | +# CONFIG_CRYPTO_CAST6 is not set | |
1004 | +CONFIG_CRYPTO_DES=y | |
1005 | +# CONFIG_CRYPTO_FCRYPT is not set | |
1006 | +# CONFIG_CRYPTO_KHAZAD is not set | |
1007 | +# CONFIG_CRYPTO_SALSA20 is not set | |
1008 | +# CONFIG_CRYPTO_SEED is not set | |
1009 | +# CONFIG_CRYPTO_SERPENT is not set | |
1010 | +# CONFIG_CRYPTO_TEA is not set | |
1011 | +# CONFIG_CRYPTO_TWOFISH is not set | |
1012 | + | |
1013 | +# | |
1014 | +# Compression | |
1015 | +# | |
1016 | +# CONFIG_CRYPTO_DEFLATE is not set | |
1017 | +# CONFIG_CRYPTO_ZLIB is not set | |
1018 | +# CONFIG_CRYPTO_LZO is not set | |
1019 | + | |
1020 | +# | |
1021 | +# Random Number Generation | |
1022 | +# | |
1023 | +# CONFIG_CRYPTO_ANSI_CPRNG is not set | |
1024 | +# CONFIG_CRYPTO_HW is not set | |
1025 | +# CONFIG_PPC_CLOCK is not set | |
1026 | +# CONFIG_VIRTUALIZATION is not set |
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/include/asm/cache.h
... | ... | @@ -12,8 +12,12 @@ |
12 | 12 | #define L1_CACHE_SHIFT 6 |
13 | 13 | #define MAX_COPY_PREFETCH 4 |
14 | 14 | #elif defined(CONFIG_PPC32) |
15 | -#define L1_CACHE_SHIFT 5 | |
16 | 15 | #define MAX_COPY_PREFETCH 4 |
16 | +#if defined(CONFIG_PPC_47x) | |
17 | +#define L1_CACHE_SHIFT 7 | |
18 | +#else | |
19 | +#define L1_CACHE_SHIFT 5 | |
20 | +#endif | |
17 | 21 | #else /* CONFIG_PPC64 */ |
18 | 22 | #define L1_CACHE_SHIFT 7 |
19 | 23 | #endif |
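The hunk above gives 476 (CONFIG_PPC_47x) kernels 128-byte L1 cache lines while other 32-bit parts keep 32 bytes. As a quick illustration of what the shift selects, assuming the usual derivation of the byte size found elsewhere in this header:

    /* Sketch only: the line size in bytes is derived from the shift. */
    #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
    /* CONFIG_PPC_47x: 1 << 7 = 128 bytes; other PPC32: 1 << 5 = 32 bytes;
     * CONFIG_PPC64:   1 << 7 = 128 bytes.
     */

Structures padded or aligned to L1_CACHE_BYTES therefore grow on 476 relative to the other 44x-family parts.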
arch/powerpc/include/asm/cputable.h
... | ... | @@ -72,6 +72,7 @@ |
72 | 72 | extern int machine_check_440A(struct pt_regs *regs); |
73 | 73 | extern int machine_check_e500(struct pt_regs *regs); |
74 | 74 | extern int machine_check_e200(struct pt_regs *regs); |
75 | +extern int machine_check_47x(struct pt_regs *regs); | |
75 | 76 | |
76 | 77 | /* NOTE WELL: Update identify_cpu() if fields are added or removed! */ |
77 | 78 | struct cpu_spec { |
... | ... | @@ -365,6 +366,7 @@ |
365 | 366 | #define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) |
366 | 367 | #define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \ |
367 | 368 | CPU_FTR_INDEXED_DCR) |
369 | +#define CPU_FTRS_47X (CPU_FTRS_440x6) | |
368 | 370 | #define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ |
369 | 371 | CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ |
370 | 372 | CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE) |
... | ... | @@ -452,6 +454,9 @@ |
452 | 454 | #endif |
453 | 455 | #ifdef CONFIG_44x |
454 | 456 | CPU_FTRS_44X | CPU_FTRS_440x6 | |
457 | +#endif | |
458 | +#ifdef CONFIG_PPC_47x | |
459 | + CPU_FTRS_47X | | |
455 | 460 | #endif |
456 | 461 | #ifdef CONFIG_E200 |
457 | 462 | CPU_FTRS_E200 | |
arch/powerpc/include/asm/hvcall.h
... | ... | @@ -228,6 +228,7 @@ |
228 | 228 | #define H_JOIN 0x298 |
229 | 229 | #define H_VASI_STATE 0x2A4 |
230 | 230 | #define H_ENABLE_CRQ 0x2B0 |
231 | +#define H_GET_EM_PARMS 0x2B8 | |
231 | 232 | #define H_SET_MPP 0x2D0 |
232 | 233 | #define H_GET_MPP 0x2D4 |
233 | 234 | #define MAX_HCALL_OPCODE H_GET_MPP |
... | ... | @@ -281,6 +282,7 @@ |
281 | 282 | */ |
282 | 283 | #define PLPAR_HCALL9_BUFSIZE 9 |
283 | 284 | long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...); |
285 | +long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...); | |
284 | 286 | |
285 | 287 | /* For hcall instrumentation. One structure per-hcall, per-CPU */ |
286 | 288 | struct hcall_stats { |
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/mmu-44x.h
... | ... | @@ -40,7 +40,7 @@ |
40 | 40 | #define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */ |
41 | 41 | #define PPC44x_TLB_M 0x00000200 /* Memory is coherent */ |
42 | 42 | #define PPC44x_TLB_G 0x00000100 /* Memory is guarded */ |
43 | -#define PPC44x_TLB_E 0x00000080 /* Memory is guarded */ | |
43 | +#define PPC44x_TLB_E 0x00000080 /* Memory is little endian */ | |
44 | 44 | |
45 | 45 | #define PPC44x_TLB_PERM_MASK 0x0000003f |
46 | 46 | #define PPC44x_TLB_UX 0x00000020 /* User execution */ |
... | ... | @@ -53,6 +53,52 @@ |
53 | 53 | /* Number of TLB entries */ |
54 | 54 | #define PPC44x_TLB_SIZE 64 |
55 | 55 | |
56 | +/* 47x bits */ | |
57 | +#define PPC47x_MMUCR_TID 0x0000ffff | |
58 | +#define PPC47x_MMUCR_STS 0x00010000 | |
59 | + | |
60 | +/* Page identification fields */ | |
61 | +#define PPC47x_TLB0_EPN_MASK 0xfffff000 /* Effective Page Number */ | |
62 | +#define PPC47x_TLB0_VALID 0x00000800 /* Valid flag */ | |
63 | +#define PPC47x_TLB0_TS 0x00000400 /* Translation address space */ | |
64 | +#define PPC47x_TLB0_4K 0x00000000 | |
65 | +#define PPC47x_TLB0_16K 0x00000010 | |
66 | +#define PPC47x_TLB0_64K 0x00000030 | |
67 | +#define PPC47x_TLB0_1M 0x00000070 | |
68 | +#define PPC47x_TLB0_16M 0x000000f0 | |
69 | +#define PPC47x_TLB0_256M 0x000001f0 | |
70 | +#define PPC47x_TLB0_1G 0x000003f0 | |
71 | +#define PPC47x_TLB0_BOLTED_R 0x00000008 /* tlbre only */ | |
72 | + | |
73 | +/* Translation fields */ | |
74 | +#define PPC47x_TLB1_RPN_MASK 0xfffff000 /* Real Page Number */ | |
75 | +#define PPC47x_TLB1_ERPN_MASK 0x000003ff | |
76 | + | |
77 | +/* Storage attribute and access control fields */ | |
78 | +#define PPC47x_TLB2_ATTR_MASK 0x0003ff80 | |
79 | +#define PPC47x_TLB2_IL1I 0x00020000 /* Inhibit L1 instruction cache */ | |
80 | +#define PPC47x_TLB2_IL1D 0x00010000 /* Inhibit L1 data cache */ | |
81 | +#define PPC47x_TLB2_U0 0x00008000 /* User 0 */ | |
82 | +#define PPC47x_TLB2_U1 0x00004000 /* User 1 */ | |
83 | +#define PPC47x_TLB2_U2 0x00002000 /* User 2 */ | |
84 | +#define PPC47x_TLB2_U3 0x00001000 /* User 3 */ | |
85 | +#define PPC47x_TLB2_W 0x00000800 /* Caching is write-through */ | |
86 | +#define PPC47x_TLB2_I 0x00000400 /* Caching is inhibited */ | |
87 | +#define PPC47x_TLB2_M 0x00000200 /* Memory is coherent */ | |
88 | +#define PPC47x_TLB2_G 0x00000100 /* Memory is guarded */ | |
89 | +#define PPC47x_TLB2_E 0x00000080 /* Memory is little endian */ | |
90 | +#define PPC47x_TLB2_PERM_MASK 0x0000003f | |
91 | +#define PPC47x_TLB2_UX 0x00000020 /* User execution */ | |
92 | +#define PPC47x_TLB2_UW 0x00000010 /* User write */ | |
93 | +#define PPC47x_TLB2_UR 0x00000008 /* User read */ | |
94 | +#define PPC47x_TLB2_SX 0x00000004 /* Super execution */ | |
95 | +#define PPC47x_TLB2_SW 0x00000002 /* Super write */ | |
96 | +#define PPC47x_TLB2_SR 0x00000001 /* Super read */ | |
97 | +#define PPC47x_TLB2_U_RWX (PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR) | |
98 | +#define PPC47x_TLB2_S_RWX (PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR) | |
99 | +#define PPC47x_TLB2_S_RW (PPC47x_TLB2_SW | PPC47x_TLB2_SR) | |
100 | +#define PPC47x_TLB2_IMG (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G) | |
101 | + | |
56 | 102 | #ifndef __ASSEMBLY__ |
57 | 103 | |
58 | 104 | extern unsigned int tlb_44x_hwater; |
59 | 105 | |
60 | 106 | |
... | ... | @@ -79,12 +125,15 @@ |
79 | 125 | |
80 | 126 | #if (PAGE_SHIFT == 12) |
81 | 127 | #define PPC44x_TLBE_SIZE PPC44x_TLB_4K |
128 | +#define PPC47x_TLBE_SIZE PPC47x_TLB0_4K | |
82 | 129 | #define mmu_virtual_psize MMU_PAGE_4K |
83 | 130 | #elif (PAGE_SHIFT == 14) |
84 | 131 | #define PPC44x_TLBE_SIZE PPC44x_TLB_16K |
132 | +#define PPC47x_TLBE_SIZE PPC47x_TLB0_16K | |
85 | 133 | #define mmu_virtual_psize MMU_PAGE_16K |
86 | 134 | #elif (PAGE_SHIFT == 16) |
87 | 135 | #define PPC44x_TLBE_SIZE PPC44x_TLB_64K |
136 | +#define PPC47x_TLBE_SIZE PPC47x_TLB0_64K | |
88 | 137 | #define mmu_virtual_psize MMU_PAGE_64K |
89 | 138 | #elif (PAGE_SHIFT == 18) |
90 | 139 | #define PPC44x_TLBE_SIZE PPC44x_TLB_256K |
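The new PPC47x_* constants describe the three-word 476 TLB entry: word 0 holds the effective page number, valid bit, address space and size, word 1 the real page number and extended RPN, and word 2 the storage attributes and permissions. A minimal sketch of how the pieces compose, not taken from the patch; the function name and the choice of a coherent, supervisor-RWX 256MB mapping are illustrative:

    /* Sketch: build the three TLB words for a 256MB kernel mapping of
     * vaddr -> paddr in the low 4GB (so the ERPN field stays zero),
     * cache-coherent, readable/writable/executable in supervisor mode.
     */
    static inline void ppc47x_tlbe_words(unsigned long vaddr, unsigned long paddr,
                                         unsigned long *w0, unsigned long *w1,
                                         unsigned long *w2)
    {
            *w0 = (vaddr & PPC47x_TLB0_EPN_MASK) | PPC47x_TLB0_VALID |
                  PPC47x_TLB0_256M;
            *w1 = paddr & PPC47x_TLB1_RPN_MASK;          /* ERPN bits left 0 */
            *w2 = PPC47x_TLB2_M | PPC47x_TLB2_S_RWX;     /* coherent, SR|SW|SX */
    }

The boot-time pinned mappings and the TLB miss handlers write these same three words from assembly in head_44x.S.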
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/mmzone.h
arch/powerpc/include/asm/mpic.h
... | ... | @@ -463,9 +463,6 @@ |
463 | 463 | /* Request IPIs on primary mpic */ |
464 | 464 | extern void mpic_request_ipis(void); |
465 | 465 | |
466 | -/* Send an IPI (non offseted number 0..3) */ | |
467 | -extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask); | |
468 | - | |
469 | 466 | /* Send a message (IPI) to a given target (cpu number or MSG_*) */ |
470 | 467 | void smp_mpic_message_pass(int target, int msg); |
471 | 468 |
arch/powerpc/include/asm/paca.h
... | ... | @@ -82,6 +82,7 @@ |
82 | 82 | s16 hw_cpu_id; /* Physical processor number */ |
83 | 83 | u8 cpu_start; /* At startup, processor spins until */ |
84 | 84 | /* this becomes non-zero. */ |
85 | + u8 kexec_state; /* set when kexec down has irqs off */ | |
85 | 86 | #ifdef CONFIG_PPC_STD_MMU_64 |
86 | 87 | struct slb_shadow *slb_shadow_ptr; |
87 | 88 |
arch/powerpc/include/asm/parport.h
... | ... | @@ -19,6 +19,8 @@ |
19 | 19 | u32 io1, io2; |
20 | 20 | int propsize; |
21 | 21 | int count = 0; |
22 | + int virq; | |
23 | + | |
22 | 24 | for (np = NULL; (np = of_find_compatible_node(np, |
23 | 25 | "parallel", |
24 | 26 | "pnpPNP,400")) != NULL;) { |
25 | 27 | |
... | ... | @@ -26,10 +28,13 @@ |
26 | 28 | if (!prop || propsize > 6*sizeof(u32)) |
27 | 29 | continue; |
28 | 30 | io1 = prop[1]; io2 = prop[2]; |
29 | - prop = of_get_property(np, "interrupts", NULL); | |
30 | - if (!prop) | |
31 | + | |
32 | + virq = irq_of_parse_and_map(np, 0); | |
33 | + if (virq == NO_IRQ) | |
31 | 34 | continue; |
32 | - if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL) | |
35 | + | |
36 | + if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0) | |
37 | + != NULL) | |
33 | 38 | count++; |
34 | 39 | } |
35 | 40 | return count; |
arch/powerpc/include/asm/pgalloc-64.h
... | ... | @@ -11,6 +11,12 @@ |
11 | 11 | #include <linux/cpumask.h> |
12 | 12 | #include <linux/percpu.h> |
13 | 13 | |
14 | +struct vmemmap_backing { | |
15 | + struct vmemmap_backing *list; | |
16 | + unsigned long phys; | |
17 | + unsigned long virt_addr; | |
18 | +}; | |
19 | + | |
14 | 20 | /* |
15 | 21 | * Functions that deal with pagetables that could be at any level of |
16 | 22 | * the table need to be passed an "index_size" so they know how to |
arch/powerpc/include/asm/pgtable-ppc32.h
... | ... | @@ -287,7 +287,7 @@ |
287 | 287 | #define pmd_page_vaddr(pmd) \ |
288 | 288 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) |
289 | 289 | #define pmd_page(pmd) \ |
290 | - (mem_map + (pmd_val(pmd) >> PAGE_SHIFT)) | |
290 | + pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) | |
291 | 291 | #else |
292 | 292 | #define pmd_page_vaddr(pmd) \ |
293 | 293 | ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) |
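For context on the pmd_page() change just above: the old form indexes mem_map directly, which is only valid when mem_map is a single flat array starting at pfn 0. pfn_to_page() reduces to essentially the same arithmetic under FLATMEM but stays correct for other memory models and a non-zero ARCH_PFN_OFFSET. A rough comparison, paraphrasing the generic memory_model.h behaviour rather than this patch:

    /* FLATMEM: pfn_to_page(pfn) is essentially mem_map + (pfn - ARCH_PFN_OFFSET),
     * i.e. the old open-coded expression when the offset is zero.  Other memory
     * models resolve the page through their own lookup, which the open-coded
     * version cannot do.
     */
    #define old_pmd_page(pmd) (mem_map + (pmd_val(pmd) >> PAGE_SHIFT)) /* flat only */
    #define new_pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)  /* any model */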
arch/powerpc/include/asm/ptrace.h
... | ... | @@ -89,6 +89,7 @@ |
89 | 89 | |
90 | 90 | #define instruction_pointer(regs) ((regs)->nip) |
91 | 91 | #define user_stack_pointer(regs) ((regs)->gpr[1]) |
92 | +#define kernel_stack_pointer(regs) ((regs)->gpr[1]) | |
92 | 93 | #define regs_return_value(regs) ((regs)->gpr[3]) |
93 | 94 | |
94 | 95 | #ifdef CONFIG_SMP |
... | ... | @@ -140,6 +141,69 @@ |
140 | 141 | #define arch_has_single_step() (1) |
141 | 142 | #define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601)) |
142 | 143 | #define ARCH_HAS_USER_SINGLE_STEP_INFO |
144 | + | |
145 | +/* | |
146 | + * kprobe-based event tracer support | |
147 | + */ | |
148 | + | |
149 | +#include <linux/stddef.h> | |
150 | +#include <linux/thread_info.h> | |
151 | +extern int regs_query_register_offset(const char *name); | |
152 | +extern const char *regs_query_register_name(unsigned int offset); | |
153 | +#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr)) | |
154 | + | |
155 | +/** | |
156 | + * regs_get_register() - get register value from its offset | |
157 | + * @regs: pt_regs from which register value is gotten | |
158 | + * @offset: offset number of the register. | |
159 | + * | |
160 | + * regs_get_register returns the value of the register at @offset in @regs. | |
161 | + * The @offset is the offset of the register in struct pt_regs. | |
162 | + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. | |
163 | + */ | |
164 | +static inline unsigned long regs_get_register(struct pt_regs *regs, | |
165 | + unsigned int offset) | |
166 | +{ | |
167 | + if (unlikely(offset > MAX_REG_OFFSET)) | |
168 | + return 0; | |
169 | + return *(unsigned long *)((unsigned long)regs + offset); | |
170 | +} | |
171 | + | |
172 | +/** | |
173 | + * regs_within_kernel_stack() - check the address in the stack | |
174 | + * @regs: pt_regs which contains kernel stack pointer. | |
175 | + * @addr: address which is checked. | |
176 | + * | |
177 | + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). | |
178 | + * If @addr is within the kernel stack, it returns true. If not, returns false. | |
179 | + */ | |
180 | + | |
181 | +static inline bool regs_within_kernel_stack(struct pt_regs *regs, | |
182 | + unsigned long addr) | |
183 | +{ | |
184 | + return ((addr & ~(THREAD_SIZE - 1)) == | |
185 | + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); | |
186 | +} | |
187 | + | |
188 | +/** | |
189 | + * regs_get_kernel_stack_nth() - get Nth entry of the stack | |
190 | + * @regs: pt_regs which contains kernel stack pointer. | |
191 | + * @n: stack entry number. | |
192 | + * | |
193 | + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which | |
194 | + * is specified by @regs. If the @n th entry is NOT in the kernel stack, | |
195 | + * this returns 0. | |
196 | + */ | |
197 | +static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, | |
198 | + unsigned int n) | |
199 | +{ | |
200 | + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); | |
201 | + addr += n; | |
202 | + if (regs_within_kernel_stack(regs, (unsigned long)addr)) | |
203 | + return *addr; | |
204 | + else | |
205 | + return 0; | |
206 | +} | |
143 | 207 | |
144 | 208 | #endif /* __ASSEMBLY__ */ |
145 | 209 |
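The regs_* helpers documented above were added for the kprobe-based event tracer, but any code holding a struct pt_regs can use them. A minimal sketch of a kprobe pre-handler using them; the probed symbol, the "gpr1" register name string and the messages are illustrative assumptions, not taken from the patch:

    #include <linux/kernel.h>
    #include <linux/kprobes.h>
    #include <linux/ptrace.h>

    /* Sketch: print the stack pointer and the first two stack slots each
     * time the probed function is entered.
     */
    static int pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            int off = regs_query_register_offset("gpr1");

            if (off >= 0)
                    pr_info("sp=%lx stack[0]=%lx stack[1]=%lx\n",
                            regs_get_register(regs, off),
                            regs_get_kernel_stack_nth(regs, 0),
                            regs_get_kernel_stack_nth(regs, 1));
            return 0;
    }

    static struct kprobe kp = {
            .symbol_name = "do_fork",   /* illustrative target */
            .pre_handler = pre_handler,
    };

Registering the probe with register_kprobe(&kp) from module init would arm it; regs_get_kernel_stack_nth() returns 0 rather than faulting if the requested slot is outside the kernel stack.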
arch/powerpc/include/asm/reg.h
... | ... | @@ -817,6 +817,7 @@ |
817 | 817 | #define PVR_403GC 0x00200200 |
818 | 818 | #define PVR_403GCX 0x00201400 |
819 | 819 | #define PVR_405GP 0x40110000 |
820 | +#define PVR_476 0x11a52000 | |
820 | 821 | #define PVR_STB03XXX 0x40310000 |
821 | 822 | #define PVR_NP405H 0x41410000 |
822 | 823 | #define PVR_NP405L 0x41610000 |
... | ... | @@ -852,6 +853,9 @@ |
852 | 853 | #define PVR_8240 0x00810100 |
853 | 854 | #define PVR_8245 0x80811014 |
854 | 855 | #define PVR_8260 PVR_8240 |
856 | + | |
857 | +/* 476 Simulator seems to currently have the PVR of the 602... */ | |
858 | +#define PVR_476_ISS 0x00052000 | |
855 | 859 | |
856 | 860 | /* 64-bit processors */ |
857 | 861 | /* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */ |
arch/powerpc/include/asm/reg_booke.h
... | ... | @@ -191,6 +191,10 @@ |
191 | 191 | #define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */ |
192 | 192 | #define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */ |
193 | 193 | |
194 | +#define PPC47x_MCSR_GPR 0x01000000 /* GPR parity error */ | |
195 | +#define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */ | |
196 | +#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */ | |
197 | + | |
194 | 198 | #ifdef CONFIG_E500 |
195 | 199 | #define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */ |
196 | 200 | #define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */ |
... | ... | @@ -604,6 +608,26 @@ |
604 | 608 | #define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */ |
605 | 609 | #define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */ |
606 | 610 | #endif /* 403GCX */ |
611 | + | |
612 | +/* Some 476 specific registers */ | |
613 | +#define SPRN_SSPCR 830 | |
614 | +#define SPRN_USPCR 831 | |
615 | +#define SPRN_ISPCR 829 | |
616 | +#define SPRN_MMUBE0 820 | |
617 | +#define MMUBE0_IBE0_SHIFT 24 | |
618 | +#define MMUBE0_IBE1_SHIFT 16 | |
619 | +#define MMUBE0_IBE2_SHIFT 8 | |
620 | +#define MMUBE0_VBE0 0x00000004 | |
621 | +#define MMUBE0_VBE1 0x00000002 | |
622 | +#define MMUBE0_VBE2 0x00000001 | |
623 | +#define SPRN_MMUBE1 821 | |
624 | +#define MMUBE1_IBE3_SHIFT 24 | |
625 | +#define MMUBE1_IBE4_SHIFT 16 | |
626 | +#define MMUBE1_IBE5_SHIFT 8 | |
627 | +#define MMUBE1_VBE3 0x00000004 | |
628 | +#define MMUBE1_VBE4 0x00000002 | |
629 | +#define MMUBE1_VBE5 0x00000001 | |
630 | + | |
607 | 631 | #endif /* __ASM_POWERPC_REG_BOOKE_H__ */ |
608 | 632 | #endif /* __KERNEL__ */ |
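SPRN_MMUBE0/SPRN_MMUBE1 describe the 476 bolted TLB entries: each register carries up to three bolted-entry index fields plus a valid bit per slot. A small decoding sketch, assuming the index fields are byte-wide (which the 8-bit spacing of the *_SHIFT values suggests) and using the usual mfspr() accessor:

    /* Sketch: report which TLB index backs bolted slot 0, or -1 if the
     * slot is not marked valid.
     */
    static inline int ppc47x_bolted0_index(void)
    {
            unsigned long mmube0 = mfspr(SPRN_MMUBE0);

            if (!(mmube0 & MMUBE0_VBE0))
                    return -1;
            return (mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff;
    }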
arch/powerpc/include/asm/smp.h
... | ... | @@ -40,7 +40,7 @@ |
40 | 40 | DECLARE_PER_CPU(unsigned int, cpu_pvr); |
41 | 41 | |
42 | 42 | #ifdef CONFIG_HOTPLUG_CPU |
43 | -extern void fixup_irqs(cpumask_t map); | |
43 | +extern void fixup_irqs(const struct cpumask *map); | |
44 | 44 | int generic_cpu_disable(void); |
45 | 45 | int generic_cpu_enable(unsigned int cpu); |
46 | 46 | void generic_cpu_die(unsigned int cpu); |
... | ... | @@ -68,8 +68,19 @@ |
68 | 68 | } |
69 | 69 | #endif |
70 | 70 | |
71 | -DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | |
72 | -DECLARE_PER_CPU(cpumask_t, cpu_core_map); | |
71 | +DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); | |
72 | +DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); | |
73 | + | |
74 | +static inline struct cpumask *cpu_sibling_mask(int cpu) | |
75 | +{ | |
76 | + return per_cpu(cpu_sibling_map, cpu); | |
77 | +} | |
78 | + | |
79 | +static inline struct cpumask *cpu_core_mask(int cpu) | |
80 | +{ | |
81 | + return per_cpu(cpu_core_map, cpu); | |
82 | +} | |
83 | + | |
73 | 84 | extern int cpu_to_core_id(int cpu); |
74 | 85 | |
75 | 86 | /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. |
... | ... | @@ -93,7 +104,6 @@ |
93 | 104 | void smp_init_cell(void); |
94 | 105 | void smp_init_celleb(void); |
95 | 106 | void smp_setup_cpu_maps(void); |
96 | -void smp_setup_cpu_sibling_map(void); | |
97 | 107 | |
98 | 108 | extern int __cpu_disable(void); |
99 | 109 | extern void __cpu_die(unsigned int cpu); |
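With cpu_sibling_map and cpu_core_map now cpumask_var_t, callers should go through the new cpu_sibling_mask()/cpu_core_mask() accessors instead of taking the address of the per-cpu variable. A minimal usage sketch; the loop body is illustrative:

    #include <linux/cpumask.h>

    /* Sketch: count how many online threads share a core with 'cpu'. */
    static int count_online_siblings(int cpu)
    {
            int sibling, n = 0;

            for_each_cpu(sibling, cpu_sibling_mask(cpu))
                    if (cpu_online(sibling))
                            n++;
            return n;
    }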
arch/powerpc/include/asm/topology.h
... | ... | @@ -8,6 +8,26 @@ |
8 | 8 | |
9 | 9 | #ifdef CONFIG_NUMA |
10 | 10 | |
11 | +/* | |
12 | + * Before going off node we want the VM to try and reclaim from the local | |
13 | + * node. It does this if the remote distance is larger than RECLAIM_DISTANCE. | |
14 | + * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of | |
15 | + * 20, we never reclaim and go off node straight away. | |
16 | + * | |
17 | + * To fix this we choose a smaller value of RECLAIM_DISTANCE. | |
18 | + */ | |
19 | +#define RECLAIM_DISTANCE 10 | |
20 | + | |
21 | +/* | |
22 | + * Before going off node we want the VM to try and reclaim from the local | |
23 | + * node. It does this if the remote distance is larger than RECLAIM_DISTANCE. | |
24 | + * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of | |
25 | + * 20, we never reclaim and go off node straight away. | |
26 | + * | |
27 | + * To fix this we choose a smaller value of RECLAIM_DISTANCE. | |
28 | + */ | |
29 | +#define RECLAIM_DISTANCE 10 | |
30 | + | |
11 | 31 | #include <asm/mmzone.h> |
12 | 32 | |
13 | 33 | static inline int cpu_to_node(int cpu) |
... | ... | @@ -19,7 +39,7 @@ |
19 | 39 | |
20 | 40 | #define cpumask_of_node(node) ((node) == -1 ? \ |
21 | 41 | cpu_all_mask : \ |
22 | - &numa_cpumask_lookup_table[node]) | |
42 | + node_to_cpumask_map[node]) | |
23 | 43 | |
24 | 44 | int of_node_to_nid(struct device_node *device); |
25 | 45 | |
... | ... | @@ -102,8 +122,8 @@ |
102 | 122 | #ifdef CONFIG_PPC64 |
103 | 123 | #include <asm/smp.h> |
104 | 124 | |
105 | -#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) | |
106 | -#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) | |
125 | +#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) | |
126 | +#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) | |
107 | 127 | #define topology_core_id(cpu) (cpu_to_core_id(cpu)) |
108 | 128 | #endif |
109 | 129 | #endif |
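The RECLAIM_DISTANCE comment above refers to the generic page allocator heuristic: zone reclaim is only turned on when the distance to a remote node exceeds RECLAIM_DISTANCE. A rough sketch of that check, assuming it matches the generic logic of this era; with the default REMOTE_DISTANCE of 20, "20 > 20" is false so reclaim never engages, while "20 > 10" is true once RECLAIM_DISTANCE is lowered to 10:

    /* Sketch of the heuristic the comment is working around. */
    static void maybe_enable_zone_reclaim(int local_node, int remote_node)
    {
            if (node_distance(local_node, remote_node) > RECLAIM_DISTANCE)
                    zone_reclaim_mode = 1;  /* reclaim locally before going off node */
    }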
arch/powerpc/kernel/asm-offsets.c
... | ... | @@ -183,6 +183,7 @@ |
183 | 183 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
184 | 184 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); |
185 | 185 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); |
186 | + DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); | |
186 | 187 | DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); |
187 | 188 | DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr)); |
188 | 189 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); |
... | ... | @@ -446,6 +447,14 @@ |
446 | 447 | #ifdef CONFIG_44x |
447 | 448 | DEFINE(PGD_T_LOG2, PGD_T_LOG2); |
448 | 449 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); |
450 | +#endif | |
451 | +#ifdef CONFIG_FSL_BOOKE | |
452 | + DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); | |
453 | + DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); | |
454 | + DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); | |
455 | + DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2)); | |
456 | + DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3)); | |
457 | + DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); | |
449 | 458 | #endif |
450 | 459 | |
451 | 460 | #ifdef CONFIG_KVM_EXIT_TIMING |
arch/powerpc/kernel/cputable.c
... | ... | @@ -1701,6 +1701,35 @@ |
1701 | 1701 | .machine_check = machine_check_440A, |
1702 | 1702 | .platform = "ppc440", |
1703 | 1703 | }, |
1704 | + { /* 476 core */ | |
1705 | + .pvr_mask = 0xffff0000, | |
1706 | + .pvr_value = 0x11a50000, | |
1707 | + .cpu_name = "476", | |
1708 | + .cpu_features = CPU_FTRS_47X, | |
1709 | + .cpu_user_features = COMMON_USER_BOOKE | | |
1710 | + PPC_FEATURE_HAS_FPU, | |
1711 | + .mmu_features = MMU_FTR_TYPE_47x | | |
1712 | + MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, | |
1713 | + .icache_bsize = 32, | |
1714 | + .dcache_bsize = 128, | |
1715 | + .machine_check = machine_check_47x, | |
1716 | + .platform = "ppc470", | |
1717 | + }, | |
1718 | + { /* 476 iss */ | |
1719 | + .pvr_mask = 0xffff0000, | |
1720 | + .pvr_value = 0x00050000, | |
1721 | + .cpu_name = "476", | |
1722 | + .cpu_features = CPU_FTRS_47X, | |
1723 | + .cpu_user_features = COMMON_USER_BOOKE | | |
1724 | + PPC_FEATURE_HAS_FPU, | |
1725 | + .cpu_user_features = COMMON_USER_BOOKE, | |
1726 | + .mmu_features = MMU_FTR_TYPE_47x | | |
1727 | + MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, | |
1728 | + .icache_bsize = 32, | |
1729 | + .dcache_bsize = 128, | |
1730 | + .machine_check = machine_check_47x, | |
1731 | + .platform = "ppc470", | |
1732 | + }, | |
1704 | 1733 | { /* default match */ |
1705 | 1734 | .pvr_mask = 0x00000000, |
1706 | 1735 | .pvr_value = 0x00000000, |
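For reference, the two new cpu_spec entries above are selected by PVR matching: an entry applies when the PVR masked with pvr_mask equals pvr_value, and the first match wins, which is why the catch-all default entry (mask and value of zero) sits last. A small sketch of that rule, assuming it mirrors identify_cpu(); the helper name is illustrative:

    /* Sketch: does this cpu_spec entry match the given PVR? */
    static int spec_matches(const struct cpu_spec *s, unsigned int pvr)
    {
            return (pvr & s->pvr_mask) == s->pvr_value;
    }

    /* e.g. a 476 core PVR of 0x11a5xxxx matches mask 0xffff0000 / value 0x11a50000,
     * and the ISS model's 0x0005xxxx matches the second entry.
     */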
arch/powerpc/kernel/crash.c
... | ... | @@ -162,6 +162,32 @@ |
162 | 162 | /* Leave the IPI callback set */ |
163 | 163 | } |
164 | 164 | |
165 | +/* wait for all the CPUs to hit real mode but timeout if they don't come in */ | |
166 | +static void crash_kexec_wait_realmode(int cpu) | |
167 | +{ | |
168 | + unsigned int msecs; | |
169 | + int i; | |
170 | + | |
171 | + msecs = 10000; | |
172 | + for (i=0; i < NR_CPUS && msecs > 0; i++) { | |
173 | + if (i == cpu) | |
174 | + continue; | |
175 | + | |
176 | + while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { | |
177 | + barrier(); | |
178 | + if (!cpu_possible(i)) { | |
179 | + break; | |
180 | + } | |
181 | + if (!cpu_online(i)) { | |
182 | + break; | |
183 | + } | |
184 | + msecs--; | |
185 | + mdelay(1); | |
186 | + } | |
187 | + } | |
188 | + mb(); | |
189 | +} | |
190 | + | |
165 | 191 | /* |
166 | 192 | * This function will be called by secondary cpus or by kexec cpu |
167 | 193 | * if soft-reset is activated to stop some CPUs. |
168 | 194 | |
... | ... | @@ -347,10 +373,12 @@ |
347 | 373 | EXPORT_SYMBOL(crash_shutdown_unregister); |
348 | 374 | |
349 | 375 | static unsigned long crash_shutdown_buf[JMP_BUF_LEN]; |
376 | +static int crash_shutdown_cpu = -1; | |
350 | 377 | |
351 | 378 | static int handle_fault(struct pt_regs *regs) |
352 | 379 | { |
353 | - longjmp(crash_shutdown_buf, 1); | |
380 | + if (crash_shutdown_cpu == smp_processor_id()) | |
381 | + longjmp(crash_shutdown_buf, 1); | |
354 | 382 | return 0; |
355 | 383 | } |
356 | 384 | |
357 | 385 | |
... | ... | @@ -375,11 +403,14 @@ |
375 | 403 | for_each_irq(i) { |
376 | 404 | struct irq_desc *desc = irq_to_desc(i); |
377 | 405 | |
406 | + if (!desc || !desc->chip || !desc->chip->eoi) | |
407 | + continue; | |
408 | + | |
378 | 409 | if (desc->status & IRQ_INPROGRESS) |
379 | 410 | desc->chip->eoi(i); |
380 | 411 | |
381 | 412 | if (!(desc->status & IRQ_DISABLED)) |
382 | - desc->chip->disable(i); | |
413 | + desc->chip->shutdown(i); | |
383 | 414 | } |
384 | 415 | |
385 | 416 | /* |
... | ... | @@ -388,6 +419,7 @@ |
388 | 419 | */ |
389 | 420 | old_handler = __debugger_fault_handler; |
390 | 421 | __debugger_fault_handler = handle_fault; |
422 | + crash_shutdown_cpu = smp_processor_id(); | |
391 | 423 | for (i = 0; crash_shutdown_handles[i]; i++) { |
392 | 424 | if (setjmp(crash_shutdown_buf) == 0) { |
393 | 425 | /* |
... | ... | @@ -401,6 +433,7 @@ |
401 | 433 | asm volatile("sync; isync"); |
402 | 434 | } |
403 | 435 | } |
436 | + crash_shutdown_cpu = -1; | |
404 | 437 | __debugger_fault_handler = old_handler; |
405 | 438 | |
406 | 439 | /* |
... | ... | @@ -412,6 +445,7 @@ |
412 | 445 | crash_kexec_prepare_cpus(crashing_cpu); |
413 | 446 | cpu_set(crashing_cpu, cpus_in_crash); |
414 | 447 | crash_kexec_stop_spus(); |
448 | + crash_kexec_wait_realmode(crashing_cpu); | |
415 | 449 | if (ppc_md.kexec_cpu_down) |
416 | 450 | ppc_md.kexec_cpu_down(1, 0); |
417 | 451 | } |
arch/powerpc/kernel/entry_32.S
... | ... | @@ -373,11 +373,13 @@ |
373 | 373 | bnel- load_dbcr0 |
374 | 374 | #endif |
375 | 375 | #ifdef CONFIG_44x |
376 | +BEGIN_MMU_FTR_SECTION | |
376 | 377 | lis r4,icache_44x_need_flush@ha |
377 | 378 | lwz r5,icache_44x_need_flush@l(r4) |
378 | 379 | cmplwi cr0,r5,0 |
379 | 380 | bne- 2f |
380 | 381 | 1: |
382 | +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x) | |
381 | 383 | #endif /* CONFIG_44x */ |
382 | 384 | BEGIN_FTR_SECTION |
383 | 385 | lwarx r7,0,r1 |
... | ... | @@ -848,6 +850,9 @@ |
848 | 850 | /* interrupts are hard-disabled at this point */ |
849 | 851 | restore: |
850 | 852 | #ifdef CONFIG_44x |
853 | +BEGIN_MMU_FTR_SECTION | |
854 | + b 1f | |
855 | +END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x) | |
851 | 856 | lis r4,icache_44x_need_flush@ha |
852 | 857 | lwz r5,icache_44x_need_flush@l(r4) |
853 | 858 | cmplwi cr0,r5,0 |
arch/powerpc/kernel/exceptions-64s.S
... | ... | @@ -735,8 +735,11 @@ |
735 | 735 | std r3,_DAR(r1) |
736 | 736 | std r4,_DSISR(r1) |
737 | 737 | |
738 | - andis. r0,r4,0xa450 /* weird error? */ | |
738 | + andis. r0,r4,0xa410 /* weird error? */ | |
739 | 739 | bne- handle_page_fault /* if not, try to insert a HPTE */ |
740 | + andis. r0,r4,DSISR_DABRMATCH@h | |
741 | + bne- handle_dabr_fault | |
742 | + | |
740 | 743 | BEGIN_FTR_SECTION |
741 | 744 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ |
742 | 745 | bne- do_ste_alloc /* If so handle it */ |
... | ... | @@ -822,6 +825,14 @@ |
822 | 825 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) |
823 | 826 | bl .raw_local_irq_restore |
824 | 827 | b 11f |
828 | + | |
829 | +/* We have a data breakpoint exception - handle it */ | |
830 | +handle_dabr_fault: | |
831 | + ld r4,_DAR(r1) | |
832 | + ld r5,_DSISR(r1) | |
833 | + addi r3,r1,STACK_FRAME_OVERHEAD | |
834 | + bl .do_dabr | |
835 | + b .ret_from_except_lite | |
825 | 836 | |
826 | 837 | /* Here we have a page fault that hash_page can't handle. */ |
827 | 838 | handle_page_fault: |
arch/powerpc/kernel/head_44x.S
... | ... | @@ -37,6 +37,7 @@ |
37 | 37 | #include <asm/thread_info.h> |
38 | 38 | #include <asm/ppc_asm.h> |
39 | 39 | #include <asm/asm-offsets.h> |
40 | +#include <asm/synch.h> | |
40 | 41 | #include "head_booke.h" |
41 | 42 | |
42 | 43 | |
43 | 44 | |
... | ... | @@ -69,166 +70,8 @@ |
69 | 70 | mr r27,r7 |
70 | 71 | li r24,0 /* CPU number */ |
71 | 72 | |
72 | -/* | |
73 | - * In case the firmware didn't do it, we apply some workarounds | |
74 | - * that are good for all 440 core variants here | |
75 | - */ | |
76 | - mfspr r3,SPRN_CCR0 | |
77 | - rlwinm r3,r3,0,0,27 /* disable icache prefetch */ | |
78 | - isync | |
79 | - mtspr SPRN_CCR0,r3 | |
80 | - isync | |
81 | - sync | |
73 | + bl init_cpu_state | |
82 | 74 | |
83 | -/* | |
84 | - * Set up the initial MMU state | |
85 | - * | |
86 | - * We are still executing code at the virtual address | |
87 | - * mappings set by the firmware for the base of RAM. | |
88 | - * | |
89 | - * We first invalidate all TLB entries but the one | |
90 | - * we are running from. We then load the KERNELBASE | |
91 | - * mappings so we can begin to use kernel addresses | |
92 | - * natively and so the interrupt vector locations are | |
93 | - * permanently pinned (necessary since Book E | |
94 | - * implementations always have translation enabled). | |
95 | - * | |
96 | - * TODO: Use the known TLB entry we are running from to | |
97 | - * determine which physical region we are located | |
98 | - * in. This can be used to determine where in RAM | |
99 | - * (on a shared CPU system) or PCI memory space | |
100 | - * (on a DRAMless system) we are located. | |
101 | - * For now, we assume a perfect world which means | |
102 | - * we are located at the base of DRAM (physical 0). | |
103 | - */ | |
104 | - | |
105 | -/* | |
106 | - * Search TLB for entry that we are currently using. | |
107 | - * Invalidate all entries but the one we are using. | |
108 | - */ | |
109 | - /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ | |
110 | - mfspr r3,SPRN_PID /* Get PID */ | |
111 | - mfmsr r4 /* Get MSR */ | |
112 | - andi. r4,r4,MSR_IS@l /* TS=1? */ | |
113 | - beq wmmucr /* If not, leave STS=0 */ | |
114 | - oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ | |
115 | -wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ | |
116 | - sync | |
117 | - | |
118 | - bl invstr /* Find our address */ | |
119 | -invstr: mflr r5 /* Make it accessible */ | |
120 | - tlbsx r23,0,r5 /* Find entry we are in */ | |
121 | - li r4,0 /* Start at TLB entry 0 */ | |
122 | - li r3,0 /* Set PAGEID inval value */ | |
123 | -1: cmpw r23,r4 /* Is this our entry? */ | |
124 | - beq skpinv /* If so, skip the inval */ | |
125 | - tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ | |
126 | -skpinv: addi r4,r4,1 /* Increment */ | |
127 | - cmpwi r4,64 /* Are we done? */ | |
128 | - bne 1b /* If not, repeat */ | |
129 | - isync /* If so, context change */ | |
130 | - | |
131 | -/* | |
132 | - * Configure and load pinned entry into TLB slot 63. | |
133 | - */ | |
134 | - | |
135 | - lis r3,PAGE_OFFSET@h | |
136 | - ori r3,r3,PAGE_OFFSET@l | |
137 | - | |
138 | - /* Kernel is at the base of RAM */ | |
139 | - li r4, 0 /* Load the kernel physical address */ | |
140 | - | |
141 | - /* Load the kernel PID = 0 */ | |
142 | - li r0,0 | |
143 | - mtspr SPRN_PID,r0 | |
144 | - sync | |
145 | - | |
146 | - /* Initialize MMUCR */ | |
147 | - li r5,0 | |
148 | - mtspr SPRN_MMUCR,r5 | |
149 | - sync | |
150 | - | |
151 | - /* pageid fields */ | |
152 | - clrrwi r3,r3,10 /* Mask off the effective page number */ | |
153 | - ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M | |
154 | - | |
155 | - /* xlat fields */ | |
156 | - clrrwi r4,r4,10 /* Mask off the real page number */ | |
157 | - /* ERPN is 0 for first 4GB page */ | |
158 | - | |
159 | - /* attrib fields */ | |
160 | - /* Added guarded bit to protect against speculative loads/stores */ | |
161 | - li r5,0 | |
162 | - ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) | |
163 | - | |
164 | - li r0,63 /* TLB slot 63 */ | |
165 | - | |
166 | - tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ | |
167 | - tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ | |
168 | - tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ | |
169 | - | |
170 | - /* Force context change */ | |
171 | - mfmsr r0 | |
172 | - mtspr SPRN_SRR1, r0 | |
173 | - lis r0,3f@h | |
174 | - ori r0,r0,3f@l | |
175 | - mtspr SPRN_SRR0,r0 | |
176 | - sync | |
177 | - rfi | |
178 | - | |
179 | - /* If necessary, invalidate original entry we used */ | |
180 | -3: cmpwi r23,63 | |
181 | - beq 4f | |
182 | - li r6,0 | |
183 | - tlbwe r6,r23,PPC44x_TLB_PAGEID | |
184 | - isync | |
185 | - | |
186 | -4: | |
187 | -#ifdef CONFIG_PPC_EARLY_DEBUG_44x | |
188 | - /* Add UART mapping for early debug. */ | |
189 | - | |
190 | - /* pageid fields */ | |
191 | - lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h | |
192 | - ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K | |
193 | - | |
194 | - /* xlat fields */ | |
195 | - lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h | |
196 | - ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH | |
197 | - | |
198 | - /* attrib fields */ | |
199 | - li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G) | |
200 | - li r0,62 /* TLB slot 0 */ | |
201 | - | |
202 | - tlbwe r3,r0,PPC44x_TLB_PAGEID | |
203 | - tlbwe r4,r0,PPC44x_TLB_XLAT | |
204 | - tlbwe r5,r0,PPC44x_TLB_ATTRIB | |
205 | - | |
206 | - /* Force context change */ | |
207 | - isync | |
208 | -#endif /* CONFIG_PPC_EARLY_DEBUG_44x */ | |
209 | - | |
210 | - /* Establish the interrupt vector offsets */ | |
211 | - SET_IVOR(0, CriticalInput); | |
212 | - SET_IVOR(1, MachineCheck); | |
213 | - SET_IVOR(2, DataStorage); | |
214 | - SET_IVOR(3, InstructionStorage); | |
215 | - SET_IVOR(4, ExternalInput); | |
216 | - SET_IVOR(5, Alignment); | |
217 | - SET_IVOR(6, Program); | |
218 | - SET_IVOR(7, FloatingPointUnavailable); | |
219 | - SET_IVOR(8, SystemCall); | |
220 | - SET_IVOR(9, AuxillaryProcessorUnavailable); | |
221 | - SET_IVOR(10, Decrementer); | |
222 | - SET_IVOR(11, FixedIntervalTimer); | |
223 | - SET_IVOR(12, WatchdogTimer); | |
224 | - SET_IVOR(13, DataTLBError); | |
225 | - SET_IVOR(14, InstructionTLBError); | |
226 | - SET_IVOR(15, DebugCrit); | |
227 | - | |
228 | - /* Establish the interrupt vector base */ | |
229 | - lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ | |
230 | - mtspr SPRN_IVPR,r4 | |
231 | - | |
232 | 75 | /* |
233 | 76 | * This is where the main kernel code starts. |
234 | 77 | */ |
... | ... | @@ -349,7 +192,7 @@ |
349 | 192 | #endif |
350 | 193 | |
351 | 194 | /* Data TLB Error Interrupt */ |
352 | - START_EXCEPTION(DataTLBError) | |
195 | + START_EXCEPTION(DataTLBError44x) | |
353 | 196 | mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ |
354 | 197 | mtspr SPRN_SPRG_WSCRATCH1, r11 |
355 | 198 | mtspr SPRN_SPRG_WSCRATCH2, r12 |
... | ... | @@ -440,7 +283,7 @@ |
440 | 283 | mfspr r10,SPRN_DEAR |
441 | 284 | |
442 | 285 | /* Jump to common tlb load */ |
443 | - b finish_tlb_load | |
286 | + b finish_tlb_load_44x | |
444 | 287 | |
445 | 288 | 2: |
446 | 289 | /* The bailout. Restore registers to pre-exception conditions |
... | ... | @@ -460,7 +303,7 @@ |
460 | 303 | * information from different registers and bailout |
461 | 304 | * to a different point. |
462 | 305 | */ |
463 | - START_EXCEPTION(InstructionTLBError) | |
306 | + START_EXCEPTION(InstructionTLBError44x) | |
464 | 307 | mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ |
465 | 308 | mtspr SPRN_SPRG_WSCRATCH1, r11 |
466 | 309 | mtspr SPRN_SPRG_WSCRATCH2, r12 |
... | ... | @@ -536,7 +379,7 @@ |
536 | 379 | mfspr r10,SPRN_SRR0 |
537 | 380 | |
538 | 381 | /* Jump to common TLB load point */ |
539 | - b finish_tlb_load | |
382 | + b finish_tlb_load_44x | |
540 | 383 | |
541 | 384 | 2: |
542 | 385 | /* The bailout. Restore registers to pre-exception conditions |
543 | 386 | |
... | ... | @@ -550,15 +393,7 @@ |
550 | 393 | mfspr r10, SPRN_SPRG_RSCRATCH0 |
551 | 394 | b InstructionStorage |
552 | 395 | |
553 | - /* Debug Interrupt */ | |
554 | - DEBUG_CRIT_EXCEPTION | |
555 | - | |
556 | 396 | /* |
557 | - * Local functions | |
558 | - */ | |
559 | - | |
560 | -/* | |
561 | - | |
562 | 397 | * Both the instruction and data TLB miss get to this |
563 | 398 | * point to load the TLB. |
564 | 399 | * r10 - EA of fault |
... | ... | @@ -568,7 +403,7 @@ |
568 | 403 | * MMUCR - loaded with proper value when we get here |
569 | 404 | * Upon exit, we reload everything and RFI. |
570 | 405 | */ |
571 | -finish_tlb_load: | |
406 | +finish_tlb_load_44x: | |
572 | 407 | /* Combine RPN & ERPN and write WS 0 */ |
573 | 408 | rlwimi r11,r12,0,0,31-PAGE_SHIFT |
574 | 409 | tlbwe r11,r13,PPC44x_TLB_XLAT |
575 | 410 | |
... | ... | @@ -601,7 +436,228 @@ |
601 | 436 | mfspr r10, SPRN_SPRG_RSCRATCH0 |
602 | 437 | rfi /* Force context change */ |
603 | 438 | |
439 | +/* TLB error interrupts for 476 | |
440 | + */ | |
441 | +#ifdef CONFIG_PPC_47x | |
442 | + START_EXCEPTION(DataTLBError47x) | |
443 | + mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */ | |
444 | + mtspr SPRN_SPRG_WSCRATCH1,r11 | |
445 | + mtspr SPRN_SPRG_WSCRATCH2,r12 | |
446 | + mtspr SPRN_SPRG_WSCRATCH3,r13 | |
447 | + mfcr r11 | |
448 | + mtspr SPRN_SPRG_WSCRATCH4,r11 | |
449 | + mfspr r10,SPRN_DEAR /* Get faulting address */ | |
450 | + | |
451 | + /* If we are faulting a kernel address, we have to use the | |
452 | + * kernel page tables. | |
453 | + */ | |
454 | + lis r11,PAGE_OFFSET@h | |
455 | + cmplw cr0,r10,r11 | |
456 | + blt+ 3f | |
457 | + lis r11,swapper_pg_dir@h | |
458 | + ori r11,r11, swapper_pg_dir@l | |
459 | + li r12,0 /* MMUCR = 0 */ | |
460 | + b 4f | |
461 | + | |
462 | + /* Get the PGD for the current thread and setup MMUCR */ | |
463 | +3: mfspr r11,SPRN_SPRG3 | |
464 | + lwz r11,PGDIR(r11) | |
465 | + mfspr r12,SPRN_PID /* Get PID */ | |
466 | +4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */ | |
467 | + | |
468 | + /* Mask of required permission bits. Note that while we | |
469 | + * do copy ESR:ST to _PAGE_RW position as trying to write | |
470 | + * to an RO page is pretty common, we don't do it with | |
471 | + * _PAGE_DIRTY. We could do it, but it's a fairly rare | |
472 | + * event so I'd rather take the overhead when it happens | |
473 | + * rather than adding an instruction here. We should measure | |
474 | + * whether the whole thing is worth it in the first place | |
475 | + * as we could avoid loading SPRN_ESR completely in the first | |
476 | + * place... | |
477 | + * | |
478 | + * TODO: Is it worth doing that mfspr & rlwimi in the first | |
479 | + * place or can we save a couple of instructions here ? | |
480 | + */ | |
481 | + mfspr r12,SPRN_ESR | |
482 | + li r13,_PAGE_PRESENT|_PAGE_ACCESSED | |
483 | + rlwimi r13,r12,10,30,30 | |
484 | + | |
485 | + /* Load the PTE */ | |
486 | + /* Compute pgdir/pmd offset */ | |
487 | + rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29 | |
488 | + lwzx r11,r12,r11 /* Get pgd/pmd entry */ | |
489 | + | |
490 | + /* Word 0 is EPN,V,TS,DSIZ */ | |
491 | + li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE | |
492 | + rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/ | |
493 | + li r12,0 | |
494 | + tlbwe r10,r12,0 | |
495 | + | |
496 | + /* XXX can we do better ? Need to make sure tlbwe has established | |
497 | + * latch V bit in MMUCR0 before the PTE is loaded further down */ | |
498 | +#ifdef CONFIG_SMP | |
499 | + isync | |
500 | +#endif | |
501 | + | |
502 | + rlwinm. r12,r11,0,0,20 /* Extract pt base address */ | |
503 | + /* Compute pte address */ | |
504 | + rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28 | |
505 | + beq 2f /* Bail if no table */ | |
506 | + lwz r11,0(r12) /* Get high word of pte entry */ | |
507 | + | |
508 | + /* XXX can we do better ? maybe insert a known 0 bit from r11 into the | |
509 | + * bottom of r12 to create a data dependency... We can also use r10 | |
510 | + * as destination nowadays | |
511 | + */ | |
512 | +#ifdef CONFIG_SMP | |
513 | + lwsync | |
514 | +#endif | |
515 | + lwz r12,4(r12) /* Get low word of pte entry */ | |
516 | + | |
517 | + andc. r13,r13,r12 /* Check permission */ | |
518 | + | |
519 | + /* Jump to common tlb load */ | |
520 | + beq finish_tlb_load_47x | |
521 | + | |
522 | +2: /* The bailout. Restore registers to pre-exception conditions | |
523 | + * and call the heavyweights to help us out. | |
524 | + */ | |
525 | + mfspr r11,SPRN_SPRG_RSCRATCH4 | |
526 | + mtcr r11 | |
527 | + mfspr r13,SPRN_SPRG_RSCRATCH3 | |
528 | + mfspr r12,SPRN_SPRG_RSCRATCH2 | |
529 | + mfspr r11,SPRN_SPRG_RSCRATCH1 | |
530 | + mfspr r10,SPRN_SPRG_RSCRATCH0 | |
531 | + b DataStorage | |
532 | + | |
533 | + /* Instruction TLB Error Interrupt */ | |
534 | + /* | |
535 | + * Nearly the same as above, except we get our | |
536 | + * information from different registers and bailout | |
537 | + * to a different point. | |
538 | + */ | |
539 | + START_EXCEPTION(InstructionTLBError47x) | |
540 | + mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */ | |
541 | + mtspr SPRN_SPRG_WSCRATCH1,r11 | |
542 | + mtspr SPRN_SPRG_WSCRATCH2,r12 | |
543 | + mtspr SPRN_SPRG_WSCRATCH3,r13 | |
544 | + mfcr r11 | |
545 | + mtspr SPRN_SPRG_WSCRATCH4,r11 | |
546 | + mfspr r10,SPRN_SRR0 /* Get faulting address */ | |
547 | + | |
548 | + /* If we are faulting a kernel address, we have to use the | |
549 | + * kernel page tables. | |
550 | + */ | |
551 | + lis r11,PAGE_OFFSET@h | |
552 | + cmplw cr0,r10,r11 | |
553 | + blt+ 3f | |
554 | + lis r11,swapper_pg_dir@h | |
555 | + ori r11,r11, swapper_pg_dir@l | |
556 | + li r12,0 /* MMUCR = 0 */ | |
557 | + b 4f | |
558 | + | |
559 | + /* Get the PGD for the current thread and setup MMUCR */ | |
560 | +3: mfspr r11,SPRN_SPRG_THREAD | |
561 | + lwz r11,PGDIR(r11) | |
562 | + mfspr r12,SPRN_PID /* Get PID */ | |
563 | +4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */ | |
564 | + | |
565 | + /* Make up the required permissions */ | |
566 | + li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | |
567 | + | |
568 | + /* Load PTE */ | |
569 | + /* Compute pgdir/pmd offset */ | |
570 | + rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29 | |
571 | + lwzx r11,r12,r11 /* Get pgd/pmd entry */ | |
572 | + | |
573 | + /* Word 0 is EPN,V,TS,DSIZ */ | |
574 | + li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE | |
575 | + rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/ | |
576 | + li r12,0 | |
577 | + tlbwe r10,r12,0 | |
578 | + | |
579 | + /* XXX can we do better ? Need to make sure tlbwe has established | |
580 | + * latch V bit in MMUCR0 before the PTE is loaded further down */ | |
581 | +#ifdef CONFIG_SMP | |
582 | + isync | |
583 | +#endif | |
584 | + | |
585 | + rlwinm. r12,r11,0,0,20 /* Extract pt base address */ | |
586 | + /* Compute pte address */ | |
587 | + rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28 | |
588 | + beq 2f /* Bail if no table */ | |
589 | + | |
590 | + lwz r11,0(r12) /* Get high word of pte entry */ | |
591 | + /* XXX can we do better ? maybe insert a known 0 bit from r11 into the | |
592 | + * bottom of r12 to create a data dependency... We can also use r10 | |
593 | + * as destination nowadays | |
594 | + */ | |
595 | +#ifdef CONFIG_SMP | |
596 | + lwsync | |
597 | +#endif | |
598 | + lwz r12,4(r12) /* Get low word of pte entry */ | |
599 | + | |
600 | + andc. r13,r13,r12 /* Check permission */ | |
601 | + | |
602 | + /* Jump to common TLB load point */ | |
603 | + beq finish_tlb_load_47x | |
604 | + | |
605 | +2: /* The bailout. Restore registers to pre-exception conditions | |
606 | + * and call the heavyweights to help us out. | |
607 | + */ | |
608 | + mfspr r11, SPRN_SPRG_RSCRATCH4 | |
609 | + mtcr r11 | |
610 | + mfspr r13, SPRN_SPRG_RSCRATCH3 | |
611 | + mfspr r12, SPRN_SPRG_RSCRATCH2 | |
612 | + mfspr r11, SPRN_SPRG_RSCRATCH1 | |
613 | + mfspr r10, SPRN_SPRG_RSCRATCH0 | |
614 | + b InstructionStorage | |
615 | + | |
604 | 616 | /* |
617 | + * Both the instruction and data TLB miss get to this | |
618 | + * point to load the TLB. | |
619 | + * r10 - free to use | |
620 | + * r11 - PTE high word value | |
621 | + * r12 - PTE low word value | |
622 | + * r13 - free to use | |
623 | + * MMUCR - loaded with proper value when we get here | |
624 | + * Upon exit, we reload everything and RFI. | |
625 | + */ | |
626 | +finish_tlb_load_47x: | |
627 | + /* Combine RPN & ERPN and write WS 1 */ | |
628 | + rlwimi r11,r12,0,0,31-PAGE_SHIFT | |
629 | + tlbwe r11,r13,1 | |
630 | + | |
631 | + /* And make up word 2 */ | |
632 | + li r10,0xf85 /* Mask to apply from PTE */ | |
633 | + rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */ | |
634 | + and r11,r12,r10 /* Mask PTE bits to keep */ | |
635 | + andi. r10,r12,_PAGE_USER /* User page ? */ | |
636 | + beq 1f /* nope, leave U bits empty */ | |
637 | + rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */ | |
638 | +1: tlbwe r11,r13,2 | |
639 | + | |
640 | + /* Done...restore registers and get out of here. | |
641 | + */ | |
642 | + mfspr r11, SPRN_SPRG_RSCRATCH4 | |
643 | + mtcr r11 | |
644 | + mfspr r13, SPRN_SPRG_RSCRATCH3 | |
645 | + mfspr r12, SPRN_SPRG_RSCRATCH2 | |
646 | + mfspr r11, SPRN_SPRG_RSCRATCH1 | |
647 | + mfspr r10, SPRN_SPRG_RSCRATCH0 | |
648 | + rfi | |
649 | + | |
650 | +#endif /* CONFIG_PPC_47x */ | |
651 | + | |
652 | + /* Debug Interrupt */ | |
653 | + /* | |
654 | + * This statement needs to exist at the end of the IVPR | |
655 | + * definition just in case you end up taking a debug | |
656 | + * exception within another exception. | |
657 | + */ | |
658 | + DEBUG_CRIT_EXCEPTION | |
659 | + | |
660 | +/* | |
605 | 661 | * Global functions |
606 | 662 | */ |
607 | 663 | |
... | ... | @@ -647,6 +703,428 @@ |
647 | 703 | blr |
648 | 704 | |
649 | 705 | /* |
706 | + * Init CPU state. This is called at boot time or for secondary CPUs | |
707 | + * to setup initial TLB entries, setup IVORs, etc... | |
708 | + * | |
709 | + */ | |
710 | +_GLOBAL(init_cpu_state) | |
711 | + mflr r22 | |
712 | +#ifdef CONFIG_PPC_47x | |
713 | + /* We use the PVR to differentiate 44x cores from 476 */ | |
714 | + mfspr r3,SPRN_PVR | |
715 | + srwi r3,r3,16 | |
716 | + cmplwi cr0,r3,PVR_476@h | |
717 | + beq head_start_47x | |
718 | + cmplwi cr0,r3,PVR_476_ISS@h | |
719 | + beq head_start_47x | |
720 | +#endif /* CONFIG_PPC_47x */ | |
721 | + | |
722 | +/* | |
723 | + * In case the firmware didn't do it, we apply some workarounds | |
724 | + * that are good for all 440 core variants here | |
725 | + */ | |
726 | + mfspr r3,SPRN_CCR0 | |
727 | + rlwinm r3,r3,0,0,27 /* disable icache prefetch */ | |
728 | + isync | |
729 | + mtspr SPRN_CCR0,r3 | |
730 | + isync | |
731 | + sync | |
732 | + | |
733 | +/* | |
734 | + * Set up the initial MMU state for 44x | |
735 | + * | |
736 | + * We are still executing code at the virtual address | |
737 | + * mappings set by the firmware for the base of RAM. | |
738 | + * | |
739 | + * We first invalidate all TLB entries but the one | |
740 | + * we are running from. We then load the KERNELBASE | |
741 | + * mappings so we can begin to use kernel addresses | |
742 | + * natively and so the interrupt vector locations are | |
743 | + * permanently pinned (necessary since Book E | |
744 | + * implementations always have translation enabled). | |
745 | + * | |
746 | + * TODO: Use the known TLB entry we are running from to | |
747 | + * determine which physical region we are located | |
748 | + * in. This can be used to determine where in RAM | |
749 | + * (on a shared CPU system) or PCI memory space | |
750 | + * (on a DRAMless system) we are located. | |
751 | + * For now, we assume a perfect world which means | |
752 | + * we are located at the base of DRAM (physical 0). | |
753 | + */ | |
754 | + | |
755 | +/* | |
756 | + * Search TLB for entry that we are currently using. | |
757 | + * Invalidate all entries but the one we are using. | |
758 | + */ | |
759 | + /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ | |
760 | + mfspr r3,SPRN_PID /* Get PID */ | |
761 | + mfmsr r4 /* Get MSR */ | |
762 | + andi. r4,r4,MSR_IS@l /* TS=1? */ | |
763 | + beq wmmucr /* If not, leave STS=0 */ | |
764 | + oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ | |
765 | +wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ | |
766 | + sync | |
767 | + | |
768 | + bl invstr /* Find our address */ | |
769 | +invstr: mflr r5 /* Make it accessible */ | |
770 | + tlbsx r23,0,r5 /* Find entry we are in */ | |
771 | + li r4,0 /* Start at TLB entry 0 */ | |
772 | + li r3,0 /* Set PAGEID inval value */ | |
773 | +1: cmpw r23,r4 /* Is this our entry? */ | |
774 | + beq skpinv /* If so, skip the inval */ | |
775 | + tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ | |
776 | +skpinv: addi r4,r4,1 /* Increment */ | |
777 | + cmpwi r4,64 /* Are we done? */ | |
778 | + bne 1b /* If not, repeat */ | |
779 | + isync /* If so, context change */ | |
780 | + | |
781 | +/* | |
782 | + * Configure and load pinned entry into TLB slot 63. | |
783 | + */ | |
784 | + | |
785 | + lis r3,PAGE_OFFSET@h | |
786 | + ori r3,r3,PAGE_OFFSET@l | |
787 | + | |
788 | + /* Kernel is at the base of RAM */ | |
789 | + li r4, 0 /* Load the kernel physical address */ | |
790 | + | |
791 | + /* Load the kernel PID = 0 */ | |
792 | + li r0,0 | |
793 | + mtspr SPRN_PID,r0 | |
794 | + sync | |
795 | + | |
796 | + /* Initialize MMUCR */ | |
797 | + li r5,0 | |
798 | + mtspr SPRN_MMUCR,r5 | |
799 | + sync | |
800 | + | |
801 | + /* pageid fields */ | |
802 | + clrrwi r3,r3,10 /* Mask off the effective page number */ | |
803 | + ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M | |
804 | + | |
805 | + /* xlat fields */ | |
806 | + clrrwi r4,r4,10 /* Mask off the real page number */ | |
807 | + /* ERPN is 0 for first 4GB page */ | |
808 | + | |
809 | + /* attrib fields */ | |
810 | + /* Added guarded bit to protect against speculative loads/stores */ | |
811 | + li r5,0 | |
812 | + ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) | |
813 | + | |
814 | + li r0,63 /* TLB slot 63 */ | |
815 | + | |
816 | + tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ | |
817 | + tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ | |
818 | + tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ | |
819 | + | |
820 | + /* Force context change */ | |
821 | + mfmsr r0 | |
822 | + mtspr SPRN_SRR1, r0 | |
823 | + lis r0,3f@h | |
824 | + ori r0,r0,3f@l | |
825 | + mtspr SPRN_SRR0,r0 | |
826 | + sync | |
827 | + rfi | |
828 | + | |
829 | + /* If necessary, invalidate original entry we used */ | |
830 | +3: cmpwi r23,63 | |
831 | + beq 4f | |
832 | + li r6,0 | |
833 | + tlbwe r6,r23,PPC44x_TLB_PAGEID | |
834 | + isync | |
835 | + | |
836 | +4: | |
837 | +#ifdef CONFIG_PPC_EARLY_DEBUG_44x | |
838 | + /* Add UART mapping for early debug. */ | |
839 | + | |
840 | + /* pageid fields */ | |
841 | + lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h | |
842 | + ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K | |
843 | + | |
844 | + /* xlat fields */ | |
845 | + lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h | |
846 | + ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH | |
847 | + | |
848 | + /* attrib fields */ | |
849 | + li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G) | |
850 | + li r0,62 /* TLB slot 62 */ | |
851 | + | |
852 | + tlbwe r3,r0,PPC44x_TLB_PAGEID | |
853 | + tlbwe r4,r0,PPC44x_TLB_XLAT | |
854 | + tlbwe r5,r0,PPC44x_TLB_ATTRIB | |
855 | + | |
856 | + /* Force context change */ | |
857 | + isync | |
858 | +#endif /* CONFIG_PPC_EARLY_DEBUG_44x */ | |
859 | + | |
860 | + /* Establish the interrupt vector offsets */ | |
861 | + SET_IVOR(0, CriticalInput); | |
862 | + SET_IVOR(1, MachineCheck); | |
863 | + SET_IVOR(2, DataStorage); | |
864 | + SET_IVOR(3, InstructionStorage); | |
865 | + SET_IVOR(4, ExternalInput); | |
866 | + SET_IVOR(5, Alignment); | |
867 | + SET_IVOR(6, Program); | |
868 | + SET_IVOR(7, FloatingPointUnavailable); | |
869 | + SET_IVOR(8, SystemCall); | |
870 | + SET_IVOR(9, AuxillaryProcessorUnavailable); | |
871 | + SET_IVOR(10, Decrementer); | |
872 | + SET_IVOR(11, FixedIntervalTimer); | |
873 | + SET_IVOR(12, WatchdogTimer); | |
874 | + SET_IVOR(13, DataTLBError44x); | |
875 | + SET_IVOR(14, InstructionTLBError44x); | |
876 | + SET_IVOR(15, DebugCrit); | |
877 | + | |
878 | + b head_start_common | |
879 | + | |
880 | + | |
881 | +#ifdef CONFIG_PPC_47x | |
882 | + | |
883 | +#ifdef CONFIG_SMP | |
884 | + | |
885 | +/* Entry point for secondary 47x processors */ | |
886 | +_GLOBAL(start_secondary_47x) | |
887 | + mr r24,r3 /* CPU number */ | |
888 | + | |
889 | + bl init_cpu_state | |
890 | + | |
891 | + /* Now we need to bolt the rest of kernel memory which | |
892 | + * is done in C code. We must be careful because our task | |
893 | + * struct or our stack can (and will probably) be out | |
894 | + * of reach of the initial 256M TLB entry, so we use a | |
895 | + * small temporary stack in .bss for that. This works | |
896 | + * because only one CPU at a time can be in this code | |
897 | + */ | |
898 | + lis r1,temp_boot_stack@h | |
899 | + ori r1,r1,temp_boot_stack@l | |
900 | + addi r1,r1,1024-STACK_FRAME_OVERHEAD | |
901 | + li r0,0 | |
902 | + stw r0,0(r1) | |
903 | + bl mmu_init_secondary | |
904 | + | |
905 | + /* Now we can get our task struct and real stack pointer */ | |
906 | + | |
907 | + /* Get current_thread_info and current */ | |
908 | + lis r1,secondary_ti@ha | |
909 | + lwz r1,secondary_ti@l(r1) | |
910 | + lwz r2,TI_TASK(r1) | |
911 | + | |
912 | + /* Current stack pointer */ | |
913 | + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | |
914 | + li r0,0 | |
915 | + stw r0,0(r1) | |
916 | + | |
917 | + /* Kernel stack for exception entry in SPRG3 */ | |
918 | + addi r4,r2,THREAD /* init task's THREAD */ | |
919 | + mtspr SPRN_SPRG3,r4 | |
920 | + | |
921 | + b start_secondary | |
922 | + | |
923 | +#endif /* CONFIG_SMP */ | |
924 | + | |
925 | +/* | |
926 | + * Set up the initial MMU state for 47x | |
927 | + * | |
928 | + * We are still executing code at the virtual address | |
929 | + * mappings set by the firmware for the base of RAM. | |
930 | + */ | |
931 | + | |
932 | +head_start_47x: | |
933 | + /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ | |
934 | + mfspr r3,SPRN_PID /* Get PID */ | |
935 | + mfmsr r4 /* Get MSR */ | |
936 | + andi. r4,r4,MSR_IS@l /* TS=1? */ | |
937 | + beq 1f /* If not, leave STS=0 */ | |
938 | + oris r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */ | |
939 | +1: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ | |
940 | + sync | |
941 | + | |
942 | + /* Find the entry we are running from */ | |
943 | + bl 1f | |
944 | +1: mflr r23 | |
945 | + tlbsx r23,0,r23 | |
946 | + tlbre r24,r23,0 | |
947 | + tlbre r25,r23,1 | |
948 | + tlbre r26,r23,2 | |
949 | + | |
950 | +/* | |
951 | + * Cleanup time | |
952 | + */ | |
953 | + | |
954 | + /* Initialize MMUCR */ | |
955 | + li r5,0 | |
956 | + mtspr SPRN_MMUCR,r5 | |
957 | + sync | |
958 | + | |
959 | +clear_all_utlb_entries: | |
960 | + | |
961 | + #; Set initial values. | |
962 | + | |
963 | + addis r3,0,0x8000 | |
964 | + addi r4,0,0 | |
965 | + addi r5,0,0 | |
966 | + b clear_utlb_entry | |
967 | + | |
968 | + #; Align the loop to speed things up. | |
969 | + | |
970 | + .align 6 | |
971 | + | |
972 | +clear_utlb_entry: | |
973 | + | |
974 | + tlbwe r4,r3,0 | |
975 | + tlbwe r5,r3,1 | |
976 | + tlbwe r5,r3,2 | |
977 | + addis r3,r3,0x2000 | |
978 | + cmpwi r3,0 | |
979 | + bne clear_utlb_entry | |
980 | + addis r3,0,0x8000 | |
981 | + addis r4,r4,0x100 | |
982 | + cmpwi r4,0 | |
983 | + bne clear_utlb_entry | |
984 | + | |
985 | + #; Restore original entry. | |
986 | + | |
987 | + oris r23,r23,0x8000 /* specify the way */ | |
988 | + tlbwe r24,r23,0 | |
989 | + tlbwe r25,r23,1 | |
990 | + tlbwe r26,r23,2 | |
991 | + | |
992 | +/* | |
993 | + * Configure and load pinned entry into TLB for the kernel core | |
994 | + */ | |
995 | + | |
996 | + lis r3,PAGE_OFFSET@h | |
997 | + ori r3,r3,PAGE_OFFSET@l | |
998 | + | |
999 | + /* Kernel is at the base of RAM */ | |
1000 | + li r4, 0 /* Load the kernel physical address */ | |
1001 | + | |
1002 | + /* Load the kernel PID = 0 */ | |
1003 | + li r0,0 | |
1004 | + mtspr SPRN_PID,r0 | |
1005 | + sync | |
1006 | + | |
1007 | + /* Word 0 */ | |
1008 | + clrrwi r3,r3,12 /* Mask off the effective page number */ | |
1009 | + ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M | |
1010 | + | |
1011 | + /* Word 1 */ | |
1012 | + clrrwi r4,r4,12 /* Mask off the real page number */ | |
1013 | + /* ERPN is 0 for first 4GB page */ | |
1014 | + /* Word 2 */ | |
1015 | + li r5,0 | |
1016 | + ori r5,r5,PPC47x_TLB2_S_RWX | |
1017 | +#ifdef CONFIG_SMP | |
1018 | + ori r5,r5,PPC47x_TLB2_M | |
1019 | +#endif | |
1020 | + | |
1021 | + /* We write to way 0 and bolted 0 */ | |
1022 | + lis r0,0x8800 | |
1023 | + tlbwe r3,r0,0 | |
1024 | + tlbwe r4,r0,1 | |
1025 | + tlbwe r5,r0,2 | |
1026 | + | |
1027 | +/* | |
1028 | + * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix | |
1029 | + * them up later | |
1030 | + */ | |
1031 | + LOAD_REG_IMMEDIATE(r3, 0x9abcdef0) | |
1032 | + mtspr SPRN_SSPCR,r3 | |
1033 | + mtspr SPRN_USPCR,r3 | |
1034 | + LOAD_REG_IMMEDIATE(r3, 0x12345670) | |
1035 | + mtspr SPRN_ISPCR,r3 | |
1036 | + | |
1037 | + /* Force context change */ | |
1038 | + mfmsr r0 | |
1039 | + mtspr SPRN_SRR1, r0 | |
1040 | + lis r0,3f@h | |
1041 | + ori r0,r0,3f@l | |
1042 | + mtspr SPRN_SRR0,r0 | |
1043 | + sync | |
1044 | + rfi | |
1045 | + | |
1046 | + /* Invalidate original entry we used */ | |
1047 | +3: | |
1048 | + rlwinm r24,r24,0,21,19 /* clear the "valid" bit */ | |
1049 | + tlbwe r24,r23,0 | |
1050 | + addi r24,0,0 | |
1051 | + tlbwe r24,r23,1 | |
1052 | + tlbwe r24,r23,2 | |
1053 | + isync /* Clear out the shadow TLB entries */ | |
1054 | + | |
1055 | +#ifdef CONFIG_PPC_EARLY_DEBUG_44x | |
1056 | + /* Add UART mapping for early debug. */ | |
1057 | + | |
1058 | + /* Word 0 */ | |
1059 | + lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h | |
1060 | + ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M | |
1061 | + | |
1062 | + /* Word 1 */ | |
1063 | + lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h | |
1064 | + ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH | |
1065 | + | |
1066 | + /* Word 2 */ | |
1067 | + li r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG) | |
1068 | + | |
1069 | + /* Bolted in way 0, bolt slot 5. We -hope- we don't hit the same | |
1070 | + * congruence class as the kernel; we need to make sure of that at | |
1071 | + * some point. | |
1072 | + */ | |
1073 | + lis r0,0x8d00 | |
1074 | + tlbwe r3,r0,0 | |
1075 | + tlbwe r4,r0,1 | |
1076 | + tlbwe r5,r0,2 | |
1077 | + | |
1078 | + /* Force context change */ | |
1079 | + isync | |
1080 | +#endif /* CONFIG_PPC_EARLY_DEBUG_44x */ | |
1081 | + | |
1082 | + /* Establish the interrupt vector offsets */ | |
1083 | + SET_IVOR(0, CriticalInput); | |
1084 | + SET_IVOR(1, MachineCheckA); | |
1085 | + SET_IVOR(2, DataStorage); | |
1086 | + SET_IVOR(3, InstructionStorage); | |
1087 | + SET_IVOR(4, ExternalInput); | |
1088 | + SET_IVOR(5, Alignment); | |
1089 | + SET_IVOR(6, Program); | |
1090 | + SET_IVOR(7, FloatingPointUnavailable); | |
1091 | + SET_IVOR(8, SystemCall); | |
1092 | + SET_IVOR(9, AuxillaryProcessorUnavailable); | |
1093 | + SET_IVOR(10, Decrementer); | |
1094 | + SET_IVOR(11, FixedIntervalTimer); | |
1095 | + SET_IVOR(12, WatchdogTimer); | |
1096 | + SET_IVOR(13, DataTLBError47x); | |
1097 | + SET_IVOR(14, InstructionTLBError47x); | |
1098 | + SET_IVOR(15, DebugCrit); | |
1099 | + | |
1100 | + /* We configure icbi to invalidate 128 bytes at a time since the | |
1101 | + * current 32-bit kernel code isn't too happy with icache != dcache | |
1102 | + * block size | |
1103 | + */ | |
1104 | + mfspr r3,SPRN_CCR0 | |
1105 | + oris r3,r3,0x0020 | |
1106 | + mtspr SPRN_CCR0,r3 | |
1107 | + isync | |
1108 | + | |
1109 | +#endif /* CONFIG_PPC_47x */ | |
1110 | + | |
1111 | +/* | |
1112 | + * Here we are back to code that is common between 44x and 47x | |
1113 | + * | |
1114 | + * We proceed to further kernel initialization and return to the | |
1115 | + * main kernel entry | |
1116 | + */ | |
1117 | +head_start_common: | |
1118 | + /* Establish the interrupt vector base */ | |
1119 | + lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ | |
1120 | + mtspr SPRN_IVPR,r4 | |
1121 | + | |
1122 | + addis r22,r22,KERNELBASE@h | |
1123 | + mtlr r22 | |
1124 | + isync | |
1125 | + blr | |
1126 | + | |
1127 | +/* | |
650 | 1128 | * We put a few things here that have to be page-aligned. This stuff |
651 | 1129 | * goes at the beginning of the data segment, which is page-aligned. |
652 | 1130 | */ |
... | ... | @@ -671,4 +1149,10 @@ |
671 | 1149 | */ |
672 | 1150 | abatron_pteptrs: |
673 | 1151 | .space 8 |
1152 | + | |
1153 | +#ifdef CONFIG_SMP | |
1154 | + .align 12 | |
1155 | +temp_boot_stack: | |
1156 | + .space 1024 | |
1157 | +#endif /* CONFIG_SMP */ |
arch/powerpc/kernel/head_8xx.S
... | ... | @@ -71,9 +71,6 @@ |
71 | 71 | * in the first level table, but that would require many changes to the |
72 | 72 | * Linux page directory/table functions that I don't want to do right now. |
73 | 73 | * |
74 | - * I used to use SPRG2 for a temporary register in the TLB handler, but it | |
75 | - * has since been put to other uses. I now use a hack to save a register | |
76 | - * and the CCR at memory location 0.....Someday I'll fix this..... | |
77 | 74 | * -- Dan |
78 | 75 | */ |
79 | 76 | .globl __start |
80 | 77 | |
... | ... | @@ -302,8 +299,13 @@ |
302 | 299 | DO_8xx_CPU6(0x3f80, r3) |
303 | 300 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ |
304 | 301 | mfcr r10 |
302 | +#ifdef CONFIG_8xx_CPU6 | |
305 | 303 | stw r10, 0(r0) |
306 | 304 | stw r11, 4(r0) |
305 | +#else | |
306 | + mtspr SPRN_DAR, r10 | |
307 | + mtspr SPRN_SPRG2, r11 | |
308 | +#endif | |
307 | 309 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ |
308 | 310 | #ifdef CONFIG_8xx_CPU15 |
309 | 311 | addi r11, r10, 0x1000 |
310 | 312 | |
... | ... | @@ -318,12 +320,16 @@ |
318 | 320 | /* If we are faulting a kernel address, we have to use the |
319 | 321 | * kernel page tables. |
320 | 322 | */ |
323 | +#ifdef CONFIG_MODULES | |
324 | + /* Only modules will cause ITLB Misses as we always | |
325 | + * pin the first 8MB of kernel memory */ | |
321 | 326 | andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ |
322 | 327 | beq 3f |
323 | 328 | lis r11, swapper_pg_dir@h |
324 | 329 | ori r11, r11, swapper_pg_dir@l |
325 | 330 | rlwimi r10, r11, 0, 2, 19 |
326 | 331 | 3: |
332 | +#endif | |
327 | 333 | lwz r11, 0(r10) /* Get the level 1 entry */ |
328 | 334 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ |
329 | 335 | beq 2f /* If zero, don't try to find a pte */ |
330 | 336 | |
331 | 337 | |
332 | 338 | |
333 | 339 | |
334 | 340 | |
335 | 341 | |
... | ... | @@ -339,31 +345,35 @@ |
339 | 345 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ |
340 | 346 | lwz r10, 0(r11) /* Get the pte */ |
341 | 347 | |
348 | +#ifdef CONFIG_SWAP | |
342 | 349 | andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT |
343 | 350 | cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT |
344 | 351 | bne- cr0, 2f |
345 | - | |
346 | - /* Clear PP lsb, 0x400 */ | |
347 | - rlwinm r10, r10, 0, 22, 20 | |
348 | - | |
352 | +#endif | |
349 | 353 | /* The Linux PTE won't go exactly into the MMU TLB. |
350 | - * Software indicator bits 22 and 28 must be clear. | |
354 | + * Software indicator bits 21 and 28 must be clear. | |
351 | 355 | * Software indicator bits 24, 25, 26, and 27 must be |
352 | 356 | * set. All other Linux PTE bits control the behavior |
353 | 357 | * of the MMU. |
354 | 358 | */ |
355 | 359 | li r11, 0x00f0 |
356 | - rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | |
360 | + rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */ | |
357 | 361 | DO_8xx_CPU6(0x2d80, r3) |
358 | 362 | mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ |
359 | 363 | |
360 | - mfspr r10, SPRN_M_TW /* Restore registers */ | |
364 | + /* Restore registers */ | |
365 | +#ifndef CONFIG_8xx_CPU6 | |
366 | + mfspr r10, SPRN_DAR | |
367 | + mtcr r10 | |
368 | + mtspr SPRN_DAR, r11 /* Tag DAR */ | |
369 | + mfspr r11, SPRN_SPRG2 | |
370 | +#else | |
361 | 371 | lwz r11, 0(r0) |
362 | 372 | mtcr r11 |
363 | 373 | lwz r11, 4(r0) |
364 | -#ifdef CONFIG_8xx_CPU6 | |
365 | 374 | lwz r3, 8(r0) |
366 | 375 | #endif |
376 | + mfspr r10, SPRN_M_TW | |
367 | 377 | rfi |
368 | 378 | 2: |
369 | 379 | mfspr r11, SPRN_SRR1 |
370 | 380 | |
371 | 381 | |
... | ... | @@ -373,13 +383,20 @@ |
373 | 383 | rlwinm r11, r11, 0, 0xffff |
374 | 384 | mtspr SPRN_SRR1, r11 |
375 | 385 | |
376 | - mfspr r10, SPRN_M_TW /* Restore registers */ | |
386 | + /* Restore registers */ | |
387 | +#ifndef CONFIG_8xx_CPU6 | |
388 | + mfspr r10, SPRN_DAR | |
389 | + mtcr r10 | |
390 | + li r11, 0x00f0 | |
391 | + mtspr SPRN_DAR, r11 /* Tag DAR */ | |
392 | + mfspr r11, SPRN_SPRG2 | |
393 | +#else | |
377 | 394 | lwz r11, 0(r0) |
378 | 395 | mtcr r11 |
379 | 396 | lwz r11, 4(r0) |
380 | -#ifdef CONFIG_8xx_CPU6 | |
381 | 397 | lwz r3, 8(r0) |
382 | 398 | #endif |
399 | + mfspr r10, SPRN_M_TW | |
383 | 400 | b InstructionAccess |
384 | 401 | |
385 | 402 | . = 0x1200 |
386 | 403 | |
... | ... | @@ -390,8 +407,13 @@ |
390 | 407 | DO_8xx_CPU6(0x3f80, r3) |
391 | 408 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ |
392 | 409 | mfcr r10 |
410 | +#ifdef CONFIG_8xx_CPU6 | |
393 | 411 | stw r10, 0(r0) |
394 | 412 | stw r11, 4(r0) |
413 | +#else | |
414 | + mtspr SPRN_DAR, r10 | |
415 | + mtspr SPRN_SPRG2, r11 | |
416 | +#endif | |
395 | 417 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ |
396 | 418 | |
397 | 419 | /* If we are faulting a kernel address, we have to use the |
398 | 420 | |
399 | 421 | |
... | ... | @@ -438,15 +460,14 @@ |
438 | 460 | * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); |
439 | 461 | * r10 = (r10 & ~PRESENT) | r11; |
440 | 462 | */ |
463 | +#ifdef CONFIG_SWAP | |
441 | 464 | rlwinm r11, r10, 32-5, _PAGE_PRESENT |
442 | 465 | and r11, r11, r10 |
443 | 466 | rlwimi r10, r11, 0, _PAGE_PRESENT |
444 | - | |
467 | +#endif | |
445 | 468 | /* Honour kernel RO, User NA */ |
446 | 469 | /* 0x200 == Extended encoding, bit 22 */ |
447 | - /* r11 = (r10 & _PAGE_USER) >> 2 */ | |
448 | - rlwinm r11, r10, 32-2, 0x200 | |
449 | - or r10, r11, r10 | |
470 | + rlwimi r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */ | |
450 | 471 | /* r11 = (r10 & _PAGE_RW) >> 1 */ |
451 | 472 | rlwinm r11, r10, 32-1, 0x200 |
452 | 473 | or r10, r11, r10 |
453 | 474 | |
454 | 475 | |
455 | 476 | |
... | ... | @@ -460,18 +481,24 @@ |
460 | 481 | * of the MMU. |
461 | 482 | */ |
462 | 483 | 2: li r11, 0x00f0 |
463 | - mtspr SPRN_DAR,r11 /* Tag DAR */ | |
464 | 484 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ |
465 | 485 | DO_8xx_CPU6(0x3d80, r3) |
466 | 486 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ |
467 | 487 | |
468 | - mfspr r10, SPRN_M_TW /* Restore registers */ | |
488 | + /* Restore registers */ | |
489 | +#ifndef CONFIG_8xx_CPU6 | |
490 | + mfspr r10, SPRN_DAR | |
491 | + mtcr r10 | |
492 | + mtspr SPRN_DAR, r11 /* Tag DAR */ | |
493 | + mfspr r11, SPRN_SPRG2 | |
494 | +#else | |
495 | + mtspr SPRN_DAR, r11 /* Tag DAR */ | |
469 | 496 | lwz r11, 0(r0) |
470 | 497 | mtcr r11 |
471 | 498 | lwz r11, 4(r0) |
472 | -#ifdef CONFIG_8xx_CPU6 | |
473 | 499 | lwz r3, 8(r0) |
474 | 500 | #endif |
501 | + mfspr r10, SPRN_M_TW | |
475 | 502 | rfi |
476 | 503 | |
477 | 504 | /* This is an instruction TLB error on the MPC8xx. This could be due |
... | ... | @@ -683,9 +710,6 @@ |
683 | 710 | tophys(r4,r2) |
684 | 711 | addi r4,r4,THREAD /* init task's THREAD */ |
685 | 712 | mtspr SPRN_SPRG_THREAD,r4 |
686 | - li r3,0 | |
687 | - /* XXX What is that for ? SPRG2 appears otherwise unused on 8xx */ | |
688 | - mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */ | |
689 | 713 | |
690 | 714 | /* stack */ |
691 | 715 | lis r1,init_thread_union@ha |
arch/powerpc/kernel/head_booke.h
1 | 1 | #ifndef __HEAD_BOOKE_H__ |
2 | 2 | #define __HEAD_BOOKE_H__ |
3 | 3 | |
4 | +#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */ | |
4 | 5 | /* |
5 | 6 | * Macros used for common Book-e exception handling |
6 | 7 | */ |
... | ... | @@ -48,6 +49,9 @@ |
48 | 49 | stw r10,0(r11); \ |
49 | 50 | rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ |
50 | 51 | stw r0,GPR0(r11); \ |
52 | + lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \ | |
53 | + addi r10, r10, STACK_FRAME_REGS_MARKER@l; \ | |
54 | + stw r10, 8(r11); \ | |
51 | 55 | SAVE_4GPRS(3, r11); \ |
52 | 56 | SAVE_2GPRS(7, r11) |
53 | 57 |
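The STACK_FRAME_REGS_MARKER word stored at 8(r11) above tags Book-E exception frames so the stack unwinder can recognise frames that carry a saved pt_regs and print them as exception frames in backtraces. The marker itself is just an ASCII tag; a minimal user-space sketch, with the constant's value assumed from asm/ptrace.h (this hunk only includes the header):

    #include <stdio.h>

    int main(void)
    {
            /* assumed value of STACK_FRAME_REGS_MARKER */
            unsigned long long marker = 0x7265677368657265ULL;
            int i;

            for (i = 7; i >= 0; i--)
                    putchar((int)((marker >> (i * 8)) & 0xff));  /* prints "regshere" */
            putchar('\n');
            return 0;
    }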
arch/powerpc/kernel/head_fsl_booke.S
... | ... | @@ -639,6 +639,13 @@ |
639 | 639 | rlwinm r12,r12,0,16,1 |
640 | 640 | mtspr SPRN_MAS1,r12 |
641 | 641 | |
642 | + /* Make up the required permissions for kernel code */ | |
643 | +#ifdef CONFIG_PTE_64BIT | |
644 | + li r13,_PAGE_PRESENT | _PAGE_BAP_SX | |
645 | + oris r13,r13,_PAGE_ACCESSED@h | |
646 | +#else | |
647 | + li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | |
648 | +#endif | |
642 | 649 | b 4f |
643 | 650 | |
644 | 651 | /* Get the PGD for the current thread */ |
645 | 652 | |
646 | 653 | |
... | ... | @@ -646,15 +653,15 @@ |
646 | 653 | mfspr r11,SPRN_SPRG_THREAD |
647 | 654 | lwz r11,PGDIR(r11) |
648 | 655 | |
649 | -4: | |
650 | - /* Make up the required permissions */ | |
656 | + /* Make up the required permissions for user code */ | |
651 | 657 | #ifdef CONFIG_PTE_64BIT |
652 | - li r13,_PAGE_PRESENT | _PAGE_EXEC | |
658 | + li r13,_PAGE_PRESENT | _PAGE_BAP_UX | |
653 | 659 | oris r13,r13,_PAGE_ACCESSED@h |
654 | 660 | #else |
655 | 661 | li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC |
656 | 662 | #endif |
657 | 663 | |
664 | +4: | |
658 | 665 | FIND_PTE |
659 | 666 | andc. r13,r13,r11 /* Check permission */ |
660 | 667 |
arch/powerpc/kernel/iommu.c
... | ... | @@ -43,20 +43,9 @@ |
43 | 43 | #define DBG(...) |
44 | 44 | |
45 | 45 | static int novmerge; |
46 | -static int protect4gb = 1; | |
47 | 46 | |
48 | 47 | static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); |
49 | 48 | |
50 | -static int __init setup_protect4gb(char *str) | |
51 | -{ | |
52 | - if (strcmp(str, "on") == 0) | |
53 | - protect4gb = 1; | |
54 | - else if (strcmp(str, "off") == 0) | |
55 | - protect4gb = 0; | |
56 | - | |
57 | - return 1; | |
58 | -} | |
59 | - | |
60 | 49 | static int __init setup_iommu(char *str) |
61 | 50 | { |
62 | 51 | if (!strcmp(str, "novmerge")) |
... | ... | @@ -66,7 +55,6 @@ |
66 | 55 | return 1; |
67 | 56 | } |
68 | 57 | |
69 | -__setup("protect4gb=", setup_protect4gb); | |
70 | 58 | __setup("iommu=", setup_iommu); |
71 | 59 | |
72 | 60 | static unsigned long iommu_range_alloc(struct device *dev, |
arch/powerpc/kernel/irq.c
... | ... | @@ -284,29 +284,32 @@ |
284 | 284 | } |
285 | 285 | |
286 | 286 | #ifdef CONFIG_HOTPLUG_CPU |
287 | -void fixup_irqs(cpumask_t map) | |
287 | +void fixup_irqs(const struct cpumask *map) | |
288 | 288 | { |
289 | 289 | struct irq_desc *desc; |
290 | 290 | unsigned int irq; |
291 | 291 | static int warned; |
292 | + cpumask_var_t mask; | |
292 | 293 | |
293 | - for_each_irq(irq) { | |
294 | - cpumask_t mask; | |
294 | + alloc_cpumask_var(&mask, GFP_KERNEL); | |
295 | 295 | |
296 | + for_each_irq(irq) { | |
296 | 297 | desc = irq_to_desc(irq); |
297 | 298 | if (desc && desc->status & IRQ_PER_CPU) |
298 | 299 | continue; |
299 | 300 | |
300 | - cpumask_and(&mask, desc->affinity, &map); | |
301 | - if (any_online_cpu(mask) == NR_CPUS) { | |
301 | + cpumask_and(mask, desc->affinity, map); | |
302 | + if (cpumask_any(mask) >= nr_cpu_ids) { | |
302 | 303 | printk("Breaking affinity for irq %i\n", irq); |
303 | - mask = map; | |
304 | + cpumask_copy(mask, map); | |
304 | 305 | } |
305 | 306 | if (desc->chip->set_affinity) |
306 | - desc->chip->set_affinity(irq, &mask); | |
307 | + desc->chip->set_affinity(irq, mask); | |
307 | 308 | else if (desc->action && !(warned++)) |
308 | 309 | printk("Cannot set affinity for irq %i\n", irq); |
309 | 310 | } |
311 | + | |
312 | + free_cpumask_var(mask); | |
310 | 313 | |
311 | 314 | local_irq_enable(); |
312 | 315 | mdelay(1); |
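fixup_irqs() now takes a const struct cpumask * and builds its scratch mask as a cpumask_var_t rather than a cpumask_t on the stack, which keeps the frame small on large-NR_CPUS builds (with CONFIG_CPUMASK_OFFSTACK the mask lives on the heap). A minimal sketch of the same allocate/use/free pattern; the function name is illustrative and not part of this commit:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Pick any online CPU from "in", mirroring the pattern used above. */
    static int example_pick_online(const struct cpumask *in)
    {
            cpumask_var_t tmp;
            int cpu;

            if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_and(tmp, in, cpu_online_mask);
            cpu = cpumask_any(tmp);            /* >= nr_cpu_ids if empty */
            free_cpumask_var(tmp);

            return cpu < nr_cpu_ids ? cpu : -ENODEV;
    }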
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/lparcfg.c
... | ... | @@ -38,7 +38,7 @@ |
38 | 38 | #include <asm/vio.h> |
39 | 39 | #include <asm/mmu.h> |
40 | 40 | |
41 | -#define MODULE_VERS "1.8" | |
41 | +#define MODULE_VERS "1.9" | |
42 | 42 | #define MODULE_NAME "lparcfg" |
43 | 43 | |
44 | 44 | /* #define LPARCFG_DEBUG */ |
... | ... | @@ -487,6 +487,14 @@ |
487 | 487 | seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions); |
488 | 488 | } |
489 | 489 | |
490 | +static void parse_em_data(struct seq_file *m) | |
491 | +{ | |
492 | + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | |
493 | + | |
494 | + if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) | |
495 | + seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); | |
496 | +} | |
497 | + | |
490 | 498 | static int pseries_lparcfg_data(struct seq_file *m, void *v) |
491 | 499 | { |
492 | 500 | int partition_potential_processors; |
... | ... | @@ -540,6 +548,8 @@ |
540 | 548 | seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc); |
541 | 549 | |
542 | 550 | seq_printf(m, "slb_size=%d\n", mmu_slb_size); |
551 | + | |
552 | + parse_em_data(m); | |
543 | 553 | |
544 | 554 | return 0; |
545 | 555 | } |
arch/powerpc/kernel/machine_kexec_64.c
... | ... | @@ -155,33 +155,38 @@ |
155 | 155 | |
156 | 156 | #ifdef CONFIG_SMP |
157 | 157 | |
158 | -/* FIXME: we should schedule this function to be called on all cpus based | |
159 | - * on calling the interrupts, but we would like to call it off irq level | |
160 | - * so that the interrupt controller is clean. | |
161 | - */ | |
158 | +static int kexec_all_irq_disabled = 0; | |
159 | + | |
162 | 160 | static void kexec_smp_down(void *arg) |
163 | 161 | { |
162 | + local_irq_disable(); | |
163 | + mb(); /* make sure our irqs are disabled before we say they are */ | |
164 | + get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; | |
165 | + while(kexec_all_irq_disabled == 0) | |
166 | + cpu_relax(); | |
167 | + mb(); /* make sure all irqs are disabled before this */ | |
168 | + /* | |
169 | + * Now every CPU has IRQs off, we can clear out any pending | |
170 | + * IPIs and be sure that no more will come in after this. | |
171 | + */ | |
164 | 172 | if (ppc_md.kexec_cpu_down) |
165 | 173 | ppc_md.kexec_cpu_down(0, 1); |
166 | 174 | |
167 | - local_irq_disable(); | |
168 | 175 | kexec_smp_wait(); |
169 | 176 | /* NOTREACHED */ |
170 | 177 | } |
171 | 178 | |
172 | -static void kexec_prepare_cpus(void) | |
179 | +static void kexec_prepare_cpus_wait(int wait_state) | |
173 | 180 | { |
174 | 181 | int my_cpu, i, notified=-1; |
175 | 182 | |
176 | - smp_call_function(kexec_smp_down, NULL, /* wait */0); | |
177 | 183 | my_cpu = get_cpu(); |
178 | - | |
179 | - /* check the others cpus are now down (via paca hw cpu id == -1) */ | |
184 | + /* Make sure each CPU has atleast made it to the state we need */ | |
180 | 185 | for (i=0; i < NR_CPUS; i++) { |
181 | 186 | if (i == my_cpu) |
182 | 187 | continue; |
183 | 188 | |
184 | - while (paca[i].hw_cpu_id != -1) { | |
189 | + while (paca[i].kexec_state < wait_state) { | |
185 | 190 | barrier(); |
186 | 191 | if (!cpu_possible(i)) { |
187 | 192 | printk("kexec: cpu %d hw_cpu_id %d is not" |
188 | 193 | |
189 | 194 | |
190 | 195 | |
191 | 196 | |
... | ... | @@ -201,20 +206,35 @@ |
201 | 206 | } |
202 | 207 | if (i != notified) { |
203 | 208 | printk( "kexec: waiting for cpu %d (physical" |
204 | - " %d) to go down\n", | |
205 | - i, paca[i].hw_cpu_id); | |
209 | + " %d) to enter %i state\n", | |
210 | + i, paca[i].hw_cpu_id, wait_state); | |
206 | 211 | notified = i; |
207 | 212 | } |
208 | 213 | } |
209 | 214 | } |
215 | + mb(); | |
216 | +} | |
210 | 217 | |
218 | +static void kexec_prepare_cpus(void) | |
219 | +{ | |
220 | + | |
221 | + smp_call_function(kexec_smp_down, NULL, /* wait */0); | |
222 | + local_irq_disable(); | |
223 | + mb(); /* make sure IRQs are disabled before we say they are */ | |
224 | + get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; | |
225 | + | |
226 | + kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF); | |
227 | + /* we are sure every CPU has IRQs off at this point */ | |
228 | + kexec_all_irq_disabled = 1; | |
229 | + | |
211 | 230 | /* after we tell the others to go down */ |
212 | 231 | if (ppc_md.kexec_cpu_down) |
213 | 232 | ppc_md.kexec_cpu_down(0, 0); |
214 | 233 | |
215 | - put_cpu(); | |
234 | + /* Before removing MMU mappings, make sure all CPUs have entered real mode */ | |
235 | + kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE); | |
216 | 236 | |
217 | - local_irq_disable(); | |
237 | + put_cpu(); | |
218 | 238 | } |
219 | 239 | |
220 | 240 | #else /* ! SMP */ |
arch/powerpc/kernel/misc_32.S
... | ... | @@ -441,7 +441,7 @@ |
441 | 441 | addi r3,r3,L1_CACHE_BYTES |
442 | 442 | bdnz 0b |
443 | 443 | sync |
444 | -#ifndef CONFIG_44x | |
444 | +#ifdef CONFIG_44x | |
445 | 445 | /* We don't flush the icache on 44x. Those have a virtual icache |
446 | 446 | * and we don't have access to the virtual address here (it's |
447 | 447 | * not the page vaddr but where it's mapped in user space). The |
448 | 448 | |
449 | 449 | |
... | ... | @@ -449,15 +449,19 @@ |
449 | 449 | * a change in the address space occurs, before returning to |
450 | 450 | * user space |
451 | 451 | */ |
452 | +BEGIN_MMU_FTR_SECTION | |
453 | + blr | |
454 | +END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x) | |
455 | +#endif /* CONFIG_44x */ | |
452 | 456 | mtctr r4 |
453 | 457 | 1: icbi 0,r6 |
454 | 458 | addi r6,r6,L1_CACHE_BYTES |
455 | 459 | bdnz 1b |
456 | 460 | sync |
457 | 461 | isync |
458 | -#endif /* CONFIG_44x */ | |
459 | 462 | blr |
460 | 463 | |
464 | +#ifndef CONFIG_BOOKE | |
461 | 465 | /* |
462 | 466 | * Flush a particular page from the data cache to RAM, identified |
463 | 467 | * by its physical address. We turn off the MMU so we can just use |
... | ... | @@ -490,6 +494,7 @@ |
490 | 494 | mtmsr r10 /* restore DR */ |
491 | 495 | isync |
492 | 496 | blr |
497 | +#endif /* CONFIG_BOOKE */ | |
493 | 498 | |
494 | 499 | /* |
495 | 500 | * Clear pages using the dcbz instruction, which doesn't cause any |
arch/powerpc/kernel/misc_64.S
... | ... | @@ -24,6 +24,7 @@ |
24 | 24 | #include <asm/asm-offsets.h> |
25 | 25 | #include <asm/cputable.h> |
26 | 26 | #include <asm/thread_info.h> |
27 | +#include <asm/kexec.h> | |
27 | 28 | |
28 | 29 | .text |
29 | 30 | |
... | ... | @@ -471,6 +472,10 @@ |
471 | 472 | 1: mflr r5 |
472 | 473 | addi r5,r5,kexec_flag-1b |
473 | 474 | |
475 | + li r4,KEXEC_STATE_REAL_MODE | |
476 | + stb r4,PACAKEXECSTATE(r13) | |
477 | + SYNC | |
478 | + | |
474 | 479 | 99: HMT_LOW |
475 | 480 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ |
476 | 481 | lwz r4,0(r5) |
477 | 482 | |
... | ... | @@ -494,14 +499,11 @@ |
494 | 499 | * note: this is a terminal routine, it does not save lr |
495 | 500 | * |
496 | 501 | * get phys id from paca |
497 | - * set paca id to -1 to say we got here | |
498 | 502 | * switch to real mode |
499 | 503 | * join other cpus in kexec_wait(phys_id) |
500 | 504 | */ |
501 | 505 | _GLOBAL(kexec_smp_wait) |
502 | 506 | lhz r3,PACAHWCPUID(r13) |
503 | - li r4,-1 | |
504 | - sth r4,PACAHWCPUID(r13) /* let others know we left */ | |
505 | 507 | bl real_mode |
506 | 508 | b .kexec_wait |
507 | 509 |
arch/powerpc/kernel/paca.c
... | ... | @@ -18,6 +18,7 @@ |
18 | 18 | #include <asm/pgtable.h> |
19 | 19 | #include <asm/iseries/lpar_map.h> |
20 | 20 | #include <asm/iseries/hv_types.h> |
21 | +#include <asm/kexec.h> | |
21 | 22 | |
22 | 23 | /* This symbol is provided by the linker - let it fill in the paca |
23 | 24 | * field correctly */ |
... | ... | @@ -97,6 +98,7 @@ |
97 | 98 | new_paca->kernelbase = (unsigned long) _stext; |
98 | 99 | new_paca->kernel_msr = MSR_KERNEL; |
99 | 100 | new_paca->hw_cpu_id = 0xffff; |
101 | + new_paca->kexec_state = KEXEC_STATE_NONE; | |
100 | 102 | new_paca->__current = &init_task; |
101 | 103 | #ifdef CONFIG_PPC_STD_MMU_64 |
102 | 104 | new_paca->slb_shadow_ptr = &slb_shadow[cpu]; |
arch/powerpc/kernel/pci_of_scan.c
... | ... | @@ -310,6 +310,8 @@ |
310 | 310 | /* Scan direct children */ |
311 | 311 | for_each_child_of_node(node, child) { |
312 | 312 | pr_debug(" * %s\n", child->full_name); |
313 | + if (!of_device_is_available(child)) | |
314 | + continue; | |
313 | 315 | reg = of_get_property(child, "reg", ®len); |
314 | 316 | if (reg == NULL || reglen < 20) |
315 | 317 | continue; |
arch/powerpc/kernel/process.c
arch/powerpc/kernel/ptrace.c
... | ... | @@ -39,6 +39,109 @@ |
39 | 39 | #include <asm/system.h> |
40 | 40 | |
41 | 41 | /* |
42 | + * The parameter save area on the stack is used to store arguments being passed | |
43 | + * to a callee function and is located at a fixed offset from the stack pointer. | |
44 | + */ | |
45 | +#ifdef CONFIG_PPC32 | |
46 | +#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */ | |
47 | +#else /* CONFIG_PPC32 */ | |
48 | +#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */ | |
49 | +#endif | |
50 | + | |
51 | +struct pt_regs_offset { | |
52 | + const char *name; | |
53 | + int offset; | |
54 | +}; | |
55 | + | |
56 | +#define STR(s) #s /* convert to string */ | |
57 | +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} | |
58 | +#define GPR_OFFSET_NAME(num) \ | |
59 | + {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} | |
60 | +#define REG_OFFSET_END {.name = NULL, .offset = 0} | |
61 | + | |
62 | +static const struct pt_regs_offset regoffset_table[] = { | |
63 | + GPR_OFFSET_NAME(0), | |
64 | + GPR_OFFSET_NAME(1), | |
65 | + GPR_OFFSET_NAME(2), | |
66 | + GPR_OFFSET_NAME(3), | |
67 | + GPR_OFFSET_NAME(4), | |
68 | + GPR_OFFSET_NAME(5), | |
69 | + GPR_OFFSET_NAME(6), | |
70 | + GPR_OFFSET_NAME(7), | |
71 | + GPR_OFFSET_NAME(8), | |
72 | + GPR_OFFSET_NAME(9), | |
73 | + GPR_OFFSET_NAME(10), | |
74 | + GPR_OFFSET_NAME(11), | |
75 | + GPR_OFFSET_NAME(12), | |
76 | + GPR_OFFSET_NAME(13), | |
77 | + GPR_OFFSET_NAME(14), | |
78 | + GPR_OFFSET_NAME(15), | |
79 | + GPR_OFFSET_NAME(16), | |
80 | + GPR_OFFSET_NAME(17), | |
81 | + GPR_OFFSET_NAME(18), | |
82 | + GPR_OFFSET_NAME(19), | |
83 | + GPR_OFFSET_NAME(20), | |
84 | + GPR_OFFSET_NAME(21), | |
85 | + GPR_OFFSET_NAME(22), | |
86 | + GPR_OFFSET_NAME(23), | |
87 | + GPR_OFFSET_NAME(24), | |
88 | + GPR_OFFSET_NAME(25), | |
89 | + GPR_OFFSET_NAME(26), | |
90 | + GPR_OFFSET_NAME(27), | |
91 | + GPR_OFFSET_NAME(28), | |
92 | + GPR_OFFSET_NAME(29), | |
93 | + GPR_OFFSET_NAME(30), | |
94 | + GPR_OFFSET_NAME(31), | |
95 | + REG_OFFSET_NAME(nip), | |
96 | + REG_OFFSET_NAME(msr), | |
97 | + REG_OFFSET_NAME(ctr), | |
98 | + REG_OFFSET_NAME(link), | |
99 | + REG_OFFSET_NAME(xer), | |
100 | + REG_OFFSET_NAME(ccr), | |
101 | +#ifdef CONFIG_PPC64 | |
102 | + REG_OFFSET_NAME(softe), | |
103 | +#else | |
104 | + REG_OFFSET_NAME(mq), | |
105 | +#endif | |
106 | + REG_OFFSET_NAME(trap), | |
107 | + REG_OFFSET_NAME(dar), | |
108 | + REG_OFFSET_NAME(dsisr), | |
109 | + REG_OFFSET_END, | |
110 | +}; | |
111 | + | |
112 | +/** | |
113 | + * regs_query_register_offset() - query register offset from its name | |
114 | + * @name: the name of a register | |
115 | + * | |
116 | + * regs_query_register_offset() returns the offset of a register in struct | |
117 | + * pt_regs from its name. If the name is invalid, this returns -EINVAL; | |
118 | + */ | |
119 | +int regs_query_register_offset(const char *name) | |
120 | +{ | |
121 | + const struct pt_regs_offset *roff; | |
122 | + for (roff = regoffset_table; roff->name != NULL; roff++) | |
123 | + if (!strcmp(roff->name, name)) | |
124 | + return roff->offset; | |
125 | + return -EINVAL; | |
126 | +} | |
127 | + | |
128 | +/** | |
129 | + * regs_query_register_name() - query register name from its offset | |
130 | + * @offset: the offset of a register in struct pt_regs. | |
131 | + * | |
132 | + * regs_query_register_name() returns the name of a register from its | |
133 | + * offset in struct pt_regs. If the @offset is invalid, this returns NULL; | |
134 | + */ | |
135 | +const char *regs_query_register_name(unsigned int offset) | |
136 | +{ | |
137 | + const struct pt_regs_offset *roff; | |
138 | + for (roff = regoffset_table; roff->name != NULL; roff++) | |
139 | + if (roff->offset == offset) | |
140 | + return roff->name; | |
141 | + return NULL; | |
142 | +} | |
143 | + | |
144 | +/* | |
42 | 145 | * does not yet catch signals sent when the child dies. |
43 | 146 | * in exit.c or in signal.c. |
44 | 147 | */ |
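The regoffset_table and the two lookup helpers added above provide a name/offset mapping for struct pt_regs, as used by instrumentation such as kprobes-based tracing. A hypothetical usage sketch; the probe handler, the "gpr3" lookup and the pointer arithmetic are illustrative only and not part of this change:

    #include <linux/kernel.h>
    #include <linux/kprobes.h>
    #include <asm/ptrace.h>

    /* Dump one register, looked up by name, from a probe's register snapshot. */
    static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            int off = regs_query_register_offset("gpr3");   /* -EINVAL if unknown */

            if (off >= 0)
                    pr_info("%s = 0x%lx\n", regs_query_register_name(off),
                            *(unsigned long *)((char *)regs + off));
            return 0;
    }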
arch/powerpc/kernel/rtas.c
... | ... | @@ -691,12 +691,16 @@ |
691 | 691 | { |
692 | 692 | int status; |
693 | 693 | |
694 | - if (panic_timeout) | |
694 | + /* | |
695 | + * Firmware with the ibm,extended-os-term property is guaranteed | |
696 | + * to always return from an ibm,os-term call. Earlier versions without | |
697 | + * this property may terminate the partition which we want to avoid | |
698 | + * since it interferes with panic_timeout. | |
699 | + */ | |
700 | + if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") || | |
701 | + RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term")) | |
695 | 702 | return; |
696 | 703 | |
697 | - if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term")) | |
698 | - return; | |
699 | - | |
700 | 704 | snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); |
701 | 705 | |
702 | 706 | do { |
... | ... | @@ -705,8 +709,7 @@ |
705 | 709 | } while (rtas_busy_delay(status)); |
706 | 710 | |
707 | 711 | if (status != 0) |
708 | - printk(KERN_EMERG "ibm,os-term call failed %d\n", | |
709 | - status); | |
712 | + printk(KERN_EMERG "ibm,os-term call failed %d\n", status); | |
710 | 713 | } |
711 | 714 | |
712 | 715 | static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; |
arch/powerpc/kernel/rtasd.c
... | ... | @@ -411,9 +411,9 @@ |
411 | 411 | |
412 | 412 | get_online_cpus(); |
413 | 413 | |
414 | - cpu = next_cpu(smp_processor_id(), cpu_online_map); | |
415 | - if (cpu == NR_CPUS) { | |
416 | - cpu = first_cpu(cpu_online_map); | |
414 | + cpu = cpumask_next(smp_processor_id(), cpu_online_mask); | |
415 | + if (cpu >= nr_cpu_ids) { | |
416 | + cpu = cpumask_first(cpu_online_mask); | |
417 | 417 | |
418 | 418 | if (first_pass) { |
419 | 419 | first_pass = 0; |
... | ... | @@ -466,8 +466,8 @@ |
466 | 466 | /* Retreive errors from nvram if any */ |
467 | 467 | retreive_nvram_error_log(); |
468 | 468 | |
469 | - schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work, | |
470 | - event_scan_delay); | |
469 | + schedule_delayed_work_on(cpumask_first(cpu_online_mask), | |
470 | + &event_scan_work, event_scan_delay); | |
471 | 471 | } |
472 | 472 | |
473 | 473 | static int __init rtas_init(void) |
... | ... | @@ -488,6 +488,12 @@ |
488 | 488 | if (rtas_event_scan_rate == RTAS_UNKNOWN_SERVICE) { |
489 | 489 | printk(KERN_ERR "rtasd: no rtas-event-scan-rate on system\n"); |
490 | 490 | return -ENODEV; |
491 | + } | |
492 | + | |
493 | + if (!rtas_event_scan_rate) { | |
494 | + /* Broken firmware: take a rate of zero to mean don't scan */ | |
495 | + printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n"); | |
496 | + return 0; | |
491 | 497 | } |
492 | 498 | |
493 | 499 | /* Make room for the sequence number */ |
arch/powerpc/kernel/setup-common.c
... | ... | @@ -161,45 +161,44 @@ |
161 | 161 | DEFINE_PER_CPU(unsigned int, cpu_pvr); |
162 | 162 | #endif |
163 | 163 | |
164 | -static int show_cpuinfo(struct seq_file *m, void *v) | |
164 | +static void show_cpuinfo_summary(struct seq_file *m) | |
165 | 165 | { |
166 | - unsigned long cpu_id = (unsigned long)v - 1; | |
167 | - unsigned int pvr; | |
168 | - unsigned short maj; | |
169 | - unsigned short min; | |
170 | - | |
171 | - if (cpu_id == NR_CPUS) { | |
172 | - struct device_node *root; | |
173 | - const char *model = NULL; | |
166 | + struct device_node *root; | |
167 | + const char *model = NULL; | |
174 | 168 | #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) |
175 | - unsigned long bogosum = 0; | |
176 | - int i; | |
177 | - for_each_online_cpu(i) | |
178 | - bogosum += loops_per_jiffy; | |
179 | - seq_printf(m, "total bogomips\t: %lu.%02lu\n", | |
180 | - bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); | |
169 | + unsigned long bogosum = 0; | |
170 | + int i; | |
171 | + for_each_online_cpu(i) | |
172 | + bogosum += loops_per_jiffy; | |
173 | + seq_printf(m, "total bogomips\t: %lu.%02lu\n", | |
174 | + bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); | |
181 | 175 | #endif /* CONFIG_SMP && CONFIG_PPC32 */ |
182 | - seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); | |
183 | - if (ppc_md.name) | |
184 | - seq_printf(m, "platform\t: %s\n", ppc_md.name); | |
185 | - root = of_find_node_by_path("/"); | |
186 | - if (root) | |
187 | - model = of_get_property(root, "model", NULL); | |
188 | - if (model) | |
189 | - seq_printf(m, "model\t\t: %s\n", model); | |
190 | - of_node_put(root); | |
176 | + seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); | |
177 | + if (ppc_md.name) | |
178 | + seq_printf(m, "platform\t: %s\n", ppc_md.name); | |
179 | + root = of_find_node_by_path("/"); | |
180 | + if (root) | |
181 | + model = of_get_property(root, "model", NULL); | |
182 | + if (model) | |
183 | + seq_printf(m, "model\t\t: %s\n", model); | |
184 | + of_node_put(root); | |
191 | 185 | |
192 | - if (ppc_md.show_cpuinfo != NULL) | |
193 | - ppc_md.show_cpuinfo(m); | |
186 | + if (ppc_md.show_cpuinfo != NULL) | |
187 | + ppc_md.show_cpuinfo(m); | |
194 | 188 | |
195 | 189 | #ifdef CONFIG_PPC32 |
196 | - /* Display the amount of memory */ | |
197 | - seq_printf(m, "Memory\t\t: %d MB\n", | |
198 | - (unsigned int)(total_memory / (1024 * 1024))); | |
190 | + /* Display the amount of memory */ | |
191 | + seq_printf(m, "Memory\t\t: %d MB\n", | |
192 | + (unsigned int)(total_memory / (1024 * 1024))); | |
199 | 193 | #endif |
194 | +} | |
200 | 195 | |
201 | - return 0; | |
202 | - } | |
196 | +static int show_cpuinfo(struct seq_file *m, void *v) | |
197 | +{ | |
198 | + unsigned long cpu_id = (unsigned long)v - 1; | |
199 | + unsigned int pvr; | |
200 | + unsigned short maj; | |
201 | + unsigned short min; | |
203 | 202 | |
204 | 203 | /* We only show online cpus: disable preempt (overzealous, I |
205 | 204 | * knew) to prevent cpu going down. */ |
206 | 205 | |
207 | 206 | |
... | ... | @@ -308,19 +307,28 @@ |
308 | 307 | #endif |
309 | 308 | |
310 | 309 | preempt_enable(); |
310 | + | |
311 | + /* If this is the last cpu, print the summary */ | |
312 | + if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) | |
313 | + show_cpuinfo_summary(m); | |
314 | + | |
311 | 315 | return 0; |
312 | 316 | } |
313 | 317 | |
314 | 318 | static void *c_start(struct seq_file *m, loff_t *pos) |
315 | 319 | { |
316 | - unsigned long i = *pos; | |
317 | - | |
318 | - return i <= NR_CPUS ? (void *)(i + 1) : NULL; | |
320 | + if (*pos == 0) /* just in case, cpu 0 is not the first */ | |
321 | + *pos = cpumask_first(cpu_online_mask); | |
322 | + else | |
323 | + *pos = cpumask_next(*pos - 1, cpu_online_mask); | |
324 | + if ((*pos) < nr_cpu_ids) | |
325 | + return (void *)(unsigned long)(*pos + 1); | |
326 | + return NULL; | |
319 | 327 | } |
320 | 328 | |
321 | 329 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) |
322 | 330 | { |
323 | - ++*pos; | |
331 | + (*pos)++; | |
324 | 332 | return c_start(m, pos); |
325 | 333 | } |
326 | 334 | |
327 | 335 | |
... | ... | @@ -386,14 +394,14 @@ |
386 | 394 | |
387 | 395 | /** |
388 | 396 | * setup_cpu_maps - initialize the following cpu maps: |
389 | - * cpu_possible_map | |
390 | - * cpu_present_map | |
397 | + * cpu_possible_mask | |
398 | + * cpu_present_mask | |
391 | 399 | * |
392 | 400 | * Having the possible map set up early allows us to restrict allocations |
393 | 401 | * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. |
394 | 402 | * |
395 | 403 | * We do not initialize the online map here; cpus set their own bits in |
396 | - * cpu_online_map as they come up. | |
404 | + * cpu_online_mask as they come up. | |
397 | 405 | * |
398 | 406 | * This function is valid only for Open Firmware systems. finish_device_tree |
399 | 407 | * must be called before using this. |
arch/powerpc/kernel/setup_64.c
... | ... | @@ -424,9 +424,18 @@ |
424 | 424 | DBG(" <- setup_system()\n"); |
425 | 425 | } |
426 | 426 | |
427 | +static u64 slb0_limit(void) | |
428 | +{ | |
429 | + if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { | |
430 | + return 1UL << SID_SHIFT_1T; | |
431 | + } | |
432 | + return 1UL << SID_SHIFT; | |
433 | +} | |
434 | + | |
427 | 435 | #ifdef CONFIG_IRQSTACKS |
428 | 436 | static void __init irqstack_early_init(void) |
429 | 437 | { |
438 | + u64 limit = slb0_limit(); | |
430 | 439 | unsigned int i; |
431 | 440 | |
432 | 441 | /* |
433 | 442 | |
... | ... | @@ -436,10 +445,10 @@ |
436 | 445 | for_each_possible_cpu(i) { |
437 | 446 | softirq_ctx[i] = (struct thread_info *) |
438 | 447 | __va(lmb_alloc_base(THREAD_SIZE, |
439 | - THREAD_SIZE, 0x10000000)); | |
448 | + THREAD_SIZE, limit)); | |
440 | 449 | hardirq_ctx[i] = (struct thread_info *) |
441 | 450 | __va(lmb_alloc_base(THREAD_SIZE, |
442 | - THREAD_SIZE, 0x10000000)); | |
451 | + THREAD_SIZE, limit)); | |
443 | 452 | } |
444 | 453 | } |
445 | 454 | #else |
... | ... | @@ -470,7 +479,7 @@ |
470 | 479 | */ |
471 | 480 | static void __init emergency_stack_init(void) |
472 | 481 | { |
473 | - unsigned long limit; | |
482 | + u64 limit; | |
474 | 483 | unsigned int i; |
475 | 484 | |
476 | 485 | /* |
... | ... | @@ -482,7 +491,7 @@ |
482 | 491 | * bringup, we need to get at them in real mode. This means they |
483 | 492 | * must also be within the RMO region. |
484 | 493 | */ |
485 | - limit = min(0x10000000ULL, lmb.rmo_size); | |
494 | + limit = min(slb0_limit(), lmb.rmo_size); | |
486 | 495 | |
487 | 496 | for_each_possible_cpu(i) { |
488 | 497 | unsigned long sp; |
... | ... | @@ -571,12 +580,6 @@ |
571 | 580 | { |
572 | 581 | ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg); |
573 | 582 | printk("[boot]%04x %s\n", src, msg); |
574 | -} | |
575 | - | |
576 | -void cpu_die(void) | |
577 | -{ | |
578 | - if (ppc_md.cpu_die) | |
579 | - ppc_md.cpu_die(); | |
580 | 583 | } |
581 | 584 | |
582 | 585 | #ifdef CONFIG_SMP |
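For scale: the hard-coded 0x10000000 that slb0_limit() replaces above is exactly 1 << 28, the size of one 256MB SLB segment, so the change only raises the ceiling for these early allocations (irq stacks, emergency stacks) to 1TB when CPU_FTR_1T_SEGMENT is present. A stand-alone sketch of the two limits; the SID_SHIFT values of 28 and 40 are assumed from the hash-MMU headers, which are not shown in this hunk:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long seg_256m = 1ULL << 28;   /* SID_SHIFT    */
            unsigned long long seg_1t   = 1ULL << 40;   /* SID_SHIFT_1T */

            printf("first-segment limit, 256MB segments: %#llx (%llu MB)\n",
                   seg_256m, seg_256m >> 20);
            printf("first-segment limit, 1T segments:    %#llx (%llu GB)\n",
                   seg_1t, seg_1t >> 30);
            return 0;
    }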
arch/powerpc/kernel/smp.c
... | ... | @@ -59,8 +59,8 @@ |
59 | 59 | |
60 | 60 | struct thread_info *secondary_ti; |
61 | 61 | |
62 | -DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; | |
63 | -DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; | |
62 | +DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); | |
63 | +DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); | |
64 | 64 | |
65 | 65 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
66 | 66 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
... | ... | @@ -271,6 +271,16 @@ |
271 | 271 | smp_store_cpu_info(boot_cpuid); |
272 | 272 | cpu_callin_map[boot_cpuid] = 1; |
273 | 273 | |
274 | + for_each_possible_cpu(cpu) { | |
275 | + zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), | |
276 | + GFP_KERNEL, cpu_to_node(cpu)); | |
277 | + zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), | |
278 | + GFP_KERNEL, cpu_to_node(cpu)); | |
279 | + } | |
280 | + | |
281 | + cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); | |
282 | + cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); | |
283 | + | |
274 | 284 | if (smp_ops) |
275 | 285 | if (smp_ops->probe) |
276 | 286 | max_cpus = smp_ops->probe(); |
... | ... | @@ -289,10 +299,6 @@ |
289 | 299 | void __devinit smp_prepare_boot_cpu(void) |
290 | 300 | { |
291 | 301 | BUG_ON(smp_processor_id() != boot_cpuid); |
292 | - | |
293 | - set_cpu_online(boot_cpuid, true); | |
294 | - cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid)); | |
295 | - cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid)); | |
296 | 302 | #ifdef CONFIG_PPC64 |
297 | 303 | paca[boot_cpuid].__current = current; |
298 | 304 | #endif |
... | ... | @@ -313,7 +319,7 @@ |
313 | 319 | set_cpu_online(cpu, false); |
314 | 320 | #ifdef CONFIG_PPC64 |
315 | 321 | vdso_data->processorCount--; |
316 | - fixup_irqs(cpu_online_map); | |
322 | + fixup_irqs(cpu_online_mask); | |
317 | 323 | #endif |
318 | 324 | return 0; |
319 | 325 | } |
... | ... | @@ -333,7 +339,7 @@ |
333 | 339 | cpu_relax(); |
334 | 340 | |
335 | 341 | #ifdef CONFIG_PPC64 |
336 | - fixup_irqs(cpu_online_map); | |
342 | + fixup_irqs(cpu_online_mask); | |
337 | 343 | /* counter the irq disable in fixup_irqs */ |
338 | 344 | local_irq_enable(); |
339 | 345 | #endif |
... | ... | @@ -462,7 +468,7 @@ |
462 | 468 | return id; |
463 | 469 | } |
464 | 470 | |
465 | -/* Must be called when no change can occur to cpu_present_map, | |
471 | +/* Must be called when no change can occur to cpu_present_mask, | |
466 | 472 | * i.e. during cpu online or offline. |
467 | 473 | */ |
468 | 474 | static struct device_node *cpu_to_l2cache(int cpu) |
... | ... | @@ -495,6 +501,14 @@ |
495 | 501 | current->active_mm = &init_mm; |
496 | 502 | |
497 | 503 | smp_store_cpu_info(cpu); |
504 | + | |
505 | +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | |
506 | + /* Clear any pending timer interrupts */ | |
507 | + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); | |
508 | + | |
509 | + /* Enable decrementer interrupt */ | |
510 | + mtspr(SPRN_TCR, TCR_DIE); | |
511 | +#endif | |
498 | 512 | set_dec(tb_ticks_per_jiffy); |
499 | 513 | preempt_disable(); |
500 | 514 | cpu_callin_map[cpu] = 1; |
501 | 515 | |
... | ... | @@ -517,15 +531,15 @@ |
517 | 531 | for (i = 0; i < threads_per_core; i++) { |
518 | 532 | if (cpu_is_offline(base + i)) |
519 | 533 | continue; |
520 | - cpu_set(cpu, per_cpu(cpu_sibling_map, base + i)); | |
521 | - cpu_set(base + i, per_cpu(cpu_sibling_map, cpu)); | |
534 | + cpumask_set_cpu(cpu, cpu_sibling_mask(base + i)); | |
535 | + cpumask_set_cpu(base + i, cpu_sibling_mask(cpu)); | |
522 | 536 | |
523 | 537 | /* cpu_core_map should be a superset of |
524 | 538 | * cpu_sibling_map even if we don't have cache |
525 | 539 | * information, so update the former here, too. |
526 | 540 | */ |
527 | - cpu_set(cpu, per_cpu(cpu_core_map, base +i)); | |
528 | - cpu_set(base + i, per_cpu(cpu_core_map, cpu)); | |
541 | + cpumask_set_cpu(cpu, cpu_core_mask(base + i)); | |
542 | + cpumask_set_cpu(base + i, cpu_core_mask(cpu)); | |
529 | 543 | } |
530 | 544 | l2_cache = cpu_to_l2cache(cpu); |
531 | 545 | for_each_online_cpu(i) { |
... | ... | @@ -533,8 +547,8 @@ |
533 | 547 | if (!np) |
534 | 548 | continue; |
535 | 549 | if (np == l2_cache) { |
536 | - cpu_set(cpu, per_cpu(cpu_core_map, i)); | |
537 | - cpu_set(i, per_cpu(cpu_core_map, cpu)); | |
550 | + cpumask_set_cpu(cpu, cpu_core_mask(i)); | |
551 | + cpumask_set_cpu(i, cpu_core_mask(cpu)); | |
538 | 552 | } |
539 | 553 | of_node_put(np); |
540 | 554 | } |
541 | 555 | |
542 | 556 | |
543 | 557 | |
... | ... | @@ -554,20 +568,23 @@ |
554 | 568 | |
555 | 569 | void __init smp_cpus_done(unsigned int max_cpus) |
556 | 570 | { |
557 | - cpumask_t old_mask; | |
571 | + cpumask_var_t old_mask; | |
558 | 572 | |
559 | 573 | /* We want the setup_cpu() here to be called from CPU 0, but our |
560 | 574 | * init thread may have been "borrowed" by another CPU in the meantime |
561 | 575 | * se we pin us down to CPU 0 for a short while |
562 | 576 | */ |
563 | - old_mask = current->cpus_allowed; | |
564 | - set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid)); | |
577 | + alloc_cpumask_var(&old_mask, GFP_NOWAIT); | |
578 | + cpumask_copy(old_mask, ¤t->cpus_allowed); | |
579 | + set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); | |
565 | 580 | |
566 | 581 | if (smp_ops && smp_ops->setup_cpu) |
567 | 582 | smp_ops->setup_cpu(boot_cpuid); |
568 | 583 | |
569 | - set_cpus_allowed(current, old_mask); | |
584 | + set_cpus_allowed_ptr(current, old_mask); | |
570 | 585 | |
586 | + free_cpumask_var(old_mask); | |
587 | + | |
571 | 588 | snapshot_timebases(); |
572 | 589 | |
573 | 590 | dump_numa_cpu_topology(); |
... | ... | @@ -591,10 +608,10 @@ |
591 | 608 | /* Update sibling maps */ |
592 | 609 | base = cpu_first_thread_in_core(cpu); |
593 | 610 | for (i = 0; i < threads_per_core; i++) { |
594 | - cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i)); | |
595 | - cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu)); | |
596 | - cpu_clear(cpu, per_cpu(cpu_core_map, base +i)); | |
597 | - cpu_clear(base + i, per_cpu(cpu_core_map, cpu)); | |
611 | + cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); | |
612 | + cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); | |
613 | + cpumask_clear_cpu(cpu, cpu_core_mask(base + i)); | |
614 | + cpumask_clear_cpu(base + i, cpu_core_mask(cpu)); | |
598 | 615 | } |
599 | 616 | |
600 | 617 | l2_cache = cpu_to_l2cache(cpu); |
... | ... | @@ -603,8 +620,8 @@ |
603 | 620 | if (!np) |
604 | 621 | continue; |
605 | 622 | if (np == l2_cache) { |
606 | - cpu_clear(cpu, per_cpu(cpu_core_map, i)); | |
607 | - cpu_clear(i, per_cpu(cpu_core_map, cpu)); | |
623 | + cpumask_clear_cpu(cpu, cpu_core_mask(i)); | |
624 | + cpumask_clear_cpu(i, cpu_core_mask(cpu)); | |
608 | 625 | } |
609 | 626 | of_node_put(np); |
610 | 627 | } |
... | ... | @@ -630,6 +647,12 @@ |
630 | 647 | void cpu_hotplug_driver_unlock() |
631 | 648 | { |
632 | 649 | mutex_unlock(&powerpc_cpu_hotplug_driver_mutex); |
650 | +} | |
651 | + | |
652 | +void cpu_die(void) | |
653 | +{ | |
654 | + if (ppc_md.cpu_die) | |
655 | + ppc_md.cpu_die(); | |
633 | 656 | } |
634 | 657 | #endif |
arch/powerpc/kernel/sysfs.c
... | ... | @@ -35,7 +35,7 @@ |
35 | 35 | #ifdef CONFIG_PPC64 |
36 | 36 | |
37 | 37 | /* Time in microseconds we delay before sleeping in the idle loop */ |
38 | -DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 }; | |
38 | +DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; | |
39 | 39 | |
40 | 40 | static ssize_t store_smt_snooze_delay(struct sys_device *dev, |
41 | 41 | struct sysdev_attribute *attr, |
42 | 42 | |
... | ... | @@ -44,9 +44,9 @@ |
44 | 44 | { |
45 | 45 | struct cpu *cpu = container_of(dev, struct cpu, sysdev); |
46 | 46 | ssize_t ret; |
47 | - unsigned long snooze; | |
47 | + long snooze; | |
48 | 48 | |
49 | - ret = sscanf(buf, "%lu", &snooze); | |
49 | + ret = sscanf(buf, "%ld", &snooze); | |
50 | 50 | if (ret != 1) |
51 | 51 | return -EINVAL; |
52 | 52 | |
53 | 53 | |
54 | 54 | |
55 | 55 | |
... | ... | @@ -61,53 +61,23 @@ |
61 | 61 | { |
62 | 62 | struct cpu *cpu = container_of(dev, struct cpu, sysdev); |
63 | 63 | |
64 | - return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id)); | |
64 | + return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id)); | |
65 | 65 | } |
66 | 66 | |
67 | 67 | static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, |
68 | 68 | store_smt_snooze_delay); |
69 | 69 | |
70 | -/* Only parse OF options if the matching cmdline option was not specified */ | |
71 | -static int smt_snooze_cmdline; | |
72 | - | |
73 | -static int __init smt_setup(void) | |
74 | -{ | |
75 | - struct device_node *options; | |
76 | - const unsigned int *val; | |
77 | - unsigned int cpu; | |
78 | - | |
79 | - if (!cpu_has_feature(CPU_FTR_SMT)) | |
80 | - return -ENODEV; | |
81 | - | |
82 | - options = of_find_node_by_path("/options"); | |
83 | - if (!options) | |
84 | - return -ENODEV; | |
85 | - | |
86 | - val = of_get_property(options, "ibm,smt-snooze-delay", NULL); | |
87 | - if (!smt_snooze_cmdline && val) { | |
88 | - for_each_possible_cpu(cpu) | |
89 | - per_cpu(smt_snooze_delay, cpu) = *val; | |
90 | - } | |
91 | - | |
92 | - of_node_put(options); | |
93 | - return 0; | |
94 | -} | |
95 | -__initcall(smt_setup); | |
96 | - | |
97 | 70 | static int __init setup_smt_snooze_delay(char *str) |
98 | 71 | { |
99 | 72 | unsigned int cpu; |
100 | - int snooze; | |
73 | + long snooze; | |
101 | 74 | |
102 | 75 | if (!cpu_has_feature(CPU_FTR_SMT)) |
103 | 76 | return 1; |
104 | 77 | |
105 | - smt_snooze_cmdline = 1; | |
106 | - | |
107 | - if (get_option(&str, &snooze)) { | |
108 | - for_each_possible_cpu(cpu) | |
109 | - per_cpu(smt_snooze_delay, cpu) = snooze; | |
110 | - } | |
78 | + snooze = simple_strtol(str, NULL, 10); | |
79 | + for_each_possible_cpu(cpu) | |
80 | + per_cpu(smt_snooze_delay, cpu) = snooze; | |
111 | 81 | |
112 | 82 | return 1; |
113 | 83 | } |
arch/powerpc/kernel/traps.c
... | ... | @@ -380,6 +380,46 @@ |
380 | 380 | } |
381 | 381 | return 0; |
382 | 382 | } |
383 | + | |
384 | +int machine_check_47x(struct pt_regs *regs) | |
385 | +{ | |
386 | + unsigned long reason = get_mc_reason(regs); | |
387 | + u32 mcsr; | |
388 | + | |
389 | + printk(KERN_ERR "Machine check in kernel mode.\n"); | |
390 | + if (reason & ESR_IMCP) { | |
391 | + printk(KERN_ERR | |
392 | + "Instruction Synchronous Machine Check exception\n"); | |
393 | + mtspr(SPRN_ESR, reason & ~ESR_IMCP); | |
394 | + return 0; | |
395 | + } | |
396 | + mcsr = mfspr(SPRN_MCSR); | |
397 | + if (mcsr & MCSR_IB) | |
398 | + printk(KERN_ERR "Instruction Read PLB Error\n"); | |
399 | + if (mcsr & MCSR_DRB) | |
400 | + printk(KERN_ERR "Data Read PLB Error\n"); | |
401 | + if (mcsr & MCSR_DWB) | |
402 | + printk(KERN_ERR "Data Write PLB Error\n"); | |
403 | + if (mcsr & MCSR_TLBP) | |
404 | + printk(KERN_ERR "TLB Parity Error\n"); | |
405 | + if (mcsr & MCSR_ICP) { | |
406 | + flush_instruction_cache(); | |
407 | + printk(KERN_ERR "I-Cache Parity Error\n"); | |
408 | + } | |
409 | + if (mcsr & MCSR_DCSP) | |
410 | + printk(KERN_ERR "D-Cache Search Parity Error\n"); | |
411 | + if (mcsr & PPC47x_MCSR_GPR) | |
412 | + printk(KERN_ERR "GPR Parity Error\n"); | |
413 | + if (mcsr & PPC47x_MCSR_FPR) | |
414 | + printk(KERN_ERR "FPR Parity Error\n"); | |
415 | + if (mcsr & PPC47x_MCSR_IPR) | |
416 | + printk(KERN_ERR "Machine Check exception is imprecise\n"); | |
417 | + | |
418 | + /* Clear MCSR */ | |
419 | + mtspr(SPRN_MCSR, mcsr); | |
420 | + | |
421 | + return 0; | |
422 | +} | |
383 | 423 | #elif defined(CONFIG_E500) |
384 | 424 | int machine_check_e500(struct pt_regs *regs) |
385 | 425 | { |
arch/powerpc/kernel/vio.c
... | ... | @@ -645,8 +645,10 @@ |
645 | 645 | found = 1; |
646 | 646 | break; |
647 | 647 | } |
648 | - if (!found) | |
648 | + if (!found) { | |
649 | + spin_unlock_irqrestore(&vio_cmo.lock, flags); | |
649 | 650 | return; |
651 | + } | |
650 | 652 | |
651 | 653 | /* Increase/decrease in desired device entitlement */ |
652 | 654 | if (desired >= viodev->cmo.desired) { |
653 | 655 | |
... | ... | @@ -958,9 +960,12 @@ |
958 | 960 | |
959 | 961 | static ssize_t name_show(struct device *, struct device_attribute *, char *); |
960 | 962 | static ssize_t devspec_show(struct device *, struct device_attribute *, char *); |
963 | +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | |
964 | + char *buf); | |
961 | 965 | static struct device_attribute vio_cmo_dev_attrs[] = { |
962 | 966 | __ATTR_RO(name), |
963 | 967 | __ATTR_RO(devspec), |
968 | + __ATTR_RO(modalias), | |
964 | 969 | __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, |
965 | 970 | viodev_cmo_desired_show, viodev_cmo_desired_set), |
966 | 971 | __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), |
967 | 972 | |
... | ... | @@ -1320,9 +1325,27 @@ |
1320 | 1325 | return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); |
1321 | 1326 | } |
1322 | 1327 | |
1328 | +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | |
1329 | + char *buf) | |
1330 | +{ | |
1331 | + const struct vio_dev *vio_dev = to_vio_dev(dev); | |
1332 | + struct device_node *dn; | |
1333 | + const char *cp; | |
1334 | + | |
1335 | + dn = dev->archdata.of_node; | |
1336 | + if (!dn) | |
1337 | + return -ENODEV; | |
1338 | + cp = of_get_property(dn, "compatible", NULL); | |
1339 | + if (!cp) | |
1340 | + return -ENODEV; | |
1341 | + | |
1342 | + return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); | |
1343 | +} | |
1344 | + | |
1323 | 1345 | static struct device_attribute vio_dev_attrs[] = { |
1324 | 1346 | __ATTR_RO(name), |
1325 | 1347 | __ATTR_RO(devspec), |
1348 | + __ATTR_RO(modalias), | |
1326 | 1349 | __ATTR_NULL |
1327 | 1350 | }; |
1328 | 1351 | |
... | ... | @@ -1365,6 +1388,7 @@ |
1365 | 1388 | .match = vio_bus_match, |
1366 | 1389 | .probe = vio_bus_probe, |
1367 | 1390 | .remove = vio_bus_remove, |
1391 | + .pm = GENERIC_SUBSYS_PM_OPS, | |
1368 | 1392 | }; |
1369 | 1393 | |
1370 | 1394 | /** |
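
The new modalias attribute exposes the string "vio:T<type>S<compatible>" so udev can autoload VIO drivers. A small sketch of what that string looks like; the type and compatible values are examples of a plausible pSeries virtual device, not data taken from this patch:

    #include <stdio.h>

    /* Sketch of the format produced by modalias_show() above. */
    int main(void)
    {
        const char *type   = "vnet";        /* vio_dev->type (example) */
        const char *compat = "IBM,l-lan";   /* first "compatible" string (example) */
        char modalias[128];

        snprintf(modalias, sizeof(modalias), "vio:T%sS%s", type, compat);
        /* a driver declaring MODULE_ALIAS("vio:TvnetSIBM,l-lan*") would match */
        printf("%s\n", modalias);
        return 0;
    }
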
arch/powerpc/lib/string.S
... | ... | @@ -28,7 +28,7 @@ |
28 | 28 | /* This clears out any unused part of the destination buffer, |
29 | 29 | just as the libc version does. -- paulus */ |
30 | 30 | _GLOBAL(strncpy) |
31 | - cmpwi 0,r5,0 | |
31 | + PPC_LCMPI 0,r5,0 | |
32 | 32 | beqlr |
33 | 33 | mtctr r5 |
34 | 34 | addi r6,r3,-1 |
... | ... | @@ -39,7 +39,7 @@ |
39 | 39 | bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */ |
40 | 40 | bnelr /* if we didn't hit a null char, we're done */ |
41 | 41 | mfctr r5 |
42 | - cmpwi 0,r5,0 /* any space left in destination buffer? */ | |
42 | + PPC_LCMPI 0,r5,0 /* any space left in destination buffer? */ | |
43 | 43 | beqlr /* we know r0 == 0 here */ |
44 | 44 | 2: stbu r0,1(r6) /* clear it out if so */ |
45 | 45 | bdnz 2b |
... | ... | @@ -70,8 +70,8 @@ |
70 | 70 | blr |
71 | 71 | |
72 | 72 | _GLOBAL(strncmp) |
73 | - PPC_LCMPI r5,0 | |
74 | - beqlr | |
73 | + PPC_LCMPI 0,r5,0 | |
74 | + beq- 2f | |
75 | 75 | mtctr r5 |
76 | 76 | addi r5,r3,-1 |
77 | 77 | addi r4,r4,-1 |
... | ... | @@ -82,6 +82,8 @@ |
82 | 82 | beqlr 1 |
83 | 83 | bdnzt eq,1b |
84 | 84 | blr |
85 | +2: li r3,0 | |
86 | + blr | |
85 | 87 | |
86 | 88 | _GLOBAL(strlen) |
87 | 89 | addi r4,r3,-1 |
... | ... | @@ -92,8 +94,8 @@ |
92 | 94 | blr |
93 | 95 | |
94 | 96 | _GLOBAL(memcmp) |
95 | - cmpwi 0,r5,0 | |
96 | - ble- 2f | |
97 | + PPC_LCMPI 0,r5,0 | |
98 | + beq- 2f | |
97 | 99 | mtctr r5 |
98 | 100 | addi r6,r3,-1 |
99 | 101 | addi r4,r4,-1 |
... | ... | @@ -106,8 +108,8 @@ |
106 | 108 | blr |
107 | 109 | |
108 | 110 | _GLOBAL(memchr) |
109 | - cmpwi 0,r5,0 | |
110 | - ble- 2f | |
111 | + PPC_LCMPI 0,r5,0 | |
112 | + beq- 2f | |
111 | 113 | mtctr r5 |
112 | 114 | addi r3,r3,-1 |
113 | 115 | 1: lbzu r0,1(r3) |
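
The string.S hunks replace the 32-bit cmpwi with PPC_LCMPI so the length argument is compared at full register width on 64-bit, and make strncmp/memcmp/memchr handle a zero length explicitly instead of falling through. A C rendering of the strncmp edge case that the new "beq- 2f ... 2: li r3,0" path implements (a sketch of the intended semantics, not a translation of the assembly):

    #include <stddef.h>
    #include <stdio.h>

    static int strncmp_sketch(const char *a, const char *b, size_t n)
    {
        if (n == 0)
            return 0;                 /* zero length compares equal, touch nothing */
        for (; n > 0; a++, b++, n--) {
            if (*a != *b || *a == '\0')
                return (unsigned char)*a - (unsigned char)*b;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", strncmp_sketch("abc", "abd", 0), strncmp_sketch("abc", "abd", 3));
        return 0;
    }
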
arch/powerpc/mm/44x_mmu.c
... | ... | @@ -38,7 +38,9 @@ |
38 | 38 | unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; |
39 | 39 | int icache_44x_need_flush; |
40 | 40 | |
41 | -static void __init ppc44x_update_tlb_hwater(void) | |
41 | +unsigned long tlb_47x_boltmap[1024/8]; | |
42 | + | |
43 | +static void __cpuinit ppc44x_update_tlb_hwater(void) | |
42 | 44 | { |
43 | 45 | extern unsigned int tlb_44x_patch_hwater_D[]; |
44 | 46 | extern unsigned int tlb_44x_patch_hwater_I[]; |
... | ... | @@ -59,7 +61,7 @@ |
59 | 61 | } |
60 | 62 | |
61 | 63 | /* |
62 | - * "Pins" a 256MB TLB entry in AS0 for kernel lowmem | |
64 | + * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU | |
63 | 65 | */ |
64 | 66 | static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) |
65 | 67 | { |
66 | 68 | |
67 | 69 | |
... | ... | @@ -67,12 +69,18 @@ |
67 | 69 | |
68 | 70 | ppc44x_update_tlb_hwater(); |
69 | 71 | |
72 | + mtspr(SPRN_MMUCR, 0); | |
73 | + | |
70 | 74 | __asm__ __volatile__( |
71 | 75 | "tlbwe %2,%3,%4\n" |
72 | 76 | "tlbwe %1,%3,%5\n" |
73 | 77 | "tlbwe %0,%3,%6\n" |
74 | 78 | : |
79 | +#ifdef CONFIG_PPC47x | |
80 | + : "r" (PPC47x_TLB2_S_RWX), | |
81 | +#else | |
75 | 82 | : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), |
83 | +#endif | |
76 | 84 | "r" (phys), |
77 | 85 | "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), |
78 | 86 | "r" (entry), |
79 | 87 | |
... | ... | @@ -81,8 +89,93 @@ |
81 | 89 | "i" (PPC44x_TLB_ATTRIB)); |
82 | 90 | } |
83 | 91 | |
92 | +static int __init ppc47x_find_free_bolted(void) | |
93 | +{ | |
94 | + unsigned int mmube0 = mfspr(SPRN_MMUBE0); | |
95 | + unsigned int mmube1 = mfspr(SPRN_MMUBE1); | |
96 | + | |
97 | + if (!(mmube0 & MMUBE0_VBE0)) | |
98 | + return 0; | |
99 | + if (!(mmube0 & MMUBE0_VBE1)) | |
100 | + return 1; | |
101 | + if (!(mmube0 & MMUBE0_VBE2)) | |
102 | + return 2; | |
103 | + if (!(mmube1 & MMUBE1_VBE3)) | |
104 | + return 3; | |
105 | + if (!(mmube1 & MMUBE1_VBE4)) | |
106 | + return 4; | |
107 | + if (!(mmube1 & MMUBE1_VBE5)) | |
108 | + return 5; | |
109 | + return -1; | |
110 | +} | |
111 | + | |
112 | +static void __init ppc47x_update_boltmap(void) | |
113 | +{ | |
114 | + unsigned int mmube0 = mfspr(SPRN_MMUBE0); | |
115 | + unsigned int mmube1 = mfspr(SPRN_MMUBE1); | |
116 | + | |
117 | + if (mmube0 & MMUBE0_VBE0) | |
118 | + __set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff, | |
119 | + tlb_47x_boltmap); | |
120 | + if (mmube0 & MMUBE0_VBE1) | |
121 | + __set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff, | |
122 | + tlb_47x_boltmap); | |
123 | + if (mmube0 & MMUBE0_VBE2) | |
124 | + __set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff, | |
125 | + tlb_47x_boltmap); | |
126 | + if (mmube1 & MMUBE1_VBE3) | |
127 | + __set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff, | |
128 | + tlb_47x_boltmap); | |
129 | + if (mmube1 & MMUBE1_VBE4) | |
130 | + __set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff, | |
131 | + tlb_47x_boltmap); | |
132 | + if (mmube1 & MMUBE1_VBE5) | |
133 | + __set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff, | |
134 | + tlb_47x_boltmap); | |
135 | +} | |
136 | + | |
137 | +/* | |
138 | + * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU | |
139 | + */ | |
140 | +static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys) | |
141 | +{ | |
142 | + unsigned int rA; | |
143 | + int bolted; | |
144 | + | |
145 | + /* Base rA is HW way select, way 0, bolted bit set */ | |
146 | + rA = 0x88000000; | |
147 | + | |
148 | + /* Look for a bolted entry slot */ | |
149 | + bolted = ppc47x_find_free_bolted(); | |
150 | + BUG_ON(bolted < 0); | |
151 | + | |
152 | + /* Insert bolted slot number */ | |
153 | + rA |= bolted << 24; | |
154 | + | |
155 | + pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n", | |
156 | + virt, phys, bolted); | |
157 | + | |
158 | + mtspr(SPRN_MMUCR, 0); | |
159 | + | |
160 | + __asm__ __volatile__( | |
161 | + "tlbwe %2,%3,0\n" | |
162 | + "tlbwe %1,%3,1\n" | |
163 | + "tlbwe %0,%3,2\n" | |
164 | + : | |
165 | + : "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR | | |
166 | + PPC47x_TLB2_SX | |
167 | +#ifdef CONFIG_SMP | |
168 | + | PPC47x_TLB2_M | |
169 | +#endif | |
170 | + ), | |
171 | + "r" (phys), | |
172 | + "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M), | |
173 | + "r" (rA)); | |
174 | +} | |
175 | + | |
84 | 176 | void __init MMU_init_hw(void) |
85 | 177 | { |
178 | + /* This is not useful on 47x but won't hurt either */ | |
86 | 179 | ppc44x_update_tlb_hwater(); |
87 | 180 | |
88 | 181 | flush_instruction_cache(); |
89 | 182 | |
90 | 183 | |
... | ... | @@ -95,9 +188,52 @@ |
95 | 188 | /* Pin in enough TLBs to cover any lowmem not covered by the |
96 | 189 | * initial 256M mapping established in head_44x.S */ |
97 | 190 | for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr; |
98 | - addr += PPC_PIN_SIZE) | |
99 | - ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); | |
191 | + addr += PPC_PIN_SIZE) { | |
192 | + if (mmu_has_feature(MMU_FTR_TYPE_47x)) | |
193 | + ppc47x_pin_tlb(addr + PAGE_OFFSET, addr); | |
194 | + else | |
195 | + ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); | |
196 | + } | |
197 | + if (mmu_has_feature(MMU_FTR_TYPE_47x)) { | |
198 | + ppc47x_update_boltmap(); | |
100 | 199 | |
200 | +#ifdef DEBUG | |
201 | + { | |
202 | + int i; | |
203 | + | |
204 | + printk(KERN_DEBUG "bolted entries: "); | |
205 | + for (i = 0; i < 255; i++) { | |
206 | + if (test_bit(i, tlb_47x_boltmap)) | |
207 | + printk("%d ", i); | |
208 | + } | |
209 | + printk("\n"); | |
210 | + } | |
211 | +#endif /* DEBUG */ | |
212 | + } | |
101 | 213 | return total_lowmem; |
102 | 214 | } |
215 | + | |
216 | +#ifdef CONFIG_SMP | |
217 | +void __cpuinit mmu_init_secondary(int cpu) | |
218 | +{ | |
219 | + unsigned long addr; | |
220 | + | |
221 | + /* Pin in enough TLBs to cover any lowmem not covered by the | |
222 | + * initial 256M mapping established in head_44x.S | |
223 | + * | |
224 | + * WARNING: This is called with only the first 256M of the | |
225 | + * linear mapping in the TLB and we can't take faults yet | |
226 | + * so beware of what this code uses. It runs off a temporary | |
227 | + * stack. current (r2) isn't initialized, smp_processor_id() | |
228 | + * will not work, current thread info isn't accessible, ... | |
229 | + */ | |
230 | + for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr; | |
231 | + addr += PPC_PIN_SIZE) { | |
232 | + if (mmu_has_feature(MMU_FTR_TYPE_47x)) | |
233 | + ppc47x_pin_tlb(addr + PAGE_OFFSET, addr); | |
234 | + else | |
235 | + ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); | |
236 | + } | |
237 | +} | |
238 | +#endif /* CONFIG_SMP */ |
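
ppc47x_update_boltmap() records which TLB sets already hold a firmware-bolted entry so the full-invalidate path in tlb_nohash_low.S can leave them alone. A standalone sketch of that bitmap bookkeeping; the 256-set figure and helper names are assumptions for illustration, while the kernel uses the generic __set_bit()/test_bit() on tlb_47x_boltmap:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SETS 256
    static uint32_t boltmap[NUM_SETS / 32];   /* one bit per TLB set */

    static void mark_bolted(unsigned int set) { boltmap[set / 32] |= 1u << (set % 32); }
    static bool is_bolted(unsigned int set)   { return boltmap[set / 32] & (1u << (set % 32)); }

    int main(void)
    {
        mark_bolted(0);                       /* e.g. the pinned 256MB lowmem entry */
        for (unsigned int set = 0; set < NUM_SETS; set++)
            if (!is_bolted(set))
                ;                             /* ...safe to invalidate this set... */
        printf("set 0 bolted: %d\n", is_bolted(0));
        return 0;
    }
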
arch/powerpc/mm/fault.c
... | ... | @@ -151,13 +151,14 @@ |
151 | 151 | if (!user_mode(regs) && (address >= TASK_SIZE)) |
152 | 152 | return SIGSEGV; |
153 | 153 | |
154 | -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | |
154 | +#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ | |
155 | + defined(CONFIG_PPC_BOOK3S_64)) | |
155 | 156 | if (error_code & DSISR_DABRMATCH) { |
156 | 157 | /* DABR match */ |
157 | 158 | do_dabr(regs, address, error_code); |
158 | 159 | return 0; |
159 | 160 | } |
160 | -#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/ | |
161 | +#endif | |
161 | 162 | |
162 | 163 | if (in_atomic() || mm == NULL) { |
163 | 164 | if (!user_mode(regs)) |
... | ... | @@ -307,7 +308,6 @@ |
307 | 308 | * make sure we exit gracefully rather than endlessly redo |
308 | 309 | * the fault. |
309 | 310 | */ |
310 | - survive: | |
311 | 311 | ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); |
312 | 312 | if (unlikely(ret & VM_FAULT_ERROR)) { |
313 | 313 | if (ret & VM_FAULT_OOM) |
... | ... | @@ -359,15 +359,10 @@ |
359 | 359 | */ |
360 | 360 | out_of_memory: |
361 | 361 | up_read(&mm->mmap_sem); |
362 | - if (is_global_init(current)) { | |
363 | - yield(); | |
364 | - down_read(&mm->mmap_sem); | |
365 | - goto survive; | |
366 | - } | |
367 | - printk("VM: killing process %s\n", current->comm); | |
368 | - if (user_mode(regs)) | |
369 | - do_group_exit(SIGKILL); | |
370 | - return SIGKILL; | |
362 | + if (!user_mode(regs)) | |
363 | + return SIGKILL; | |
364 | + pagefault_out_of_memory(); | |
365 | + return 0; | |
371 | 366 | |
372 | 367 | do_sigbus: |
373 | 368 | up_read(&mm->mmap_sem); |
arch/powerpc/mm/fsl_booke_mmu.c
... | ... | @@ -2,7 +2,7 @@ |
2 | 2 | * Modifications by Kumar Gala (galak@kernel.crashing.org) to support |
3 | 3 | * E500 Book E processors. |
4 | 4 | * |
5 | - * Copyright 2004 Freescale Semiconductor, Inc | |
5 | + * Copyright 2004,2010 Freescale Semiconductor, Inc. | |
6 | 6 | * |
7 | 7 | * This file contains the routines for initializing the MMU |
8 | 8 | * on the 4xx series of chips. |
9 | 9 | |
... | ... | @@ -56,19 +56,13 @@ |
56 | 56 | |
57 | 57 | unsigned int tlbcam_index; |
58 | 58 | |
59 | -#define NUM_TLBCAMS (64) | |
60 | 59 | |
61 | 60 | #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) |
62 | 61 | #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" |
63 | 62 | #endif |
64 | 63 | |
65 | -struct tlbcam { | |
66 | - u32 MAS0; | |
67 | - u32 MAS1; | |
68 | - unsigned long MAS2; | |
69 | - u32 MAS3; | |
70 | - u32 MAS7; | |
71 | -} TLBCAM[NUM_TLBCAMS]; | |
64 | +#define NUM_TLBCAMS (64) | |
65 | +struct tlbcam TLBCAM[NUM_TLBCAMS]; | |
72 | 66 | |
73 | 67 | struct tlbcamrange { |
74 | 68 | unsigned long start; |
... | ... | @@ -107,19 +101,6 @@ |
107 | 101 | +tlbcam_addrs[b].phys) |
108 | 102 | return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); |
109 | 103 | return 0; |
110 | -} | |
111 | - | |
112 | -void loadcam_entry(int idx) | |
113 | -{ | |
114 | - mtspr(SPRN_MAS0, TLBCAM[idx].MAS0); | |
115 | - mtspr(SPRN_MAS1, TLBCAM[idx].MAS1); | |
116 | - mtspr(SPRN_MAS2, TLBCAM[idx].MAS2); | |
117 | - mtspr(SPRN_MAS3, TLBCAM[idx].MAS3); | |
118 | - | |
119 | - if (mmu_has_feature(MMU_FTR_BIG_PHYS)) | |
120 | - mtspr(SPRN_MAS7, TLBCAM[idx].MAS7); | |
121 | - | |
122 | - asm volatile("isync;tlbwe;isync" : : : "memory"); | |
123 | 104 | } |
124 | 105 | |
125 | 106 | /* |
arch/powerpc/mm/init_64.c
... | ... | @@ -252,6 +252,47 @@ |
252 | 252 | } |
253 | 253 | #endif /* CONFIG_PPC_BOOK3E */ |
254 | 254 | |
255 | +struct vmemmap_backing *vmemmap_list; | |
256 | + | |
257 | +static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node) | |
258 | +{ | |
259 | + static struct vmemmap_backing *next; | |
260 | + static int num_left; | |
261 | + | |
262 | + /* allocate a page when required and hand out chunks */ | |
263 | + if (!next || !num_left) { | |
264 | + next = vmemmap_alloc_block(PAGE_SIZE, node); | |
265 | + if (unlikely(!next)) { | |
266 | + WARN_ON(1); | |
267 | + return NULL; | |
268 | + } | |
269 | + num_left = PAGE_SIZE / sizeof(struct vmemmap_backing); | |
270 | + } | |
271 | + | |
272 | + num_left--; | |
273 | + | |
274 | + return next++; | |
275 | +} | |
276 | + | |
277 | +static __meminit void vmemmap_list_populate(unsigned long phys, | |
278 | + unsigned long start, | |
279 | + int node) | |
280 | +{ | |
281 | + struct vmemmap_backing *vmem_back; | |
282 | + | |
283 | + vmem_back = vmemmap_list_alloc(node); | |
284 | + if (unlikely(!vmem_back)) { | |
285 | + WARN_ON(1); | |
286 | + return; | |
287 | + } | |
288 | + | |
289 | + vmem_back->phys = phys; | |
290 | + vmem_back->virt_addr = start; | |
291 | + vmem_back->list = vmemmap_list; | |
292 | + | |
293 | + vmemmap_list = vmem_back; | |
294 | +} | |
295 | + | |
255 | 296 | int __meminit vmemmap_populate(struct page *start_page, |
256 | 297 | unsigned long nr_pages, int node) |
257 | 298 | { |
... | ... | @@ -275,6 +316,8 @@ |
275 | 316 | p = vmemmap_alloc_block(page_size, node); |
276 | 317 | if (!p) |
277 | 318 | return -ENOMEM; |
319 | + | |
320 | + vmemmap_list_populate(__pa(p), start, node); | |
278 | 321 | |
279 | 322 | pr_debug(" * %016lx..%016lx allocated at %p\n", |
280 | 323 | start, start + page_size, p); |
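
vmemmap_list_alloc() above hands out small tracking records carved from one page at a time, and vmemmap_list_populate() pushes each record onto a singly linked list so the physical backing of every vmemmap block can be found later. A self-contained sketch of that chunked-allocator-plus-list pattern (the names, the 4096-byte block size and calloc stand in for the kernel allocators):

    #include <stdlib.h>

    struct backing {                    /* stand-in for struct vmemmap_backing */
        unsigned long phys;
        unsigned long virt_addr;
        struct backing *next;           /* the kernel field is called "list" */
    };

    static struct backing *backing_list;

    static struct backing *backing_alloc(void)
    {
        static struct backing *next;
        static int num_left;

        if (!next || !num_left) {       /* refill from a fresh block when empty */
            next = calloc(4096 / sizeof(*next), sizeof(*next));
            if (!next)
                return NULL;
            num_left = 4096 / sizeof(*next);
        }
        num_left--;
        return next++;
    }

    static void backing_record(unsigned long phys, unsigned long virt)
    {
        struct backing *b = backing_alloc();
        if (!b)
            return;
        b->phys = phys;
        b->virt_addr = virt;
        b->next = backing_list;         /* push-front, like vmemmap_list */
        backing_list = b;
    }

    int main(void)
    {
        backing_record(0x1000, 0xc0000000UL);
        return backing_list ? 0 : 1;
    }
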
arch/powerpc/mm/mmu_context_nohash.c
... | ... | @@ -395,10 +395,18 @@ |
395 | 395 | * the PID/TID comparison is disabled, so we can use a TID of zero |
396 | 396 | * to represent all kernel pages as shared among all contexts. |
397 | 397 | * -- Dan |
398 | + * | |
399 | + * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We | |
400 | + * should normally never have to steal, though the facility is | |
401 | + * present if needed. | |
402 | + * -- BenH | |
398 | 403 | */ |
399 | 404 | if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { |
400 | 405 | first_context = 0; |
401 | 406 | last_context = 15; |
407 | + } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { | |
408 | + first_context = 1; | |
409 | + last_context = 65535; | |
402 | 410 | } else { |
403 | 411 | first_context = 1; |
404 | 412 | last_context = 255; |
arch/powerpc/mm/mmu_decl.h
... | ... | @@ -69,12 +69,7 @@ |
69 | 69 | } |
70 | 70 | #endif /* CONIFG_8xx */ |
71 | 71 | |
72 | -/* | |
73 | - * As of today, we don't support tlbivax broadcast on any | |
74 | - * implementation. When that becomes the case, this will be | |
75 | - * an extern. | |
76 | - */ | |
77 | -#ifdef CONFIG_PPC_BOOK3E | |
72 | +#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x) | |
78 | 73 | extern void _tlbivax_bcast(unsigned long address, unsigned int pid, |
79 | 74 | unsigned int tsize, unsigned int ind); |
80 | 75 | #else |
81 | 76 | |
... | ... | @@ -149,7 +144,15 @@ |
149 | 144 | extern void MMU_init_hw(void); |
150 | 145 | extern unsigned long mmu_mapin_ram(unsigned long top); |
151 | 146 | extern void adjust_total_lowmem(void); |
147 | +extern void loadcam_entry(unsigned int index); | |
152 | 148 | |
149 | +struct tlbcam { | |
150 | + u32 MAS0; | |
151 | + u32 MAS1; | |
152 | + unsigned long MAS2; | |
153 | + u32 MAS3; | |
154 | + u32 MAS7; | |
155 | +}; | |
153 | 156 | #elif defined(CONFIG_PPC32) |
154 | 157 | /* anything 32-bit except 4xx or 8xx */ |
155 | 158 | extern void MMU_init_hw(void); |
arch/powerpc/mm/numa.c
... | ... | @@ -33,16 +33,41 @@ |
33 | 33 | #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); } |
34 | 34 | |
35 | 35 | int numa_cpu_lookup_table[NR_CPUS]; |
36 | -cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; | |
36 | +cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; | |
37 | 37 | struct pglist_data *node_data[MAX_NUMNODES]; |
38 | 38 | |
39 | 39 | EXPORT_SYMBOL(numa_cpu_lookup_table); |
40 | -EXPORT_SYMBOL(numa_cpumask_lookup_table); | |
40 | +EXPORT_SYMBOL(node_to_cpumask_map); | |
41 | 41 | EXPORT_SYMBOL(node_data); |
42 | 42 | |
43 | 43 | static int min_common_depth; |
44 | 44 | static int n_mem_addr_cells, n_mem_size_cells; |
45 | 45 | |
46 | +/* | |
47 | + * Allocate node_to_cpumask_map based on number of available nodes | |
48 | + * Requires node_possible_map to be valid. | |
49 | + * | |
50 | + * Note: node_to_cpumask() is not valid until after this is done. | |
51 | + */ | |
52 | +static void __init setup_node_to_cpumask_map(void) | |
53 | +{ | |
54 | + unsigned int node, num = 0; | |
55 | + | |
56 | + /* setup nr_node_ids if not done yet */ | |
57 | + if (nr_node_ids == MAX_NUMNODES) { | |
58 | + for_each_node_mask(node, node_possible_map) | |
59 | + num = node; | |
60 | + nr_node_ids = num + 1; | |
61 | + } | |
62 | + | |
63 | + /* allocate the map */ | |
64 | + for (node = 0; node < nr_node_ids; node++) | |
65 | + alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); | |
66 | + | |
67 | + /* cpumask_of_node() will now work */ | |
68 | + dbg("Node to cpumask map for %d nodes\n", nr_node_ids); | |
69 | +} | |
70 | + | |
46 | 71 | static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn, |
47 | 72 | unsigned int *nid) |
48 | 73 | { |
... | ... | @@ -138,8 +163,8 @@ |
138 | 163 | |
139 | 164 | dbg("adding cpu %d to node %d\n", cpu, node); |
140 | 165 | |
141 | - if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) | |
142 | - cpu_set(cpu, numa_cpumask_lookup_table[node]); | |
166 | + if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) | |
167 | + cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | |
143 | 168 | } |
144 | 169 | |
145 | 170 | #ifdef CONFIG_HOTPLUG_CPU |
... | ... | @@ -149,8 +174,8 @@ |
149 | 174 | |
150 | 175 | dbg("removing cpu %lu from node %d\n", cpu, node); |
151 | 176 | |
152 | - if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) { | |
153 | - cpu_clear(cpu, numa_cpumask_lookup_table[node]); | |
177 | + if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { | |
178 | + cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); | |
154 | 179 | } else { |
155 | 180 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", |
156 | 181 | cpu, node); |
... | ... | @@ -246,7 +271,8 @@ |
246 | 271 | const unsigned int *ref_points; |
247 | 272 | struct device_node *rtas_root; |
248 | 273 | unsigned int len; |
249 | - struct device_node *options; | |
274 | + struct device_node *chosen; | |
275 | + const char *vec5; | |
250 | 276 | |
251 | 277 | rtas_root = of_find_node_by_path("/rtas"); |
252 | 278 | |
253 | 279 | |
... | ... | @@ -264,14 +290,17 @@ |
264 | 290 | "ibm,associativity-reference-points", &len); |
265 | 291 | |
266 | 292 | /* |
267 | - * For type 1 affinity information we want the first field | |
293 | + * For form 1 affinity information we want the first field | |
268 | 294 | */ |
269 | - options = of_find_node_by_path("/options"); | |
270 | - if (options) { | |
271 | - const char *str; | |
272 | - str = of_get_property(options, "ibm,associativity-form", NULL); | |
273 | - if (str && !strcmp(str, "1")) | |
274 | - index = 0; | |
295 | +#define VEC5_AFFINITY_BYTE 5 | |
296 | +#define VEC5_AFFINITY 0x80 | |
297 | + chosen = of_find_node_by_path("/chosen"); | |
298 | + if (chosen) { | |
299 | + vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL); | |
300 | + if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) { | |
301 | + dbg("Using form 1 affinity\n"); | |
302 | + index = 0; | |
303 | + } | |
275 | 304 | } |
276 | 305 | |
277 | 306 | if ((len >= 2 * sizeof(unsigned int)) && ref_points) { |
... | ... | @@ -750,8 +779,9 @@ |
750 | 779 | * If we used a CPU iterator here we would miss printing |
751 | 780 | * the holes in the cpumap. |
752 | 781 | */ |
753 | - for (cpu = 0; cpu < NR_CPUS; cpu++) { | |
754 | - if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) { | |
782 | + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { | |
783 | + if (cpumask_test_cpu(cpu, | |
784 | + node_to_cpumask_map[node])) { | |
755 | 785 | if (count == 0) |
756 | 786 | printk(" %u", cpu); |
757 | 787 | ++count; |
... | ... | @@ -763,7 +793,7 @@ |
763 | 793 | } |
764 | 794 | |
765 | 795 | if (count > 1) |
766 | - printk("-%u", NR_CPUS - 1); | |
796 | + printk("-%u", nr_cpu_ids - 1); | |
767 | 797 | printk("\n"); |
768 | 798 | } |
769 | 799 | } |
... | ... | @@ -939,10 +969,6 @@ |
939 | 969 | else |
940 | 970 | dump_numa_memory_topology(); |
941 | 971 | |
942 | - register_cpu_notifier(&ppc64_numa_nb); | |
943 | - cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE, | |
944 | - (void *)(unsigned long)boot_cpuid); | |
945 | - | |
946 | 972 | for_each_online_node(nid) { |
947 | 973 | unsigned long start_pfn, end_pfn; |
948 | 974 | void *bootmem_vaddr; |
... | ... | @@ -996,6 +1022,16 @@ |
996 | 1022 | } |
997 | 1023 | |
998 | 1024 | init_bootmem_done = 1; |
1025 | + | |
1026 | + /* | |
1027 | + * Now bootmem is initialised we can create the node to cpumask | |
1028 | + * lookup tables and setup the cpu callback to populate them. | |
1029 | + */ | |
1030 | + setup_node_to_cpumask_map(); | |
1031 | + | |
1032 | + register_cpu_notifier(&ppc64_numa_nb); | |
1033 | + cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE, | |
1034 | + (void *)(unsigned long)boot_cpuid); | |
999 | 1035 | } |
1000 | 1036 | |
1001 | 1037 | void __init paging_init(void) |
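
Form 1 affinity is now detected from byte 5 of the "ibm,architecture-vec-5" property under /chosen instead of the old "ibm,associativity-form" string under /options. A sketch of the byte/bit test; the vec5 contents below are fabricated for illustration, the real bytes are whatever firmware negotiated at boot:

    #include <stdio.h>

    #define VEC5_AFFINITY_BYTE 5
    #define VEC5_AFFINITY      0x80

    int main(void)
    {
        unsigned char vec5[] = { 0, 0, 0, 0, 0, 0x80 };   /* example property data */
        int index = 1;                  /* default: use the second reference point */

        if (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY) {
            printf("Using form 1 affinity\n");
            index = 0;                  /* form 1: use the first reference point */
        }
        printf("reference-point index = %d\n", index);
        return 0;
    }
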
arch/powerpc/mm/pgtable_32.c
... | ... | @@ -146,6 +146,14 @@ |
146 | 146 | /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ |
147 | 147 | flags &= ~(_PAGE_USER | _PAGE_EXEC); |
148 | 148 | |
149 | +#ifdef _PAGE_BAP_SR | |
150 | + /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format | |
151 | + * which means that we just cleared supervisor access... oops ;-) This | |
152 | + * restores it | |
153 | + */ | |
154 | + flags |= _PAGE_BAP_SR; | |
155 | +#endif | |
156 | + | |
149 | 157 | return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); |
150 | 158 | } |
151 | 159 | EXPORT_SYMBOL(ioremap_flags); |
152 | 160 | |
... | ... | @@ -385,11 +393,7 @@ |
385 | 393 | return -EINVAL; |
386 | 394 | __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0); |
387 | 395 | wmb(); |
388 | -#ifdef CONFIG_PPC_STD_MMU | |
389 | - flush_hash_pages(0, address, pmd_val(*kpmd), 1); | |
390 | -#else | |
391 | 396 | flush_tlb_page(NULL, address); |
392 | -#endif | |
393 | 397 | pte_unmap(kpte); |
394 | 398 | |
395 | 399 | return 0; |
arch/powerpc/mm/pgtable_64.c
... | ... | @@ -265,6 +265,14 @@ |
265 | 265 | /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ |
266 | 266 | flags &= ~(_PAGE_USER | _PAGE_EXEC); |
267 | 267 | |
268 | +#ifdef _PAGE_BAP_SR | |
269 | + /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format | |
270 | + * which means that we just cleared supervisor access... oops ;-) This | |
271 | + * restores it | |
272 | + */ | |
273 | + flags |= _PAGE_BAP_SR; | |
274 | +#endif | |
275 | + | |
268 | 276 | if (ppc_md.ioremap) |
269 | 277 | return ppc_md.ioremap(addr, size, flags, caller); |
270 | 278 | return __ioremap_caller(addr, size, flags, caller); |
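
The two #ifdef _PAGE_BAP_SR hunks (here and in pgtable_32.c above) exist because, with the new Book3E PTE layout, the _PAGE_USER mask overlaps the supervisor-read bit, so stripping user access from an ioremap also strips supervisor read unless it is added back. An illustration with invented bit values (the real definitions live in the Book3E PTE headers):

    #include <stdio.h>

    #define PAGE_BAP_SR 0x004                      /* supervisor read (example bit) */
    #define PAGE_BAP_UR 0x008                      /* user read (example bit) */
    #define PAGE_USER   (PAGE_BAP_SR | PAGE_BAP_UR)

    int main(void)
    {
        unsigned long flags = PAGE_USER;           /* plus whatever cache attributes */

        flags &= ~PAGE_USER;                       /* ioremap strips user access... */
        printf("supervisor read after strip: %s\n", (flags & PAGE_BAP_SR) ? "yes" : "no");
        flags |= PAGE_BAP_SR;                      /* ...so the hunk restores it */
        printf("supervisor read after fix:   %s\n", (flags & PAGE_BAP_SR) ? "yes" : "no");
        return 0;
    }
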
arch/powerpc/mm/tlb_nohash_low.S
... | ... | @@ -10,7 +10,7 @@ |
10 | 10 | * - tlbil_va |
11 | 11 | * - tlbil_pid |
12 | 12 | * - tlbil_all |
13 | - * - tlbivax_bcast (not yet) | |
13 | + * - tlbivax_bcast | |
14 | 14 | * |
15 | 15 | * Code mostly moved over from misc_32.S |
16 | 16 | * |
... | ... | @@ -33,6 +33,7 @@ |
33 | 33 | #include <asm/ppc_asm.h> |
34 | 34 | #include <asm/asm-offsets.h> |
35 | 35 | #include <asm/processor.h> |
36 | +#include <asm/bug.h> | |
36 | 37 | |
37 | 38 | #if defined(CONFIG_40x) |
38 | 39 | |
... | ... | @@ -65,7 +66,7 @@ |
65 | 66 | * Nothing to do for 8xx, everything is inline |
66 | 67 | */ |
67 | 68 | |
68 | -#elif defined(CONFIG_44x) | |
69 | +#elif defined(CONFIG_44x) /* Includes 47x */ | |
69 | 70 | |
70 | 71 | /* |
71 | 72 | * 440 implementation uses tlbsx/we for tlbil_va and a full sweep |
72 | 73 | |
... | ... | @@ -73,8 +74,14 @@ |
73 | 74 | */ |
74 | 75 | _GLOBAL(__tlbil_va) |
75 | 76 | mfspr r5,SPRN_MMUCR |
76 | - rlwimi r5,r4,0,24,31 /* Set TID */ | |
77 | + mfmsr r10 | |
77 | 78 | |
79 | + /* | |
80 | + * We write 16 bits of STID since 47x supports that much, we | |
81 | + * will never be passed out of bounds values on 440 (hopefully) | |
82 | + */ | |
83 | + rlwimi r5,r4,0,16,31 | |
84 | + | |
78 | 85 | /* We have to run the search with interrupts disabled, otherwise |
79 | 86 | * an interrupt which causes a TLB miss can clobber the MMUCR |
80 | 87 | * between the mtspr and the tlbsx. |
81 | 88 | |
82 | 89 | |
83 | 90 | |
84 | 91 | |
85 | 92 | |
... | ... | @@ -83,24 +90,41 @@ |
83 | 90 | * and restoring MMUCR, so only normal interrupts have to be |
84 | 91 | * taken care of. |
85 | 92 | */ |
86 | - mfmsr r4 | |
87 | 93 | wrteei 0 |
88 | 94 | mtspr SPRN_MMUCR,r5 |
89 | - tlbsx. r3, 0, r3 | |
90 | - wrtee r4 | |
91 | - bne 1f | |
95 | + tlbsx. r6,0,r3 | |
96 | + bne 10f | |
92 | 97 | sync |
93 | - /* There are only 64 TLB entries, so r3 < 64, | |
94 | - * which means bit 22, is clear. Since 22 is | |
95 | - * the V bit in the TLB_PAGEID, loading this | |
98 | +BEGIN_MMU_FTR_SECTION | |
99 | + b 2f | |
100 | +END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x) | |
101 | + /* On 440 There are only 64 TLB entries, so r3 < 64, which means bit | |
102 | + * 22, is clear. Since 22 is the V bit in the TLB_PAGEID, loading this | |
96 | 103 | * value will invalidate the TLB entry. |
97 | 104 | */ |
98 | - tlbwe r3, r3, PPC44x_TLB_PAGEID | |
105 | + tlbwe r6,r6,PPC44x_TLB_PAGEID | |
99 | 106 | isync |
100 | -1: blr | |
107 | +10: wrtee r10 | |
108 | + blr | |
109 | +2: | |
110 | +#ifdef CONFIG_PPC_47x | |
111 | + oris r7,r6,0x8000 /* specify way explicitly */ | |
112 | + clrrwi r4,r3,12 /* get an EPN for the hashing with V = 0 */ | |
113 | + ori r4,r4,PPC47x_TLBE_SIZE | |
114 | + tlbwe r4,r7,0 /* write it */ | |
115 | + isync | |
116 | + wrtee r10 | |
117 | + blr | |
118 | +#else /* CONFIG_PPC_47x */ | |
119 | +1: trap | |
120 | + EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; | |
121 | +#endif /* !CONFIG_PPC_47x */ | |
101 | 122 | |
102 | 123 | _GLOBAL(_tlbil_all) |
103 | 124 | _GLOBAL(_tlbil_pid) |
125 | +BEGIN_MMU_FTR_SECTION | |
126 | + b 2f | |
127 | +END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x) | |
104 | 128 | li r3,0 |
105 | 129 | sync |
106 | 130 | |
107 | 131 | |
... | ... | @@ -115,7 +139,77 @@ |
115 | 139 | |
116 | 140 | isync |
117 | 141 | blr |
142 | +2: | |
143 | +#ifdef CONFIG_PPC_47x | |
144 | + /* 476 variant. There's no simple way to do this; hopefully we'll | |
145 | + * try to limit the number of such full invalidates | |
146 | + */ | |
147 | + mfmsr r11 /* Interrupts off */ | |
148 | + wrteei 0 | |
149 | + li r3,-1 /* Current set */ | |
150 | + lis r10,tlb_47x_boltmap@h | |
151 | + ori r10,r10,tlb_47x_boltmap@l | |
152 | + lis r7,0x8000 /* Specify way explicitly */ | |
118 | 153 | |
154 | + b 9f /* For each set */ | |
155 | + | |
156 | +1: li r9,4 /* Number of ways */ | |
157 | + li r4,0 /* Current way */ | |
158 | + li r6,0 /* Default entry value 0 */ | |
159 | + andi. r0,r8,1 /* Check if way 0 is bolted */ | |
160 | + mtctr r9 /* Load way counter */ | |
161 | + bne- 3f /* Bolted, skip loading it */ | |
162 | + | |
163 | +2: /* For each way */ | |
164 | + or r5,r3,r4 /* Make way|index for tlbre */ | |
165 | + rlwimi r5,r5,16,8,15 /* Copy index into position */ | |
166 | + tlbre r6,r5,0 /* Read entry */ | |
167 | +3: addis r4,r4,0x2000 /* Next way */ | |
168 | + andi. r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */ | |
169 | + beq 4f /* Nope, skip it */ | |
170 | + rlwimi r7,r5,0,1,2 /* Insert way number */ | |
171 | + rlwinm r6,r6,0,21,19 /* Clear V */ | |
172 | + tlbwe r6,r7,0 /* Write it */ | |
173 | +4: bdnz 2b /* Loop for each way */ | |
174 | + srwi r8,r8,1 /* Next boltmap bit */ | |
175 | +9: cmpwi cr1,r3,255 /* Last set done ? */ | |
176 | + addi r3,r3,1 /* Next set */ | |
177 | + beq cr1,1f /* End of loop */ | |
178 | + andi. r0,r3,0x1f /* Need to load a new boltmap word ? */ | |
179 | + bne 1b /* No, loop */ | |
180 | + lwz r8,0(r10) /* Load boltmap entry */ | |
181 | + addi r10,r10,4 /* Next word */ | |
182 | + b 1b /* Then loop */ | |
183 | +1: isync /* Sync shadows */ | |
184 | + wrtee r11 | |
185 | +#else /* CONFIG_PPC_47x */ | |
186 | +1: trap | |
187 | + EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; | |
188 | +#endif /* !CONFIG_PPC_47x */ | |
189 | + blr | |
190 | + | |
191 | +#ifdef CONFIG_PPC_47x | |
192 | +/* | |
193 | + * _tlbivax_bcast is only on 47x. We don't bother doing a runtime | |
194 | + * check though, it will blow up soon enough if we mistakenly try | |
195 | + * to use it on a 440. | |
196 | + */ | |
197 | +_GLOBAL(_tlbivax_bcast) | |
198 | + mfspr r5,SPRN_MMUCR | |
199 | + mfmsr r10 | |
200 | + rlwimi r5,r4,0,16,31 | |
201 | + wrteei 0 | |
202 | + mtspr SPRN_MMUCR,r5 | |
203 | +/* tlbivax 0,r3 - use .long to avoid binutils deps */ | |
204 | + .long 0x7c000624 | (r3 << 11) | |
205 | + isync | |
206 | + eieio | |
207 | + tlbsync | |
208 | + sync | |
209 | + wrtee r10 | |
210 | + blr | |
211 | +#endif /* CONFIG_PPC_47x */ | |
212 | + | |
119 | 213 | #elif defined(CONFIG_FSL_BOOKE) |
120 | 214 | /* |
121 | 215 | * FSL BookE implementations. |
... | ... | @@ -270,5 +364,33 @@ |
270 | 364 | blr |
271 | 365 | #else |
272 | 366 | #error Unsupported processor type ! |
367 | +#endif | |
368 | + | |
369 | +#if defined(CONFIG_FSL_BOOKE) | |
370 | +/* | |
371 | + * extern void loadcam_entry(unsigned int index) | |
372 | + * | |
373 | + * Load TLBCAM[index] entry in to the L2 CAM MMU | |
374 | + */ | |
375 | +_GLOBAL(loadcam_entry) | |
376 | + LOAD_REG_ADDR(r4, TLBCAM) | |
377 | + mulli r5,r3,TLBCAM_SIZE | |
378 | + add r3,r5,r4 | |
379 | + lwz r4,TLBCAM_MAS0(r3) | |
380 | + mtspr SPRN_MAS0,r4 | |
381 | + lwz r4,TLBCAM_MAS1(r3) | |
382 | + mtspr SPRN_MAS1,r4 | |
383 | + PPC_LL r4,TLBCAM_MAS2(r3) | |
384 | + mtspr SPRN_MAS2,r4 | |
385 | + lwz r4,TLBCAM_MAS3(r3) | |
386 | + mtspr SPRN_MAS3,r4 | |
387 | +BEGIN_MMU_FTR_SECTION | |
388 | + lwz r4,TLBCAM_MAS7(r3) | |
389 | + mtspr SPRN_MAS7,r4 | |
390 | +END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) | |
391 | + isync | |
392 | + tlbwe | |
393 | + isync | |
394 | + blr | |
273 | 395 | #endif |
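
The new 47x branch of _tlbil_all/_tlbil_pid walks every set and way of the TLB, skips way 0 of any set the boltmap marks as bolted, and rewrites each valid entry with its valid bit cleared. A C rendering of that loop structure to make the assembly easier to follow; the constants, the fake TLB array and the read/write helpers are illustrative stand-ins for the real tlbre/tlbwe way-and-index encoding:

    #include <stdio.h>

    #define NUM_SETS  256
    #define NUM_WAYS  4
    #define TLB_VALID 0x1u

    static unsigned int  fake_tlb[NUM_SETS][NUM_WAYS];
    static unsigned long boltmap[NUM_SETS / (8 * sizeof(unsigned long))];

    static unsigned int tlb_read(unsigned int s, unsigned int w)  { return fake_tlb[s][w]; }
    static void tlb_write(unsigned int s, unsigned int w, unsigned int v) { fake_tlb[s][w] = v; }

    static void tlbil_all_47x_sketch(void)
    {
        for (unsigned int set = 0; set < NUM_SETS; set++) {
            int bolted = (boltmap[set / (8 * sizeof(unsigned long))]
                          >> (set % (8 * sizeof(unsigned long)))) & 1;

            for (unsigned int way = 0; way < NUM_WAYS; way++) {
                if (way == 0 && bolted)
                    continue;                             /* leave bolted entries alone */
                unsigned int w0 = tlb_read(set, way);
                if (w0 & TLB_VALID)
                    tlb_write(set, way, w0 & ~TLB_VALID); /* clear V */
            }
        }
    }

    int main(void)
    {
        fake_tlb[0][0] = TLB_VALID;    /* pretend set 0, way 0 holds the pinned entry */
        boltmap[0] |= 1;               /* ...and is marked bolted */
        fake_tlb[1][2] = TLB_VALID;    /* an ordinary valid entry */
        tlbil_all_47x_sketch();
        printf("bolted still valid: %u, other still valid: %u\n",
               fake_tlb[0][0] & TLB_VALID, fake_tlb[1][2] & TLB_VALID);
        return 0;
    }
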
arch/powerpc/platforms/44x/Kconfig
1 | +config PPC_47x | |
2 | + bool "Support for 47x variant" | |
3 | + depends on 44x | |
4 | + default n | |
5 | + select MPIC | |
6 | + help | |
7 | + This option enables support for the 47x family of processors and is | |
8 | + not currently compatible with other 44x or 46x variants. | |
9 | + | |
1 | 10 | config BAMBOO |
2 | 11 | bool "Bamboo" |
3 | 12 | depends on 44x |
... | ... | @@ -150,6 +159,17 @@ |
150 | 159 | select PCI |
151 | 160 | help |
152 | 161 | This option enables support for the AMCC PPC440EP evaluation board. |
162 | + | |
163 | +config ISS4xx | |
164 | + bool "ISS 4xx Simulator" | |
165 | + depends on (44x || 40x) | |
166 | + default n | |
167 | + select 405GP if 40x | |
168 | + select 440GP if 44x && !PPC_47x | |
169 | + select PPC_FPU | |
170 | + select OF_RTC | |
171 | + help | |
172 | + This option enables support for the IBM ISS simulation environment. | |
153 | 173 | |
154 | 174 | #config LUAN |
155 | 175 | # bool "Luan" |
arch/powerpc/platforms/44x/Makefile
arch/powerpc/platforms/44x/iss4xx.c
1 | +/* | |
2 | + * PPC476 board specific routines | |
3 | + * | |
4 | + * Copyright 2010 Torez Smith, IBM Corporation. | |
5 | + * | |
6 | + * Based on earlier code: | |
7 | + * Matt Porter <mporter@kernel.crashing.org> | |
8 | + * Copyright 2002-2005 MontaVista Software Inc. | |
9 | + * | |
10 | + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> | |
11 | + * Copyright (c) 2003-2005 Zultys Technologies | |
12 | + * | |
13 | + * Rewritten and ported to the merged powerpc tree: | |
14 | + * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. | |
15 | + * | |
16 | + * This program is free software; you can redistribute it and/or modify it | |
17 | + * under the terms of the GNU General Public License as published by the | |
18 | + * Free Software Foundation; either version 2 of the License, or (at your | |
19 | + * option) any later version. | |
20 | + */ | |
21 | + | |
22 | +#include <linux/init.h> | |
23 | +#include <linux/of_platform.h> | |
24 | +#include <linux/rtc.h> | |
25 | + | |
26 | +#include <asm/machdep.h> | |
27 | +#include <asm/prom.h> | |
28 | +#include <asm/udbg.h> | |
29 | +#include <asm/time.h> | |
30 | +#include <asm/uic.h> | |
31 | +#include <asm/ppc4xx.h> | |
32 | +#include <asm/mpic.h> | |
33 | +#include <asm/mmu.h> | |
34 | + | |
35 | +static __initdata struct of_device_id iss4xx_of_bus[] = { | |
36 | + { .compatible = "ibm,plb4", }, | |
37 | + { .compatible = "ibm,plb6", }, | |
38 | + { .compatible = "ibm,opb", }, | |
39 | + { .compatible = "ibm,ebc", }, | |
40 | + {}, | |
41 | +}; | |
42 | + | |
43 | +static int __init iss4xx_device_probe(void) | |
44 | +{ | |
45 | + of_platform_bus_probe(NULL, iss4xx_of_bus, NULL); | |
46 | + of_instantiate_rtc(); | |
47 | + | |
48 | + return 0; | |
49 | +} | |
50 | +machine_device_initcall(iss4xx, iss4xx_device_probe); | |
51 | + | |
52 | +/* We can have either UICs or MPICs */ | |
53 | +static void __init iss4xx_init_irq(void) | |
54 | +{ | |
55 | + struct device_node *np; | |
56 | + | |
57 | + /* Find top level interrupt controller */ | |
58 | + for_each_node_with_property(np, "interrupt-controller") { | |
59 | + if (of_get_property(np, "interrupts", NULL) == NULL) | |
60 | + break; | |
61 | + } | |
62 | + if (np == NULL) | |
63 | + panic("Can't find top level interrupt controller"); | |
64 | + | |
65 | + /* Check type and do appropriate initialization */ | |
66 | + if (of_device_is_compatible(np, "ibm,uic")) { | |
67 | + uic_init_tree(); | |
68 | + ppc_md.get_irq = uic_get_irq; | |
69 | +#ifdef CONFIG_MPIC | |
70 | + } else if (of_device_is_compatible(np, "chrp,open-pic")) { | |
71 | + /* The MPIC driver will get everything it needs from the | |
72 | + * device-tree, just pass 0 to all arguments | |
73 | + */ | |
74 | + struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0, | |
75 | + " MPIC "); | |
76 | + BUG_ON(mpic == NULL); | |
77 | + mpic_init(mpic); | |
78 | + ppc_md.get_irq = mpic_get_irq; | |
79 | +#endif | |
80 | + } else | |
81 | + panic("Unrecognized top level interrupt controller"); | |
82 | +} | |
83 | + | |
84 | +#ifdef CONFIG_SMP | |
85 | +static void __cpuinit smp_iss4xx_setup_cpu(int cpu) | |
86 | +{ | |
87 | + mpic_setup_this_cpu(); | |
88 | +} | |
89 | + | |
90 | +static void __cpuinit smp_iss4xx_kick_cpu(int cpu) | |
91 | +{ | |
92 | + struct device_node *cpunode = of_get_cpu_node(cpu, NULL); | |
93 | + const u64 *spin_table_addr_prop; | |
94 | + u32 *spin_table; | |
95 | + extern void start_secondary_47x(void); | |
96 | + | |
97 | + BUG_ON(cpunode == NULL); | |
98 | + | |
99 | + /* Assume spin table. We could test for the enable-method in | |
100 | + * the device-tree but currently there's little point as it's | |
101 | + * our only supported method | |
102 | + */ | |
103 | + spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr", | |
104 | + NULL); | |
105 | + if (spin_table_addr_prop == NULL) { | |
106 | + pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu); | |
107 | + return; | |
108 | + } | |
109 | + | |
110 | + /* Assume it's mapped as part of the linear mapping. This is a bit | |
111 | + * fishy but will work fine for now | |
112 | + */ | |
113 | + spin_table = (u32 *)__va(*spin_table_addr_prop); | |
114 | + pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table); | |
115 | + | |
116 | + spin_table[3] = cpu; | |
117 | + smp_wmb(); | |
118 | + spin_table[1] = __pa(start_secondary_47x); | |
119 | + mb(); | |
120 | +} | |
121 | + | |
122 | +static struct smp_ops_t iss_smp_ops = { | |
123 | + .probe = smp_mpic_probe, | |
124 | + .message_pass = smp_mpic_message_pass, | |
125 | + .setup_cpu = smp_iss4xx_setup_cpu, | |
126 | + .kick_cpu = smp_iss4xx_kick_cpu, | |
127 | + .give_timebase = smp_generic_give_timebase, | |
128 | + .take_timebase = smp_generic_take_timebase, | |
129 | +}; | |
130 | + | |
131 | +static void __init iss4xx_smp_init(void) | |
132 | +{ | |
133 | + if (mmu_has_feature(MMU_FTR_TYPE_47x)) | |
134 | + smp_ops = &iss_smp_ops; | |
135 | +} | |
136 | + | |
137 | +#else /* CONFIG_SMP */ | |
138 | +static void __init iss4xx_smp_init(void) { } | |
139 | +#endif /* CONFIG_SMP */ | |
140 | + | |
141 | +static void __init iss4xx_setup_arch(void) | |
142 | +{ | |
143 | + iss4xx_smp_init(); | |
144 | +} | |
145 | + | |
146 | +/* | |
147 | + * Called very early, MMU is off, device-tree isn't unflattened | |
148 | + */ | |
149 | +static int __init iss4xx_probe(void) | |
150 | +{ | |
151 | + unsigned long root = of_get_flat_dt_root(); | |
152 | + | |
153 | + if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx")) | |
154 | + return 0; | |
155 | + | |
156 | + return 1; | |
157 | +} | |
158 | + | |
159 | +define_machine(iss4xx) { | |
160 | + .name = "ISS-4xx", | |
161 | + .probe = iss4xx_probe, | |
162 | + .progress = udbg_progress, | |
163 | + .init_IRQ = iss4xx_init_irq, | |
164 | + .setup_arch = iss4xx_setup_arch, | |
165 | + .restart = ppc4xx_reset_system, | |
166 | + .calibrate_decr = generic_calibrate_decr, | |
167 | +}; |
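
smp_iss4xx_kick_cpu() releases a secondary CPU that firmware left spinning on a table in memory: the logical CPU number goes into slot 3, then (after a write barrier) the entry point into slot 1, which is what the spinning CPU watches. A userspace model of that handshake; the slot layout follows the code above, while the addresses and the use of GCC builtins in place of smp_wmb()/mb() are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static volatile unsigned int spin_table[4];

    static void *secondary(void *arg)
    {
        (void)arg;
        while (spin_table[1] == 0)      /* held in the spin loop until released */
            ;
        __sync_synchronize();
        printf("secondary: released as cpu %u, entry 0x%x\n",
               spin_table[3], spin_table[1]);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, secondary, NULL);
        spin_table[3] = 1;              /* logical cpu number first... */
        __sync_synchronize();           /* ...ordered before the release (smp_wmb) */
        spin_table[1] = 0x2000;         /* entry point: releases the secondary (mb) */
        pthread_join(t, NULL);
        return 0;
    }
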
arch/powerpc/platforms/83xx/mpc831x_rdb.c
arch/powerpc/platforms/83xx/mpc837x_rdb.c
arch/powerpc/platforms/86xx/mpc8610_hpcd.c
... | ... | @@ -83,7 +83,8 @@ |
83 | 83 | { .compatible = "fsl,mpc8610-immr", }, |
84 | 84 | { .compatible = "fsl,mpc8610-guts", }, |
85 | 85 | { .compatible = "simple-bus", }, |
86 | - { .compatible = "gianfar", }, | |
86 | + /* So that the DMA channel nodes can be probed individually: */ | |
87 | + { .compatible = "fsl,eloplus-dma", }, | |
87 | 88 | {} |
88 | 89 | }; |
89 | 90 |
arch/powerpc/platforms/Kconfig.cputype
... | ... | @@ -43,7 +43,7 @@ |
43 | 43 | select PPC_PCI_CHOICE |
44 | 44 | |
45 | 45 | config 44x |
46 | - bool "AMCC 44x" | |
46 | + bool "AMCC 44x, 46x or 47x" | |
47 | 47 | select PPC_DCR_NATIVE |
48 | 48 | select PPC_UDBG_16550 |
49 | 49 | select 4xx_SOC |
... | ... | @@ -294,7 +294,7 @@ |
294 | 294 | This enables the powerpc-specific perf_event back-end. |
295 | 295 | |
296 | 296 | config SMP |
297 | - depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE | |
297 | + depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x | |
298 | 298 | bool "Symmetric multi-processing support" |
299 | 299 | ---help--- |
300 | 300 | This enables support for systems with more than one CPU. If you have |
... | ... | @@ -322,6 +322,7 @@ |
322 | 322 | config NOT_COHERENT_CACHE |
323 | 323 | bool |
324 | 324 | depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON |
325 | + default n if PPC_47x | |
325 | 326 | default y |
326 | 327 | |
327 | 328 | config CHECK_CACHE_COHERENCY |
arch/powerpc/platforms/cell/cbe_cpufreq.c
... | ... | @@ -118,7 +118,7 @@ |
118 | 118 | policy->cur = cbe_freqs[cur_pmode].frequency; |
119 | 119 | |
120 | 120 | #ifdef CONFIG_SMP |
121 | - cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); | |
121 | + cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); | |
122 | 122 | #endif |
123 | 123 | |
124 | 124 | cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); |
arch/powerpc/platforms/iseries/exception.S
... | ... | @@ -252,8 +252,8 @@ |
252 | 252 | li r11,1 |
253 | 253 | ld r12,PACALPPACAPTR(r13) |
254 | 254 | stb r11,LPPACADECRINT(r12) |
255 | - LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy) | |
256 | - lwz r12,0(r12) | |
255 | + li r12,-1 | |
256 | + clrldi r12,r12,33 /* set DEC to 0x7fffffff */ | |
257 | 257 | mtspr SPRN_DEC,r12 |
258 | 258 | /* fall through */ |
259 | 259 |
arch/powerpc/platforms/iseries/pci.c
... | ... | @@ -32,6 +32,7 @@ |
32 | 32 | #include <linux/module.h> |
33 | 33 | #include <linux/pci.h> |
34 | 34 | #include <linux/of.h> |
35 | +#include <linux/ratelimit.h> | |
35 | 36 | |
36 | 37 | #include <asm/types.h> |
37 | 38 | #include <asm/io.h> |
38 | 39 | |
... | ... | @@ -584,14 +585,9 @@ |
584 | 585 | |
585 | 586 | orig_addr = (unsigned long __force)addr; |
586 | 587 | if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) { |
587 | - static unsigned long last_jiffies; | |
588 | - static int num_printed; | |
588 | + static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10); | |
589 | 589 | |
590 | - if (time_after(jiffies, last_jiffies + 60 * HZ)) { | |
591 | - last_jiffies = jiffies; | |
592 | - num_printed = 0; | |
593 | - } | |
594 | - if (num_printed++ < 10) | |
590 | + if (__ratelimit(&ratelimit)) | |
595 | 591 | printk(KERN_ERR |
596 | 592 | "iSeries_%s: invalid access at IO address %p\n", |
597 | 593 | func, addr); |
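
The hand-rolled "at most 10 messages per 60 seconds" bookkeeping is replaced by the generic ratelimit helper. A userspace sketch of the same policy, since <linux/ratelimit.h> is kernel-only; the limits mirror DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10) in the hunk:

    #include <stdio.h>
    #include <time.h>

    struct ratelimit { time_t begin; int interval; int burst; int printed; };

    static int ratelimit_ok(struct ratelimit *rs)
    {
        time_t now = time(NULL);

        if (now - rs->begin >= rs->interval) {   /* new window: reset the budget */
            rs->begin = now;
            rs->printed = 0;
        }
        if (rs->printed >= rs->burst)
            return 0;                            /* suppressed */
        rs->printed++;
        return 1;
    }

    int main(void)
    {
        struct ratelimit rs = { .interval = 60, .burst = 10 };

        for (int i = 0; i < 100; i++)
            if (ratelimit_ok(&rs))
                fprintf(stderr, "invalid access at IO address (message %d)\n", i);
        return 0;    /* only the first 10 messages of each 60s window get printed */
    }
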
arch/powerpc/platforms/iseries/smp.c
arch/powerpc/platforms/pasemi/cpufreq.c
... | ... | @@ -213,7 +213,7 @@ |
213 | 213 | pr_debug("current astate is at %d\n",cur_astate); |
214 | 214 | |
215 | 215 | policy->cur = pas_freqs[cur_astate].frequency; |
216 | - cpumask_copy(policy->cpus, &cpu_online_map); | |
216 | + cpumask_copy(policy->cpus, cpu_online_mask); | |
217 | 217 | |
218 | 218 | ppc_proc_freq = policy->cur * 1000ul; |
219 | 219 |
arch/powerpc/platforms/powermac/cpufreq_64.c
... | ... | @@ -362,7 +362,7 @@ |
362 | 362 | /* secondary CPUs are tied to the primary one by the |
363 | 363 | * cpufreq core if in the secondary policy we tell it that |
364 | 364 | * it actually must be one policy together with all others. */ |
365 | - cpumask_copy(policy->cpus, &cpu_online_map); | |
365 | + cpumask_copy(policy->cpus, cpu_online_mask); | |
366 | 366 | cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); |
367 | 367 | |
368 | 368 | return cpufreq_frequency_table_cpuinfo(policy, |
arch/powerpc/platforms/powermac/low_i2c.c
... | ... | @@ -592,7 +592,7 @@ |
592 | 592 | /* Probe keywest-i2c busses */ |
593 | 593 | for_each_compatible_node(np, "i2c","keywest-i2c") { |
594 | 594 | struct pmac_i2c_host_kw *host; |
595 | - int multibus, chans, i; | |
595 | + int multibus; | |
596 | 596 | |
597 | 597 | /* Found one, init a host structure */ |
598 | 598 | host = kw_i2c_host_init(np); |
... | ... | @@ -614,6 +614,8 @@ |
614 | 614 | * parent type |
615 | 615 | */ |
616 | 616 | if (multibus) { |
617 | + int chans, i; | |
618 | + | |
617 | 619 | parent = of_get_parent(np); |
618 | 620 | if (parent == NULL) |
619 | 621 | continue; |
... | ... | @@ -1258,8 +1260,7 @@ |
1258 | 1260 | if (inst == NULL) |
1259 | 1261 | return; |
1260 | 1262 | pmac_i2c_close(inst->bus); |
1261 | - if (inst) | |
1262 | - kfree(inst); | |
1263 | + kfree(inst); | |
1263 | 1264 | } |
1264 | 1265 | |
1265 | 1266 | static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len) |
arch/powerpc/platforms/powermac/pmac.h
arch/powerpc/platforms/powermac/setup.c
... | ... | @@ -480,7 +480,7 @@ |
480 | 480 | #endif |
481 | 481 | |
482 | 482 | /* SMP Init has to be done early as we need to patch up |
483 | - * cpu_possible_map before interrupt stacks are allocated | |
483 | + * cpu_possible_mask before interrupt stacks are allocated | |
484 | 484 | * or kaboom... |
485 | 485 | */ |
486 | 486 | #ifdef CONFIG_SMP |
... | ... | @@ -646,7 +646,7 @@ |
646 | 646 | /* access per cpu vars from generic smp.c */ |
647 | 647 | DECLARE_PER_CPU(int, cpu_state); |
648 | 648 | |
649 | -static void pmac_cpu_die(void) | |
649 | +static void pmac64_cpu_die(void) | |
650 | 650 | { |
651 | 651 | /* |
652 | 652 | * turn off as much as possible, we'll be |
... | ... | @@ -717,8 +717,13 @@ |
717 | 717 | .pcibios_after_init = pmac_pcibios_after_init, |
718 | 718 | .phys_mem_access_prot = pci_phys_mem_access_prot, |
719 | 719 | #endif |
720 | -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64) | |
721 | - .cpu_die = pmac_cpu_die, | |
720 | +#ifdef CONFIG_HOTPLUG_CPU | |
721 | +#ifdef CONFIG_PPC64 | |
722 | + .cpu_die = pmac64_cpu_die, | |
723 | +#endif | |
724 | +#ifdef CONFIG_PPC32 | |
725 | + .cpu_die = pmac32_cpu_die, | |
726 | +#endif | |
722 | 727 | #endif |
723 | 728 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) |
724 | 729 | .cpu_die = generic_mach_cpu_die, |
arch/powerpc/platforms/powermac/smp.c
... | ... | @@ -53,6 +53,8 @@ |
53 | 53 | #include <asm/pmac_low_i2c.h> |
54 | 54 | #include <asm/pmac_pfunc.h> |
55 | 55 | |
56 | +#include "pmac.h" | |
57 | + | |
56 | 58 | #undef DEBUG |
57 | 59 | |
58 | 60 | #ifdef DEBUG |
... | ... | @@ -315,7 +317,7 @@ |
315 | 317 | /* This is necessary because OF doesn't know about the |
316 | 318 | * secondary cpu(s), and thus there aren't nodes in the |
317 | 319 | * device tree for them, and smp_setup_cpu_maps hasn't |
318 | - * set their bits in cpu_present_map. | |
320 | + * set their bits in cpu_present_mask. | |
319 | 321 | */ |
320 | 322 | if (ncpus > NR_CPUS) |
321 | 323 | ncpus = NR_CPUS; |
322 | 324 | |
... | ... | @@ -878,10 +880,9 @@ |
878 | 880 | return 0; |
879 | 881 | } |
880 | 882 | |
881 | -extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */ | |
882 | 883 | static int cpu_dead[NR_CPUS]; |
883 | 884 | |
884 | -void cpu_die(void) | |
885 | +void pmac32_cpu_die(void) | |
885 | 886 | { |
886 | 887 | local_irq_disable(); |
887 | 888 | cpu_dead[smp_processor_id()] = 1; |
... | ... | @@ -944,7 +945,7 @@ |
944 | 945 | } |
945 | 946 | #ifdef CONFIG_PPC32 |
946 | 947 | else { |
947 | - /* We have to set bits in cpu_possible_map here since the | |
948 | + /* We have to set bits in cpu_possible_mask here since the | |
948 | 949 | * secondary CPU(s) aren't in the device tree. Various |
949 | 950 | * things won't be initialized for CPUs not in the possible |
950 | 951 | * map, so we really need to fix it up here. |
arch/powerpc/platforms/pseries/Makefile
arch/powerpc/platforms/pseries/dlpar.c
... | ... | @@ -79,13 +79,12 @@ |
79 | 79 | * prepend this to the full_name. |
80 | 80 | */ |
81 | 81 | name = (char *)ccwa + ccwa->name_offset; |
82 | - dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL); | |
82 | + dn->full_name = kasprintf(GFP_KERNEL, "/%s", name); | |
83 | 83 | if (!dn->full_name) { |
84 | 84 | kfree(dn); |
85 | 85 | return NULL; |
86 | 86 | } |
87 | 87 | |
88 | - sprintf(dn->full_name, "/%s", name); | |
89 | 88 | return dn; |
90 | 89 | } |
91 | 90 | |
92 | 91 | |
... | ... | @@ -410,15 +409,13 @@ |
410 | 409 | * directory of the device tree. CPUs actually live in the |
411 | 410 | * cpus directory so we need to fixup the full_name. |
412 | 411 | */ |
413 | - cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1, | |
414 | - GFP_KERNEL); | |
412 | + cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name); | |
415 | 413 | if (!cpu_name) { |
416 | 414 | dlpar_free_cc_nodes(dn); |
417 | 415 | rc = -ENOMEM; |
418 | 416 | goto out; |
419 | 417 | } |
420 | 418 | |
421 | - sprintf(cpu_name, "/cpus%s", dn->full_name); | |
422 | 419 | kfree(dn->full_name); |
423 | 420 | dn->full_name = cpu_name; |
424 | 421 | |
... | ... | @@ -433,6 +430,7 @@ |
433 | 430 | if (rc) { |
434 | 431 | dlpar_release_drc(drc_index); |
435 | 432 | dlpar_free_cc_nodes(dn); |
433 | + goto out; | |
436 | 434 | } |
437 | 435 | |
438 | 436 | rc = dlpar_online_cpu(dn); |
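
The kmalloc(strlen(...) + N) plus sprintf() pairs become single kasprintf() calls, which size and allocate the buffer in one step. A sketch of the equivalent with glibc's asprintf(); the node name below is an example, not taken from the patch:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *dn_full_name = "/PowerPC,POWER7@10";    /* example node name */
        char *cpu_name;

        /* asprintf() plays the role of kasprintf(GFP_KERNEL, "/cpus%s", ...) here */
        if (asprintf(&cpu_name, "/cpus%s", dn_full_name) < 0)
            return 1;                                        /* allocation failed */
        printf("%s\n", cpu_name);                            /* "/cpus/PowerPC,POWER7@10" */
        free(cpu_name);
        return 0;
    }
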
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/event_sources.c
1 | +/* | |
2 | + * Copyright (C) 2001 Dave Engebretsen IBM Corporation | |
3 | + * | |
4 | + * This program is free software; you can redistribute it and/or modify | |
5 | + * it under the terms of the GNU General Public License as published by | |
6 | + * the Free Software Foundation; either version 2 of the License, or | |
7 | + * (at your option) any later version. | |
8 | + * | |
9 | + * This program is distributed in the hope that it will be useful, | |
10 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | + * GNU General Public License for more details. | |
13 | + * | |
14 | + * You should have received a copy of the GNU General Public License | |
15 | + * along with this program; if not, write to the Free Software | |
16 | + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
17 | + */ | |
18 | + | |
19 | +#include <asm/prom.h> | |
20 | + | |
21 | +#include "pseries.h" | |
22 | + | |
23 | +void request_event_sources_irqs(struct device_node *np, | |
24 | + irq_handler_t handler, | |
25 | + const char *name) | |
26 | +{ | |
27 | + int i, index, count = 0; | |
28 | + struct of_irq oirq; | |
29 | + const u32 *opicprop; | |
30 | + unsigned int opicplen; | |
31 | + unsigned int virqs[16]; | |
32 | + | |
33 | + /* Check for obsolete "open-pic-interrupt" property. If present, then | |
34 | + * map those interrupts using the default interrupt host and default | |
35 | + * trigger | |
36 | + */ | |
37 | + opicprop = of_get_property(np, "open-pic-interrupt", &opicplen); | |
38 | + if (opicprop) { | |
39 | + opicplen /= sizeof(u32); | |
40 | + for (i = 0; i < opicplen; i++) { | |
41 | + if (count > 15) | |
42 | + break; | |
43 | + virqs[count] = irq_create_mapping(NULL, *(opicprop++)); | |
44 | + if (virqs[count] == NO_IRQ) | |
45 | + printk(KERN_ERR "Unable to allocate interrupt " | |
46 | + "number for %s\n", np->full_name); | |
47 | + else | |
48 | + count++; | |
49 | + | |
50 | + } | |
51 | + } | |
52 | + /* Else use normal interrupt tree parsing */ | |
53 | + else { | |
54 | + /* First try to do a proper OF tree parsing */ | |
55 | + for (index = 0; of_irq_map_one(np, index, &oirq) == 0; | |
56 | + index++) { | |
57 | + if (count > 15) | |
58 | + break; | |
59 | + virqs[count] = irq_create_of_mapping(oirq.controller, | |
60 | + oirq.specifier, | |
61 | + oirq.size); | |
62 | + if (virqs[count] == NO_IRQ) | |
63 | + printk(KERN_ERR "Unable to allocate interrupt " | |
64 | + "number for %s\n", np->full_name); | |
65 | + else | |
66 | + count++; | |
67 | + } | |
68 | + } | |
69 | + | |
70 | + /* Now request them */ | |
71 | + for (i = 0; i < count; i++) { | |
72 | + if (request_irq(virqs[i], handler, 0, name, NULL)) { | |
73 | + printk(KERN_ERR "Unable to request interrupt %d for " | |
74 | + "%s\n", virqs[i], np->full_name); | |
75 | + return; | |
76 | + } | |
77 | + } | |
78 | +} |
arch/powerpc/platforms/pseries/hotplug-cpu.c
... | ... | @@ -154,30 +154,6 @@ |
154 | 154 | for(;;); |
155 | 155 | } |
156 | 156 | |
157 | -static int qcss_tok; /* query-cpu-stopped-state token */ | |
158 | - | |
159 | -/* Get state of physical CPU. | |
160 | - * Return codes: | |
161 | - * 0 - The processor is in the RTAS stopped state | |
162 | - * 1 - stop-self is in progress | |
163 | - * 2 - The processor is not in the RTAS stopped state | |
164 | - * -1 - Hardware Error | |
165 | - * -2 - Hardware Busy, Try again later. | |
166 | - */ | |
167 | -static int query_cpu_stopped(unsigned int pcpu) | |
168 | -{ | |
169 | - int cpu_status, status; | |
170 | - | |
171 | - status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu); | |
172 | - if (status != 0) { | |
173 | - printk(KERN_ERR | |
174 | - "RTAS query-cpu-stopped-state failed: %i\n", status); | |
175 | - return status; | |
176 | - } | |
177 | - | |
178 | - return cpu_status; | |
179 | -} | |
180 | - | |
181 | 157 | static int pseries_cpu_disable(void) |
182 | 158 | { |
183 | 159 | int cpu = smp_processor_id(); |
... | ... | @@ -187,7 +163,7 @@ |
187 | 163 | |
188 | 164 | /*fix boot_cpuid here*/ |
189 | 165 | if (cpu == boot_cpuid) |
190 | - boot_cpuid = any_online_cpu(cpu_online_map); | |
166 | + boot_cpuid = cpumask_any(cpu_online_mask); | |
191 | 167 | |
192 | 168 | /* FIXME: abstract this to not be platform specific later on */ |
193 | 169 | xics_migrate_irqs_away(); |
... | ... | @@ -224,8 +200,9 @@ |
224 | 200 | } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { |
225 | 201 | |
226 | 202 | for (tries = 0; tries < 25; tries++) { |
227 | - cpu_status = query_cpu_stopped(pcpu); | |
228 | - if (cpu_status == 0 || cpu_status == -1) | |
203 | + cpu_status = smp_query_cpu_stopped(pcpu); | |
204 | + if (cpu_status == QCSS_STOPPED || | |
205 | + cpu_status == QCSS_HARDWARE_ERROR) | |
229 | 206 | break; |
230 | 207 | cpu_relax(); |
231 | 208 | } |
... | ... | @@ -245,7 +222,7 @@ |
245 | 222 | } |
246 | 223 | |
247 | 224 | /* |
248 | - * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle | |
225 | + * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle | |
249 | 226 | * here is that a cpu device node may represent up to two logical cpus |
250 | 227 | * in the SMT case. We must honor the assumption in other code that |
251 | 228 | * the logical ids for sibling SMT threads x and y are adjacent, such |
... | ... | @@ -254,7 +231,7 @@ |
254 | 231 | static int pseries_add_processor(struct device_node *np) |
255 | 232 | { |
256 | 233 | unsigned int cpu; |
257 | - cpumask_t candidate_map, tmp = CPU_MASK_NONE; | |
234 | + cpumask_var_t candidate_mask, tmp; | |
258 | 235 | int err = -ENOSPC, len, nthreads, i; |
259 | 236 | const u32 *intserv; |
260 | 237 | |
261 | 238 | |
262 | 239 | |
263 | 240 | |
264 | 241 | |
265 | 242 | |
266 | 243 | |
267 | 244 | |
268 | 245 | |
269 | 246 | |
... | ... | @@ -262,48 +239,53 @@ |
262 | 239 | if (!intserv) |
263 | 240 | return 0; |
264 | 241 | |
242 | + zalloc_cpumask_var(&candidate_mask, GFP_KERNEL); | |
243 | + zalloc_cpumask_var(&tmp, GFP_KERNEL); | |
244 | + | |
265 | 245 | nthreads = len / sizeof(u32); |
266 | 246 | for (i = 0; i < nthreads; i++) |
267 | - cpu_set(i, tmp); | |
247 | + cpumask_set_cpu(i, tmp); | |
268 | 248 | |
269 | 249 | cpu_maps_update_begin(); |
270 | 250 | |
271 | - BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map)); | |
251 | + BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask)); | |
272 | 252 | |
273 | 253 | /* Get a bitmap of unoccupied slots. */ |
274 | - cpus_xor(candidate_map, cpu_possible_map, cpu_present_map); | |
275 | - if (cpus_empty(candidate_map)) { | |
254 | + cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask); | |
255 | + if (cpumask_empty(candidate_mask)) { | |
276 | 256 | /* If we get here, it most likely means that NR_CPUS is |
277 | 257 | * less than the partition's max processors setting. |
278 | 258 | */ |
279 | 259 | printk(KERN_ERR "Cannot add cpu %s; this system configuration" |
280 | 260 | " supports %d logical cpus.\n", np->full_name, |
281 | - cpus_weight(cpu_possible_map)); | |
261 | + cpumask_weight(cpu_possible_mask)); | |
282 | 262 | goto out_unlock; |
283 | 263 | } |
284 | 264 | |
285 | - while (!cpus_empty(tmp)) | |
286 | - if (cpus_subset(tmp, candidate_map)) | |
265 | + while (!cpumask_empty(tmp)) | |
266 | + if (cpumask_subset(tmp, candidate_mask)) | |
287 | 267 | /* Found a range where we can insert the new cpu(s) */ |
288 | 268 | break; |
289 | 269 | else |
290 | - cpus_shift_left(tmp, tmp, nthreads); | |
270 | + cpumask_shift_left(tmp, tmp, nthreads); | |
291 | 271 | |
292 | - if (cpus_empty(tmp)) { | |
293 | - printk(KERN_ERR "Unable to find space in cpu_present_map for" | |
272 | + if (cpumask_empty(tmp)) { | |
273 | + printk(KERN_ERR "Unable to find space in cpu_present_mask for" | |
294 | 274 | " processor %s with %d thread(s)\n", np->name, |
295 | 275 | nthreads); |
296 | 276 | goto out_unlock; |
297 | 277 | } |
298 | 278 | |
299 | - for_each_cpu_mask(cpu, tmp) { | |
300 | - BUG_ON(cpu_isset(cpu, cpu_present_map)); | |
279 | + for_each_cpu(cpu, tmp) { | |
280 | + BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask)); | |
301 | 281 | set_cpu_present(cpu, true); |
302 | 282 | set_hard_smp_processor_id(cpu, *intserv++); |
303 | 283 | } |
304 | 284 | err = 0; |
305 | 285 | out_unlock: |
306 | 286 | cpu_maps_update_done(); |
287 | + free_cpumask_var(candidate_mask); | |
288 | + free_cpumask_var(tmp); | |
307 | 289 | return err; |
308 | 290 | } |
309 | 291 | |
... | ... | @@ -334,7 +316,7 @@ |
334 | 316 | set_hard_smp_processor_id(cpu, -1); |
335 | 317 | break; |
336 | 318 | } |
337 | - if (cpu == NR_CPUS) | |
319 | + if (cpu >= nr_cpu_ids) | |
338 | 320 | printk(KERN_WARNING "Could not find cpu to remove " |
339 | 321 | "with physical id 0x%x\n", intserv[i]); |
340 | 322 | } |
... | ... | @@ -388,6 +370,7 @@ |
388 | 370 | struct device_node *np; |
389 | 371 | const char *typep; |
390 | 372 | int cpu; |
373 | + int qcss_tok; | |
391 | 374 | |
392 | 375 | for_each_node_by_name(np, "interrupt-controller") { |
393 | 376 | typep = of_get_property(np, "compatible", NULL); |
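Note: the pseries_add_processor() hunk above follows this series' general move from on-stack cpumask_t values to allocated cpumask_var_t. A minimal sketch of the allocate/compute/free pattern, with a hypothetical function name; the failed-allocation check is shown here even though the hunk itself skips it (needs <linux/cpumask.h>, <linux/gfp.h>, <linux/printk.h>):

static int example_find_free_slots(void)
{
	cpumask_var_t candidate_mask;
	int cpu;

	if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))
		return -ENOMEM;

	/* Unoccupied slots: cpus that are possible but not yet present. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);

	for_each_cpu(cpu, candidate_mask)
		pr_debug("logical cpu %d is free\n", cpu);

	free_cpumask_var(candidate_mask);
	return 0;
}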
arch/powerpc/platforms/pseries/hvCall.S
... | ... | @@ -228,4 +228,42 @@ |
228 | 228 | mtcrf 0xff,r0 |
229 | 229 | |
230 | 230 | blr /* return r3 = status */ |
231 | + | |
232 | +/* See plpar_hcall_raw for why this is needed */ | |
233 | +_GLOBAL(plpar_hcall9_raw) | |
234 | + HMT_MEDIUM | |
235 | + | |
236 | + mfcr r0 | |
237 | + stw r0,8(r1) | |
238 | + | |
239 | + std r4,STK_PARM(r4)(r1) /* Save ret buffer */ | |
240 | + | |
241 | + mr r4,r5 | |
242 | + mr r5,r6 | |
243 | + mr r6,r7 | |
244 | + mr r7,r8 | |
245 | + mr r8,r9 | |
246 | + mr r9,r10 | |
247 | + ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ | |
248 | + ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ | |
249 | + ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ | |
250 | + | |
251 | + HVSC /* invoke the hypervisor */ | |
252 | + | |
253 | + mr r0,r12 | |
254 | + ld r12,STK_PARM(r4)(r1) | |
255 | + std r4, 0(r12) | |
256 | + std r5, 8(r12) | |
257 | + std r6, 16(r12) | |
258 | + std r7, 24(r12) | |
259 | + std r8, 32(r12) | |
260 | + std r9, 40(r12) | |
261 | + std r10,48(r12) | |
262 | + std r11,56(r12) | |
263 | + std r0, 64(r12) | |
264 | + | |
265 | + lwz r0,8(r1) | |
266 | + mtcrf 0xff,r0 | |
267 | + | |
268 | + blr /* return r3 = status */ |
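Note: at the C level the new entry point is assumed to mirror plpar_hcall9() from asm/hvcall.h; the sketch below only restates what the assembly above does with its registers:

/* Assumed prototype, matching plpar_hcall9(): */
long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);

/*
 * After HVSC returns, the code above reloads the saved buffer pointer into
 * r12 and stores the hypervisor's outputs into it:
 *   retbuf[0..7] <- r4..r11
 *   retbuf[8]    <- r12 (preserved in r0 across the pointer reload)
 * r3 is left untouched and returned as the hcall status.
 */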
arch/powerpc/platforms/pseries/lpar.c
... | ... | @@ -367,21 +367,28 @@ |
367 | 367 | { |
368 | 368 | unsigned long size_bytes = 1UL << ppc64_pft_size; |
369 | 369 | unsigned long hpte_count = size_bytes >> 4; |
370 | - unsigned long dummy1, dummy2, dword0; | |
370 | + struct { | |
371 | + unsigned long pteh; | |
372 | + unsigned long ptel; | |
373 | + } ptes[4]; | |
371 | 374 | long lpar_rc; |
372 | - int i; | |
375 | + int i, j; | |
373 | 376 | |
374 | - /* TODO: Use bulk call */ | |
375 | - for (i = 0; i < hpte_count; i++) { | |
376 | - /* dont remove HPTEs with VRMA mappings */ | |
377 | - lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG, | |
378 | - &dummy1, &dummy2); | |
379 | - if (lpar_rc == H_NOT_FOUND) { | |
380 | - lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1); | |
381 | - if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK) | |
382 | - != HPTE_V_VRMA_MASK)) | |
383 | - /* Can be hpte for 1TB Seg. So remove it */ | |
384 | - plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2); | |
377 | +	/* Read the hash table in batches of 4 HPTEs and | |
378 | +	 * invalidate only valid entries that are not in the VRMA. | |
379 | +	 * hpte_count will be a multiple of 4. | |
380 | +	 */ | |
381 | + for (i = 0; i < hpte_count; i += 4) { | |
382 | + lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); | |
383 | + if (lpar_rc != H_SUCCESS) | |
384 | + continue; | |
385 | +		for (j = 0; j < 4; j++) { | |
386 | + if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == | |
387 | + HPTE_V_VRMA_MASK) | |
388 | + continue; | |
389 | + if (ptes[j].pteh & HPTE_V_VALID) | |
390 | + plpar_pte_remove_raw(0, i + j, 0, | |
391 | + &(ptes[j].pteh), &(ptes[j].ptel)); | |
385 | 392 | } |
386 | 393 | } |
387 | 394 | } |
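Note: for a sense of scale, a worked example with an assumed hash-table size (the real size comes from ppc64_pft_size):

/*
 * ppc64_pft_size == 24 (a 16 MB hash table, illustration only):
 *   size_bytes = 1UL << 24        = 16,777,216 bytes
 *   hpte_count = size_bytes >> 4  = 1,048,576 HPTEs (16 bytes each)
 * The loop above issues hpte_count / 4 = 262,144 H_READ calls plus one
 * H_REMOVE per valid, non-VRMA entry, instead of at least one hcall for
 * every one of the 1,048,576 slots as in the removed loop.
 */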
arch/powerpc/platforms/pseries/plpar_wrappers.h
... | ... | @@ -4,6 +4,14 @@ |
4 | 4 | #include <asm/hvcall.h> |
5 | 5 | #include <asm/page.h> |
6 | 6 | |
7 | +/* Get state of physical CPU from query_cpu_stopped */ | |
8 | +int smp_query_cpu_stopped(unsigned int pcpu); | |
9 | +#define QCSS_STOPPED 0 | |
10 | +#define QCSS_STOPPING 1 | |
11 | +#define QCSS_NOT_STOPPED 2 | |
12 | +#define QCSS_HARDWARE_ERROR -1 | |
13 | +#define QCSS_HARDWARE_BUSY -2 | |
14 | + | |
7 | 15 | static inline long poll_pending(void) |
8 | 16 | { |
9 | 17 | return plpar_hcall_norets(H_POLL_PENDING); |
... | ... | @@ -179,6 +187,24 @@ |
179 | 187 | |
180 | 188 | *old_pteh_ret = retbuf[0]; |
181 | 189 | *old_ptel_ret = retbuf[1]; |
190 | + | |
191 | + return rc; | |
192 | +} | |
193 | + | |
194 | +/* | |
195 | + * plpar_pte_read_4_raw can be called in real mode. | |
196 | + * The ptes buffer must hold 8*sizeof(unsigned long) bytes (four pteh/ptel pairs). | |
197 | + */ | |
198 | +static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex, | |
199 | + unsigned long *ptes) | |
200 | + | |
201 | +{ | |
202 | + long rc; | |
203 | + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | |
204 | + | |
205 | + rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex); | |
206 | + | |
207 | + memcpy(ptes, retbuf, 8*sizeof(unsigned long)); | |
182 | 208 | |
183 | 209 | return rc; |
184 | 210 | } |
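Note: a sketch of how a caller is expected to overlay that buffer, mirroring pSeries_lpar_hptab_clear() earlier in this diff; the struct and function names are illustrative only:

struct hpte_pair {
	unsigned long pteh;
	unsigned long ptel;
};

static long example_read_batch(unsigned long ptex, struct hpte_pair pairs[4])
{
	/* The buffer receives four (pteh, ptel) pairs:
	 * pairs[0].pteh = retbuf[0], pairs[0].ptel = retbuf[1], ...
	 * pairs[3].ptel = retbuf[7]; hence the 8*sizeof(unsigned long). */
	return plpar_pte_read_4_raw(0, ptex, (unsigned long *)pairs);
}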
arch/powerpc/platforms/pseries/pseries.h
... | ... | @@ -10,6 +10,13 @@ |
10 | 10 | #ifndef _PSERIES_PSERIES_H |
11 | 11 | #define _PSERIES_PSERIES_H |
12 | 12 | |
13 | +#include <linux/interrupt.h> | |
14 | + | |
15 | +struct device_node; | |
16 | + | |
17 | +extern void request_event_sources_irqs(struct device_node *np, | |
18 | + irq_handler_t handler, const char *name); | |
19 | + | |
13 | 20 | extern void __init fw_feature_init(const char *hypertas, unsigned long len); |
14 | 21 | |
15 | 22 | struct pt_regs; |
arch/powerpc/platforms/pseries/ras.c
... | ... | @@ -67,63 +67,6 @@ |
67 | 67 | static irqreturn_t ras_error_interrupt(int irq, void *dev_id); |
68 | 68 | |
69 | 69 | |
70 | -static void request_ras_irqs(struct device_node *np, | |
71 | - irq_handler_t handler, | |
72 | - const char *name) | |
73 | -{ | |
74 | - int i, index, count = 0; | |
75 | - struct of_irq oirq; | |
76 | - const u32 *opicprop; | |
77 | - unsigned int opicplen; | |
78 | - unsigned int virqs[16]; | |
79 | - | |
80 | - /* Check for obsolete "open-pic-interrupt" property. If present, then | |
81 | - * map those interrupts using the default interrupt host and default | |
82 | - * trigger | |
83 | - */ | |
84 | - opicprop = of_get_property(np, "open-pic-interrupt", &opicplen); | |
85 | - if (opicprop) { | |
86 | - opicplen /= sizeof(u32); | |
87 | - for (i = 0; i < opicplen; i++) { | |
88 | - if (count > 15) | |
89 | - break; | |
90 | - virqs[count] = irq_create_mapping(NULL, *(opicprop++)); | |
91 | - if (virqs[count] == NO_IRQ) | |
92 | - printk(KERN_ERR "Unable to allocate interrupt " | |
93 | - "number for %s\n", np->full_name); | |
94 | - else | |
95 | - count++; | |
96 | - | |
97 | - } | |
98 | - } | |
99 | - /* Else use normal interrupt tree parsing */ | |
100 | - else { | |
101 | - /* First try to do a proper OF tree parsing */ | |
102 | - for (index = 0; of_irq_map_one(np, index, &oirq) == 0; | |
103 | - index++) { | |
104 | - if (count > 15) | |
105 | - break; | |
106 | - virqs[count] = irq_create_of_mapping(oirq.controller, | |
107 | - oirq.specifier, | |
108 | - oirq.size); | |
109 | - if (virqs[count] == NO_IRQ) | |
110 | - printk(KERN_ERR "Unable to allocate interrupt " | |
111 | - "number for %s\n", np->full_name); | |
112 | - else | |
113 | - count++; | |
114 | - } | |
115 | - } | |
116 | - | |
117 | - /* Now request them */ | |
118 | - for (i = 0; i < count; i++) { | |
119 | - if (request_irq(virqs[i], handler, 0, name, NULL)) { | |
120 | - printk(KERN_ERR "Unable to request interrupt %d for " | |
121 | - "%s\n", virqs[i], np->full_name); | |
122 | - return; | |
123 | - } | |
124 | - } | |
125 | -} | |
126 | - | |
127 | 70 | /* |
128 | 71 | * Initialize handlers for the set of interrupts caused by hardware errors |
129 | 72 | * and power system events. |
130 | 73 | |
... | ... | @@ -138,14 +81,15 @@ |
138 | 81 | /* Internal Errors */ |
139 | 82 | np = of_find_node_by_path("/event-sources/internal-errors"); |
140 | 83 | if (np != NULL) { |
141 | - request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR"); | |
84 | + request_event_sources_irqs(np, ras_error_interrupt, | |
85 | + "RAS_ERROR"); | |
142 | 86 | of_node_put(np); |
143 | 87 | } |
144 | 88 | |
145 | 89 | /* EPOW Events */ |
146 | 90 | np = of_find_node_by_path("/event-sources/epow-events"); |
147 | 91 | if (np != NULL) { |
148 | - request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW"); | |
92 | + request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW"); | |
149 | 93 | of_node_put(np); |
150 | 94 | } |
151 | 95 |
arch/powerpc/platforms/pseries/setup.c
... | ... | @@ -496,13 +496,14 @@ |
496 | 496 | } |
497 | 497 | |
498 | 498 | |
499 | -DECLARE_PER_CPU(unsigned long, smt_snooze_delay); | |
499 | +DECLARE_PER_CPU(long, smt_snooze_delay); | |
500 | 500 | |
501 | 501 | static void pseries_dedicated_idle_sleep(void) |
502 | 502 | { |
503 | 503 | unsigned int cpu = smp_processor_id(); |
504 | 504 | unsigned long start_snooze; |
505 | 505 | unsigned long in_purr, out_purr; |
506 | + long snooze = __get_cpu_var(smt_snooze_delay); | |
506 | 507 | |
507 | 508 | /* |
508 | 509 | * Indicate to the HV that we are idle. Now would be |
509 | 510 | |
... | ... | @@ -517,13 +518,12 @@ |
517 | 518 | * has been checked recently. If we should poll for a little |
518 | 519 | * while, do so. |
519 | 520 | */ |
520 | - if (__get_cpu_var(smt_snooze_delay)) { | |
521 | - start_snooze = get_tb() + | |
522 | - __get_cpu_var(smt_snooze_delay) * tb_ticks_per_usec; | |
521 | + if (snooze) { | |
522 | + start_snooze = get_tb() + snooze * tb_ticks_per_usec; | |
523 | 523 | local_irq_enable(); |
524 | 524 | set_thread_flag(TIF_POLLING_NRFLAG); |
525 | 525 | |
526 | - while (get_tb() < start_snooze) { | |
526 | + while ((snooze < 0) || (get_tb() < start_snooze)) { | |
527 | 527 | if (need_resched() || cpu_is_offline(cpu)) |
528 | 528 | goto out; |
529 | 529 | ppc64_runlatch_off(); |
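Note: the switch to a signed snooze value gives smt_snooze_delay three distinct behaviours, all implied by the loop above:

/*
 *   smt_snooze_delay == 0  ->  skip the polling block and cede right away
 *   smt_snooze_delay  > 0  ->  poll for that many microseconds first
 *   smt_snooze_delay  < 0  ->  poll forever and never cede, because
 *                              (snooze < 0) keeps the while condition true
 */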
arch/powerpc/platforms/pseries/smp.c
... | ... | @@ -55,8 +55,30 @@ |
55 | 55 | * The Primary thread of each non-boot processor was started from the OF client |
56 | 56 | * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. |
57 | 57 | */ |
58 | -static cpumask_t of_spin_map; | |
58 | +static cpumask_var_t of_spin_mask; | |
59 | 59 | |
60 | +/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */ | |
61 | +int smp_query_cpu_stopped(unsigned int pcpu) | |
62 | +{ | |
63 | + int cpu_status, status; | |
64 | + int qcss_tok = rtas_token("query-cpu-stopped-state"); | |
65 | + | |
66 | + if (qcss_tok == RTAS_UNKNOWN_SERVICE) { | |
67 | + printk(KERN_INFO "Firmware doesn't support " | |
68 | + "query-cpu-stopped-state\n"); | |
69 | + return QCSS_HARDWARE_ERROR; | |
70 | + } | |
71 | + | |
72 | + status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu); | |
73 | + if (status != 0) { | |
74 | + printk(KERN_ERR | |
75 | + "RTAS query-cpu-stopped-state failed: %i\n", status); | |
76 | + return status; | |
77 | + } | |
78 | + | |
79 | + return cpu_status; | |
80 | +} | |
81 | + | |
60 | 82 | /** |
61 | 83 | * smp_startup_cpu() - start the given cpu |
62 | 84 | * |
63 | 85 | |
... | ... | @@ -76,12 +98,18 @@ |
76 | 98 | unsigned int pcpu; |
77 | 99 | int start_cpu; |
78 | 100 | |
79 | - if (cpu_isset(lcpu, of_spin_map)) | |
101 | + if (cpumask_test_cpu(lcpu, of_spin_mask)) | |
80 | 102 | /* Already started by OF and sitting in spin loop */ |
81 | 103 | return 1; |
82 | 104 | |
83 | 105 | pcpu = get_hard_smp_processor_id(lcpu); |
84 | 106 | |
107 | +	/* Check whether the CPU is already out of firmware (e.g. after a kexec) */ | |
108 | +	if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) { | |
109 | + cpumask_set_cpu(lcpu, of_spin_mask); | |
110 | + return 1; | |
111 | + } | |
112 | + | |
85 | 113 | /* Fixup atomic count: it exited inside IRQ handler. */ |
86 | 114 | task_thread_info(paca[lcpu].__current)->preempt_count = 0; |
87 | 115 | |
... | ... | @@ -115,7 +143,7 @@ |
115 | 143 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) |
116 | 144 | vpa_init(cpu); |
117 | 145 | |
118 | - cpu_clear(cpu, of_spin_map); | |
146 | + cpumask_clear_cpu(cpu, of_spin_mask); | |
119 | 147 | set_cpu_current_state(cpu, CPU_STATE_ONLINE); |
120 | 148 | set_default_offline_state(cpu); |
121 | 149 | |
122 | 150 | |
123 | 151 | |
124 | 152 | |
... | ... | @@ -186,17 +214,19 @@ |
186 | 214 | |
187 | 215 | pr_debug(" -> smp_init_pSeries()\n"); |
188 | 216 | |
217 | + alloc_bootmem_cpumask_var(&of_spin_mask); | |
218 | + | |
189 | 219 | /* Mark threads which are still spinning in hold loops. */ |
190 | 220 | if (cpu_has_feature(CPU_FTR_SMT)) { |
191 | 221 | for_each_present_cpu(i) { |
192 | 222 | if (cpu_thread_in_core(i) == 0) |
193 | - cpu_set(i, of_spin_map); | |
223 | + cpumask_set_cpu(i, of_spin_mask); | |
194 | 224 | } |
195 | 225 | } else { |
196 | - of_spin_map = cpu_present_map; | |
226 | + cpumask_copy(of_spin_mask, cpu_present_mask); | |
197 | 227 | } |
198 | 228 | |
199 | - cpu_clear(boot_cpuid, of_spin_map); | |
229 | + cpumask_clear_cpu(boot_cpuid, of_spin_mask); | |
200 | 230 | |
201 | 231 | /* Non-lpar has additional take/give timebase */ |
202 | 232 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { |
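Note: together with the QCSS_* codes added to plpar_wrappers.h, callers can poll a physical thread until it has really returned to firmware, as the hotplug path earlier in this diff does. A condensed, hypothetical sketch of that use:

static int example_wait_for_stop(unsigned int pcpu)
{
	int tries, status = QCSS_NOT_STOPPED;

	for (tries = 0; tries < 25; tries++) {
		status = smp_query_cpu_stopped(pcpu);
		if (status == QCSS_STOPPED || status == QCSS_HARDWARE_ERROR)
			break;
		cpu_relax();
	}
	return status;	/* QCSS_NOT_STOPPED here means the poll timed out */
}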
arch/powerpc/platforms/pseries/xics.c
... | ... | @@ -163,29 +163,37 @@ |
163 | 163 | /* Interface to generic irq subsystem */ |
164 | 164 | |
165 | 165 | #ifdef CONFIG_SMP |
166 | -static int get_irq_server(unsigned int virq, cpumask_t cpumask, | |
166 | +/* | |
167 | + * For the moment we only implement delivery to all cpus or one cpu. | |
168 | + * | |
169 | + * If the requested affinity is cpu_all_mask, we set global affinity. | |
170 | + * If not, we set it to the first cpu in the mask, even if multiple cpus | |
171 | + * are set. This is so things like irqbalance (which set core- and | |
172 | + * package-wide affinities) do the right thing. | |
173 | + */ | |
174 | +static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, | |
167 | 175 | unsigned int strict_check) |
168 | 176 | { |
169 | - int server; | |
170 | - /* For the moment only implement delivery to all cpus or one cpu */ | |
171 | - cpumask_t tmp = CPU_MASK_NONE; | |
172 | 177 | |
173 | 178 | if (!distribute_irqs) |
174 | 179 | return default_server; |
175 | 180 | |
176 | - if (!cpus_equal(cpumask, CPU_MASK_ALL)) { | |
177 | - cpus_and(tmp, cpu_online_map, cpumask); | |
181 | + if (!cpumask_equal(cpumask, cpu_all_mask)) { | |
182 | + int server = cpumask_first_and(cpu_online_mask, cpumask); | |
178 | 183 | |
179 | - server = first_cpu(tmp); | |
180 | - | |
181 | - if (server < NR_CPUS) | |
184 | + if (server < nr_cpu_ids) | |
182 | 185 | return get_hard_smp_processor_id(server); |
183 | 186 | |
184 | 187 | if (strict_check) |
185 | 188 | return -1; |
186 | 189 | } |
187 | 190 | |
188 | - if (cpus_equal(cpu_online_map, cpu_present_map)) | |
191 | + /* | |
192 | + * Work around an issue with some versions of JS20 firmware that | |
193 | + * deliver interrupts to cpus which haven't been started. This | |
194 | + * happens when using the maxcpus= boot option. | |
195 | + */ | |
196 | + if (cpumask_equal(cpu_online_mask, cpu_present_mask)) | |
189 | 197 | return default_distrib_server; |
190 | 198 | |
191 | 199 | return default_server; |
... | ... | @@ -207,7 +215,7 @@ |
207 | 215 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) |
208 | 216 | return; |
209 | 217 | |
210 | - server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0); | |
218 | + server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0); | |
211 | 219 | |
212 | 220 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, |
213 | 221 | DEFAULT_PRIORITY); |
... | ... | @@ -398,11 +406,7 @@ |
398 | 406 | return -1; |
399 | 407 | } |
400 | 408 | |
401 | - /* | |
402 | - * For the moment only implement delivery to all cpus or one cpu. | |
403 | - * Get current irq_server for the given irq | |
404 | - */ | |
405 | - irq_server = get_irq_server(virq, *cpumask, 1); | |
409 | + irq_server = get_irq_server(virq, cpumask, 1); | |
406 | 410 | if (irq_server == -1) { |
407 | 411 | char cpulist[128]; |
408 | 412 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); |
... | ... | @@ -611,7 +615,7 @@ |
611 | 615 | { |
612 | 616 | xics_request_ipi(); |
613 | 617 | |
614 | - return cpus_weight(cpu_possible_map); | |
618 | + return cpumask_weight(cpu_possible_mask); | |
615 | 619 | } |
616 | 620 | |
617 | 621 | #endif /* CONFIG_SMP */ |
arch/powerpc/sysdev/mpc8xxx_gpio.c
... | ... | @@ -16,6 +16,7 @@ |
16 | 16 | #include <linux/of_gpio.h> |
17 | 17 | #include <linux/gpio.h> |
18 | 18 | #include <linux/slab.h> |
19 | +#include <linux/irq.h> | |
19 | 20 | |
20 | 21 | #define MPC8XXX_GPIO_PINS 32 |
21 | 22 | |
... | ... | @@ -35,6 +36,7 @@ |
35 | 36 | * open drain mode safely |
36 | 37 | */ |
37 | 38 | u32 data; |
39 | + struct irq_host *irq; | |
38 | 40 | }; |
39 | 41 | |
40 | 42 | static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) |
41 | 43 | |
... | ... | @@ -128,12 +130,136 @@ |
128 | 130 | return 0; |
129 | 131 | } |
130 | 132 | |
133 | +static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) | |
134 | +{ | |
135 | + struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); | |
136 | + struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); | |
137 | + | |
138 | + if (mpc8xxx_gc->irq && offset < MPC8XXX_GPIO_PINS) | |
139 | + return irq_create_mapping(mpc8xxx_gc->irq, offset); | |
140 | + else | |
141 | + return -ENXIO; | |
142 | +} | |
143 | + | |
144 | +static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) | |
145 | +{ | |
146 | + struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc); | |
147 | + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | |
148 | + unsigned int mask; | |
149 | + | |
150 | + mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR); | |
151 | + if (mask) | |
152 | + generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, | |
153 | + 32 - ffs(mask))); | |
154 | +} | |
155 | + | |
156 | +static void mpc8xxx_irq_unmask(unsigned int virq) | |
157 | +{ | |
158 | + struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); | |
159 | + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | |
160 | + unsigned long flags; | |
161 | + | |
162 | + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | |
163 | + | |
164 | + setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq))); | |
165 | + | |
166 | + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | |
167 | +} | |
168 | + | |
169 | +static void mpc8xxx_irq_mask(unsigned int virq) | |
170 | +{ | |
171 | + struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); | |
172 | + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | |
173 | + unsigned long flags; | |
174 | + | |
175 | + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | |
176 | + | |
177 | + clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq))); | |
178 | + | |
179 | + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | |
180 | +} | |
181 | + | |
182 | +static void mpc8xxx_irq_ack(unsigned int virq) | |
183 | +{ | |
184 | + struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); | |
185 | + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | |
186 | + | |
187 | + out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(virq))); | |
188 | +} | |
189 | + | |
190 | +static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type) | |
191 | +{ | |
192 | + struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); | |
193 | + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | |
194 | + unsigned long flags; | |
195 | + | |
196 | + switch (flow_type) { | |
197 | + case IRQ_TYPE_EDGE_FALLING: | |
198 | + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | |
199 | + setbits32(mm->regs + GPIO_ICR, | |
200 | + mpc8xxx_gpio2mask(virq_to_hw(virq))); | |
201 | + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | |
202 | + break; | |
203 | + | |
204 | + case IRQ_TYPE_EDGE_BOTH: | |
205 | + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | |
206 | + clrbits32(mm->regs + GPIO_ICR, | |
207 | + mpc8xxx_gpio2mask(virq_to_hw(virq))); | |
208 | + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | |
209 | + break; | |
210 | + | |
211 | + default: | |
212 | + return -EINVAL; | |
213 | + } | |
214 | + | |
215 | + return 0; | |
216 | +} | |
217 | + | |
218 | +static struct irq_chip mpc8xxx_irq_chip = { | |
219 | + .name = "mpc8xxx-gpio", | |
220 | + .unmask = mpc8xxx_irq_unmask, | |
221 | + .mask = mpc8xxx_irq_mask, | |
222 | + .ack = mpc8xxx_irq_ack, | |
223 | + .set_type = mpc8xxx_irq_set_type, | |
224 | +}; | |
225 | + | |
226 | +static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, | |
227 | + irq_hw_number_t hw) | |
228 | +{ | |
229 | + set_irq_chip_data(virq, h->host_data); | |
230 | + set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); | |
231 | + set_irq_type(virq, IRQ_TYPE_NONE); | |
232 | + | |
233 | + return 0; | |
234 | +} | |
235 | + | |
236 | +static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct, | |
237 | + const u32 *intspec, unsigned int intsize, | |
238 | + irq_hw_number_t *out_hwirq, | |
239 | + unsigned int *out_flags) | |
240 | + | |
241 | +{ | |
242 | + /* interrupt sense values coming from the device tree equal either | |
243 | + * EDGE_FALLING or EDGE_BOTH | |
244 | + */ | |
245 | + *out_hwirq = intspec[0]; | |
246 | + *out_flags = intspec[1]; | |
247 | + | |
248 | + return 0; | |
249 | +} | |
250 | + | |
251 | +static struct irq_host_ops mpc8xxx_gpio_irq_ops = { | |
252 | + .map = mpc8xxx_gpio_irq_map, | |
253 | + .xlate = mpc8xxx_gpio_irq_xlate, | |
254 | +}; | |
255 | + | |
131 | 256 | static void __init mpc8xxx_add_controller(struct device_node *np) |
132 | 257 | { |
133 | 258 | struct mpc8xxx_gpio_chip *mpc8xxx_gc; |
134 | 259 | struct of_mm_gpio_chip *mm_gc; |
135 | 260 | struct of_gpio_chip *of_gc; |
136 | 261 | struct gpio_chip *gc; |
262 | + unsigned hwirq; | |
137 | 263 | int ret; |
138 | 264 | |
139 | 265 | mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL); |
140 | 266 | |
... | ... | @@ -158,11 +284,32 @@ |
158 | 284 | else |
159 | 285 | gc->get = mpc8xxx_gpio_get; |
160 | 286 | gc->set = mpc8xxx_gpio_set; |
287 | + gc->to_irq = mpc8xxx_gpio_to_irq; | |
161 | 288 | |
162 | 289 | ret = of_mm_gpiochip_add(np, mm_gc); |
163 | 290 | if (ret) |
164 | 291 | goto err; |
165 | 292 | |
293 | + hwirq = irq_of_parse_and_map(np, 0); | |
294 | + if (hwirq == NO_IRQ) | |
295 | + goto skip_irq; | |
296 | + | |
297 | + mpc8xxx_gc->irq = | |
298 | + irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS, | |
299 | + &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS); | |
300 | + if (!mpc8xxx_gc->irq) | |
301 | + goto skip_irq; | |
302 | + | |
303 | + mpc8xxx_gc->irq->host_data = mpc8xxx_gc; | |
304 | + | |
305 | + /* ack and mask all irqs */ | |
306 | + out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); | |
307 | + out_be32(mm_gc->regs + GPIO_IMR, 0); | |
308 | + | |
309 | + set_irq_data(hwirq, mpc8xxx_gc); | |
310 | + set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); | |
311 | + | |
312 | +skip_irq: | |
166 | 313 | return; |
167 | 314 | |
168 | 315 | err: |
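Note: a worked example of the cascade demux above, assuming mpc8xxx_gpio2mask(n) is 1 << (31 - n); the helper's body is not part of this hunk, so treat that as an assumption:

/*
 *   GPIO 5 raises an interrupt  ->  mask bit 26 is set (0x04000000)
 *   ffs(mask) == 27             ->  32 - ffs(mask) == 5
 * so irq_linear_revmap() is asked for hwirq 5. When several pins are
 * pending, the lowest set mask bit (i.e. the highest-numbered GPIO) is
 * the one demuxed by this invocation of the cascade handler.
 */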
arch/powerpc/sysdev/mpic.c
... | ... | @@ -568,12 +568,12 @@ |
568 | 568 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ |
569 | 569 | |
570 | 570 | #ifdef CONFIG_SMP |
571 | -static int irq_choose_cpu(const cpumask_t *mask) | |
571 | +static int irq_choose_cpu(const struct cpumask *mask) | |
572 | 572 | { |
573 | 573 | int cpuid; |
574 | 574 | |
575 | 575 | if (cpumask_equal(mask, cpu_all_mask)) { |
576 | - static int irq_rover; | |
576 | + static int irq_rover = 0; | |
577 | 577 | static DEFINE_RAW_SPINLOCK(irq_rover_lock); |
578 | 578 | unsigned long flags; |
579 | 579 | |
580 | 580 | |
... | ... | @@ -581,15 +581,11 @@ |
581 | 581 | do_round_robin: |
582 | 582 | raw_spin_lock_irqsave(&irq_rover_lock, flags); |
583 | 583 | |
584 | - while (!cpu_online(irq_rover)) { | |
585 | - if (++irq_rover >= NR_CPUS) | |
586 | - irq_rover = 0; | |
587 | - } | |
584 | + irq_rover = cpumask_next(irq_rover, cpu_online_mask); | |
585 | + if (irq_rover >= nr_cpu_ids) | |
586 | + irq_rover = cpumask_first(cpu_online_mask); | |
587 | + | |
588 | 588 | cpuid = irq_rover; |
589 | - do { | |
590 | - if (++irq_rover >= NR_CPUS) | |
591 | - irq_rover = 0; | |
592 | - } while (!cpu_online(irq_rover)); | |
593 | 589 | |
594 | 590 | raw_spin_unlock_irqrestore(&irq_rover_lock, flags); |
595 | 591 | } else { |
... | ... | @@ -601,7 +597,7 @@ |
601 | 597 | return get_hard_smp_processor_id(cpuid); |
602 | 598 | } |
603 | 599 | #else |
604 | -static int irq_choose_cpu(const cpumask_t *mask) | |
600 | +static int irq_choose_cpu(const struct cpumask *mask) | |
605 | 601 | { |
606 | 602 | return hard_smp_processor_id(); |
607 | 603 | } |
608 | 604 | |
609 | 605 | |
610 | 606 | |
... | ... | @@ -814,12 +810,16 @@ |
814 | 810 | |
815 | 811 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); |
816 | 812 | } else { |
817 | - cpumask_t tmp; | |
813 | + cpumask_var_t tmp; | |
818 | 814 | |
819 | - cpumask_and(&tmp, cpumask, cpu_online_mask); | |
815 | + alloc_cpumask_var(&tmp, GFP_KERNEL); | |
820 | 816 | |
817 | + cpumask_and(tmp, cpumask, cpu_online_mask); | |
818 | + | |
821 | 819 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), |
822 | - mpic_physmask(cpus_addr(tmp)[0])); | |
820 | + mpic_physmask(cpumask_bits(tmp)[0])); | |
821 | + | |
822 | + free_cpumask_var(tmp); | |
823 | 823 | } |
824 | 824 | |
825 | 825 | return 0; |
... | ... | @@ -1479,21 +1479,6 @@ |
1479 | 1479 | } |
1480 | 1480 | |
1481 | 1481 | |
1482 | -void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask) | |
1483 | -{ | |
1484 | - struct mpic *mpic = mpic_primary; | |
1485 | - | |
1486 | - BUG_ON(mpic == NULL); | |
1487 | - | |
1488 | -#ifdef DEBUG_IPI | |
1489 | - DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); | |
1490 | -#endif | |
1491 | - | |
1492 | - mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + | |
1493 | - ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), | |
1494 | - mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); | |
1495 | -} | |
1496 | - | |
1497 | 1482 | static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) |
1498 | 1483 | { |
1499 | 1484 | u32 src; |
1500 | 1485 | |
... | ... | @@ -1589,8 +1574,25 @@ |
1589 | 1574 | } |
1590 | 1575 | } |
1591 | 1576 | |
1577 | +static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask) | |
1578 | +{ | |
1579 | + struct mpic *mpic = mpic_primary; | |
1580 | + | |
1581 | + BUG_ON(mpic == NULL); | |
1582 | + | |
1583 | +#ifdef DEBUG_IPI | |
1584 | + DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); | |
1585 | +#endif | |
1586 | + | |
1587 | + mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + | |
1588 | + ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), | |
1589 | + mpic_physmask(cpumask_bits(cpu_mask)[0])); | |
1590 | +} | |
1591 | + | |
1592 | 1592 | void smp_mpic_message_pass(int target, int msg) |
1593 | 1593 | { |
1594 | + cpumask_var_t tmp; | |
1595 | + | |
1594 | 1596 | /* make sure we're sending something that translates to an IPI */ |
1595 | 1597 | if ((unsigned int)msg > 3) { |
1596 | 1598 | printk("SMP %d: smp_message_pass: unknown msg %d\n", |
1597 | 1599 | |
1598 | 1600 | |
... | ... | @@ -1599,13 +1601,17 @@ |
1599 | 1601 | } |
1600 | 1602 | switch (target) { |
1601 | 1603 | case MSG_ALL: |
1602 | - mpic_send_ipi(msg, 0xffffffff); | |
1604 | + mpic_send_ipi(msg, cpu_online_mask); | |
1603 | 1605 | break; |
1604 | 1606 | case MSG_ALL_BUT_SELF: |
1605 | - mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id())); | |
1607 | + alloc_cpumask_var(&tmp, GFP_NOWAIT); | |
1608 | + cpumask_andnot(tmp, cpu_online_mask, | |
1609 | + cpumask_of(smp_processor_id())); | |
1610 | + mpic_send_ipi(msg, tmp); | |
1611 | + free_cpumask_var(tmp); | |
1606 | 1612 | break; |
1607 | 1613 | default: |
1608 | - mpic_send_ipi(msg, 1 << target); | |
1614 | + mpic_send_ipi(msg, cpumask_of(target)); | |
1609 | 1615 | break; |
1610 | 1616 | } |
1611 | 1617 | } |
... | ... | @@ -1616,7 +1622,7 @@ |
1616 | 1622 | |
1617 | 1623 | DBG("smp_mpic_probe()...\n"); |
1618 | 1624 | |
1619 | - nr_cpus = cpus_weight(cpu_possible_map); | |
1625 | + nr_cpus = cpumask_weight(cpu_possible_mask); | |
1620 | 1626 | |
1621 | 1627 | DBG("nr_cpus: %d\n", nr_cpus); |
1622 | 1628 |
arch/powerpc/sysdev/ppc4xx_soc.c
... | ... | @@ -191,11 +191,31 @@ |
191 | 191 | arch_initcall(ppc4xx_l2c_probe); |
192 | 192 | |
193 | 193 | /* |
194 | - * At present, this routine just applies a system reset. | |
194 | + * Apply a system reset. Alternatively, a board-specific value may be | |
195 | + * provided via the "reset-type" property in the cpu node. | |
195 | 196 | */ |
196 | 197 | void ppc4xx_reset_system(char *cmd) |
197 | 198 | { |
198 | - mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_RST_SYSTEM); | |
199 | + struct device_node *np; | |
200 | + u32 reset_type = DBCR0_RST_SYSTEM; | |
201 | + const u32 *prop; | |
202 | + | |
203 | + np = of_find_node_by_type(NULL, "cpu"); | |
204 | + if (np) { | |
205 | + prop = of_get_property(np, "reset-type", NULL); | |
206 | + | |
207 | + /* | |
208 | + * Check if property exists and if it is in range: | |
209 | + * 1 - PPC4xx core reset | |
210 | + * 2 - PPC4xx chip reset | |
211 | + * 3 - PPC4xx system reset (default) | |
212 | + */ | |
213 | + if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3))) | |
214 | + reset_type = prop[0] << 28; | |
215 | + } | |
216 | + | |
217 | + mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type); | |
218 | + | |
199 | 219 | while (1) |
200 | 220 | ; /* Just in case the reset doesn't work */ |
201 | 221 | } |
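Note: how the optional "reset-type" value maps onto the DBCR0 reset field, restating the shift above; the DBCR0_RST_* names come from asm/reg_booke.h and are an assumption here, not shown in this hunk:

/*
 *   reset-type = <1>  ->  1 << 28 = 0x10000000  (core reset,   DBCR0_RST_CORE)
 *   reset-type = <2>  ->  2 << 28 = 0x20000000  (chip reset,   DBCR0_RST_CHIP)
 *   reset-type = <3>  ->  3 << 28 = 0x30000000  (system reset, DBCR0_RST_SYSTEM)
 * Values outside 1..3, or a missing property, fall back to the
 * DBCR0_RST_SYSTEM default.
 */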
drivers/macintosh/macio-adb.c
drivers/macintosh/smu.c
... | ... | @@ -1183,8 +1183,10 @@ |
1183 | 1183 | return -EOVERFLOW; |
1184 | 1184 | spin_lock_irqsave(&pp->lock, flags); |
1185 | 1185 | if (pp->cmd.status == 1) { |
1186 | - if (file->f_flags & O_NONBLOCK) | |
1186 | + if (file->f_flags & O_NONBLOCK) { | |
1187 | + spin_unlock_irqrestore(&pp->lock, flags); | |
1187 | 1188 | return -EAGAIN; |
1189 | + } | |
1188 | 1190 | add_wait_queue(&pp->wait, &wait); |
1189 | 1191 | for (;;) { |
1190 | 1192 | set_current_state(TASK_INTERRUPTIBLE); |
drivers/macintosh/therm_adt746x.c
... | ... | @@ -182,6 +182,7 @@ |
182 | 182 | |
183 | 183 | thermostat = NULL; |
184 | 184 | |
185 | + i2c_set_clientdata(client, NULL); | |
185 | 186 | kfree(th); |
186 | 187 | |
187 | 188 | return 0; |
... | ... | @@ -399,6 +400,7 @@ |
399 | 400 | rc = read_reg(th, CONFIG_REG); |
400 | 401 | if (rc < 0) { |
401 | 402 | dev_err(&client->dev, "Thermostat failed to read config!\n"); |
403 | + i2c_set_clientdata(client, NULL); | |
402 | 404 | kfree(th); |
403 | 405 | return -ENODEV; |
404 | 406 | } |
drivers/macintosh/windfarm_pm81.c
... | ... | @@ -757,10 +757,8 @@ |
757 | 757 | wf_put_control(cpufreq_clamp); |
758 | 758 | |
759 | 759 | /* Destroy control loops state structures */ |
760 | - if (wf_smu_sys_fans) | |
761 | - kfree(wf_smu_sys_fans); | |
762 | - if (wf_smu_cpu_fans) | |
763 | - kfree(wf_smu_cpu_fans); | |
760 | + kfree(wf_smu_sys_fans); | |
761 | + kfree(wf_smu_cpu_fans); | |
764 | 762 | |
765 | 763 | return 0; |
766 | 764 | } |
drivers/macintosh/windfarm_pm91.c
... | ... | @@ -687,12 +687,9 @@ |
687 | 687 | wf_put_control(cpufreq_clamp); |
688 | 688 | |
689 | 689 | /* Destroy control loops state structures */ |
690 | - if (wf_smu_slots_fans) | |
691 | - kfree(wf_smu_cpu_fans); | |
692 | - if (wf_smu_drive_fans) | |
693 | - kfree(wf_smu_cpu_fans); | |
694 | - if (wf_smu_cpu_fans) | |
695 | - kfree(wf_smu_cpu_fans); | |
690 | + kfree(wf_smu_slots_fans); | |
691 | + kfree(wf_smu_drive_fans); | |
692 | + kfree(wf_smu_cpu_fans); | |
696 | 693 | |
697 | 694 | return 0; |
698 | 695 | } |
drivers/misc/Makefile
drivers/misc/hdpuftrs/Makefile
1 | -obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o |
drivers/misc/hdpuftrs/hdpu_cpustate.c
1 | -/* | |
2 | - * Sky CPU State Driver | |
3 | - * | |
4 | - * Copyright (C) 2002 Brian Waite | |
5 | - * | |
6 | - * This driver allows use of the CPU state bits | |
7 | - * It exports the /dev/sky_cpustate and also | |
8 | - * /proc/sky_cpustate pseudo-file for status information. | |
9 | - * | |
10 | - * This program is free software; you can redistribute it and/or | |
11 | - * modify it under the terms of the GNU General Public License | |
12 | - * as published by the Free Software Foundation; either version | |
13 | - * 2 of the License, or (at your option) any later version. | |
14 | - * | |
15 | - */ | |
16 | - | |
17 | -#include <linux/module.h> | |
18 | -#include <linux/kernel.h> | |
19 | -#include <linux/spinlock.h> | |
20 | -#include <linux/smp_lock.h> | |
21 | -#include <linux/miscdevice.h> | |
22 | -#include <linux/proc_fs.h> | |
23 | -#include <linux/hdpu_features.h> | |
24 | -#include <linux/platform_device.h> | |
25 | -#include <asm/uaccess.h> | |
26 | -#include <linux/seq_file.h> | |
27 | -#include <asm/io.h> | |
28 | - | |
29 | -#define SKY_CPUSTATE_VERSION "1.1" | |
30 | - | |
31 | -static int hdpu_cpustate_probe(struct platform_device *pdev); | |
32 | -static int hdpu_cpustate_remove(struct platform_device *pdev); | |
33 | - | |
34 | -static unsigned char cpustate_get_state(void); | |
35 | -static int cpustate_proc_open(struct inode *inode, struct file *file); | |
36 | -static int cpustate_proc_read(struct seq_file *seq, void *offset); | |
37 | - | |
38 | -static struct cpustate_t cpustate; | |
39 | - | |
40 | -static const struct file_operations proc_cpustate = { | |
41 | - .open = cpustate_proc_open, | |
42 | - .read = seq_read, | |
43 | - .llseek = seq_lseek, | |
44 | - .release = single_release, | |
45 | - .owner = THIS_MODULE, | |
46 | -}; | |
47 | - | |
48 | -static int cpustate_proc_open(struct inode *inode, struct file *file) | |
49 | -{ | |
50 | - return single_open(file, cpustate_proc_read, NULL); | |
51 | -} | |
52 | - | |
53 | -static int cpustate_proc_read(struct seq_file *seq, void *offset) | |
54 | -{ | |
55 | - seq_printf(seq, "CPU State: %04x\n", cpustate_get_state()); | |
56 | - return 0; | |
57 | -} | |
58 | - | |
59 | -static int cpustate_get_ref(int excl) | |
60 | -{ | |
61 | - | |
62 | - int retval = -EBUSY; | |
63 | - | |
64 | - spin_lock(&cpustate.lock); | |
65 | - | |
66 | - if (cpustate.excl) | |
67 | - goto out_busy; | |
68 | - | |
69 | - if (excl) { | |
70 | - if (cpustate.open_count) | |
71 | - goto out_busy; | |
72 | - cpustate.excl = 1; | |
73 | - } | |
74 | - | |
75 | - cpustate.open_count++; | |
76 | - retval = 0; | |
77 | - | |
78 | - out_busy: | |
79 | - spin_unlock(&cpustate.lock); | |
80 | - return retval; | |
81 | -} | |
82 | - | |
83 | -static int cpustate_free_ref(void) | |
84 | -{ | |
85 | - | |
86 | - spin_lock(&cpustate.lock); | |
87 | - | |
88 | - cpustate.excl = 0; | |
89 | - cpustate.open_count--; | |
90 | - | |
91 | - spin_unlock(&cpustate.lock); | |
92 | - return 0; | |
93 | -} | |
94 | - | |
95 | -static unsigned char cpustate_get_state(void) | |
96 | -{ | |
97 | - | |
98 | - return cpustate.cached_val; | |
99 | -} | |
100 | - | |
101 | -static void cpustate_set_state(unsigned char new_state) | |
102 | -{ | |
103 | - unsigned int state = (new_state << 21); | |
104 | - | |
105 | -#ifdef DEBUG_CPUSTATE | |
106 | - printk("CPUSTATE -> 0x%x\n", new_state); | |
107 | -#endif | |
108 | - spin_lock(&cpustate.lock); | |
109 | - cpustate.cached_val = new_state; | |
110 | - writel((0xff << 21), cpustate.clr_addr); | |
111 | - writel(state, cpustate.set_addr); | |
112 | - spin_unlock(&cpustate.lock); | |
113 | -} | |
114 | - | |
115 | -/* | |
116 | - * Now all the various file operations that we export. | |
117 | - */ | |
118 | - | |
119 | -static ssize_t cpustate_read(struct file *file, char *buf, | |
120 | - size_t count, loff_t * ppos) | |
121 | -{ | |
122 | - unsigned char data; | |
123 | - | |
124 | - if (count < 0) | |
125 | - return -EFAULT; | |
126 | - if (count == 0) | |
127 | - return 0; | |
128 | - | |
129 | - data = cpustate_get_state(); | |
130 | - if (copy_to_user(buf, &data, sizeof(unsigned char))) | |
131 | - return -EFAULT; | |
132 | - return sizeof(unsigned char); | |
133 | -} | |
134 | - | |
135 | -static ssize_t cpustate_write(struct file *file, const char *buf, | |
136 | - size_t count, loff_t * ppos) | |
137 | -{ | |
138 | - unsigned char data; | |
139 | - | |
140 | - if (count < 0) | |
141 | - return -EFAULT; | |
142 | - | |
143 | - if (count == 0) | |
144 | - return 0; | |
145 | - | |
146 | - if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char))) | |
147 | - return -EFAULT; | |
148 | - | |
149 | - cpustate_set_state(data); | |
150 | - return sizeof(unsigned char); | |
151 | -} | |
152 | - | |
153 | -static int cpustate_open(struct inode *inode, struct file *file) | |
154 | -{ | |
155 | - int ret; | |
156 | - | |
157 | - lock_kernel(); | |
158 | - ret = cpustate_get_ref((file->f_flags & O_EXCL)); | |
159 | - unlock_kernel(); | |
160 | - | |
161 | - return ret; | |
162 | -} | |
163 | - | |
164 | -static int cpustate_release(struct inode *inode, struct file *file) | |
165 | -{ | |
166 | - return cpustate_free_ref(); | |
167 | -} | |
168 | - | |
169 | -static struct platform_driver hdpu_cpustate_driver = { | |
170 | - .probe = hdpu_cpustate_probe, | |
171 | - .remove = hdpu_cpustate_remove, | |
172 | - .driver = { | |
173 | - .name = HDPU_CPUSTATE_NAME, | |
174 | - .owner = THIS_MODULE, | |
175 | - }, | |
176 | -}; | |
177 | - | |
178 | -/* | |
179 | - * The various file operations we support. | |
180 | - */ | |
181 | -static const struct file_operations cpustate_fops = { | |
182 | - .owner = THIS_MODULE, | |
183 | - .open = cpustate_open, | |
184 | - .release = cpustate_release, | |
185 | - .read = cpustate_read, | |
186 | - .write = cpustate_write, | |
187 | - .llseek = no_llseek, | |
188 | -}; | |
189 | - | |
190 | -static struct miscdevice cpustate_dev = { | |
191 | - .minor = MISC_DYNAMIC_MINOR, | |
192 | - .name = "sky_cpustate", | |
193 | - .fops = &cpustate_fops, | |
194 | -}; | |
195 | - | |
196 | -static int hdpu_cpustate_probe(struct platform_device *pdev) | |
197 | -{ | |
198 | - struct resource *res; | |
199 | - struct proc_dir_entry *proc_de; | |
200 | - int ret; | |
201 | - | |
202 | - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
203 | - if (!res) { | |
204 | - printk(KERN_ERR "sky_cpustate: " | |
205 | - "Invalid memory resource.\n"); | |
206 | - return -EINVAL; | |
207 | - } | |
208 | - cpustate.set_addr = (unsigned long *)res->start; | |
209 | - cpustate.clr_addr = (unsigned long *)res->end - 1; | |
210 | - | |
211 | - ret = misc_register(&cpustate_dev); | |
212 | - if (ret) { | |
213 | - printk(KERN_WARNING "sky_cpustate: " | |
214 | - "Unable to register misc device.\n"); | |
215 | - cpustate.set_addr = NULL; | |
216 | - cpustate.clr_addr = NULL; | |
217 | - return ret; | |
218 | - } | |
219 | - | |
220 | - proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate); | |
221 | - if (!proc_de) { | |
222 | - printk(KERN_WARNING "sky_cpustate: " | |
223 | - "Unable to create proc entry\n"); | |
224 | - } | |
225 | - | |
226 | - printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); | |
227 | - return 0; | |
228 | -} | |
229 | - | |
230 | -static int hdpu_cpustate_remove(struct platform_device *pdev) | |
231 | -{ | |
232 | - cpustate.set_addr = NULL; | |
233 | - cpustate.clr_addr = NULL; | |
234 | - | |
235 | - remove_proc_entry("sky_cpustate", NULL); | |
236 | - misc_deregister(&cpustate_dev); | |
237 | - | |
238 | - return 0; | |
239 | -} | |
240 | - | |
241 | -static int __init cpustate_init(void) | |
242 | -{ | |
243 | - return platform_driver_register(&hdpu_cpustate_driver); | |
244 | -} | |
245 | - | |
246 | -static void __exit cpustate_exit(void) | |
247 | -{ | |
248 | - platform_driver_unregister(&hdpu_cpustate_driver); | |
249 | -} | |
250 | - | |
251 | -module_init(cpustate_init); | |
252 | -module_exit(cpustate_exit); | |
253 | - | |
254 | -MODULE_AUTHOR("Brian Waite"); | |
255 | -MODULE_LICENSE("GPL"); | |
256 | -MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME); |
drivers/misc/hdpuftrs/hdpu_nexus.c
1 | -/* | |
2 | - * Sky Nexus Register Driver | |
3 | - * | |
4 | - * Copyright (C) 2002 Brian Waite | |
5 | - * | |
6 | - * This driver allows reading the Nexus register | |
7 | - * It exports the /proc/sky_chassis_id and also | |
8 | - * /proc/sky_slot_id pseudo-file for status information. | |
9 | - * | |
10 | - * This program is free software; you can redistribute it and/or | |
11 | - * modify it under the terms of the GNU General Public License | |
12 | - * as published by the Free Software Foundation; either version | |
13 | - * 2 of the License, or (at your option) any later version. | |
14 | - * | |
15 | - */ | |
16 | - | |
17 | -#include <linux/module.h> | |
18 | -#include <linux/kernel.h> | |
19 | -#include <linux/proc_fs.h> | |
20 | -#include <linux/hdpu_features.h> | |
21 | -#include <linux/platform_device.h> | |
22 | -#include <linux/seq_file.h> | |
23 | -#include <asm/io.h> | |
24 | - | |
25 | -static int hdpu_nexus_probe(struct platform_device *pdev); | |
26 | -static int hdpu_nexus_remove(struct platform_device *pdev); | |
27 | -static int hdpu_slot_id_open(struct inode *inode, struct file *file); | |
28 | -static int hdpu_slot_id_read(struct seq_file *seq, void *offset); | |
29 | -static int hdpu_chassis_id_open(struct inode *inode, struct file *file); | |
30 | -static int hdpu_chassis_id_read(struct seq_file *seq, void *offset); | |
31 | - | |
32 | -static struct proc_dir_entry *hdpu_slot_id; | |
33 | -static struct proc_dir_entry *hdpu_chassis_id; | |
34 | -static int slot_id = -1; | |
35 | -static int chassis_id = -1; | |
36 | - | |
37 | -static const struct file_operations proc_slot_id = { | |
38 | - .open = hdpu_slot_id_open, | |
39 | - .read = seq_read, | |
40 | - .llseek = seq_lseek, | |
41 | - .release = single_release, | |
42 | - .owner = THIS_MODULE, | |
43 | -}; | |
44 | - | |
45 | -static const struct file_operations proc_chassis_id = { | |
46 | - .open = hdpu_chassis_id_open, | |
47 | - .read = seq_read, | |
48 | - .llseek = seq_lseek, | |
49 | - .release = single_release, | |
50 | - .owner = THIS_MODULE, | |
51 | -}; | |
52 | - | |
53 | -static struct platform_driver hdpu_nexus_driver = { | |
54 | - .probe = hdpu_nexus_probe, | |
55 | - .remove = hdpu_nexus_remove, | |
56 | - .driver = { | |
57 | - .name = HDPU_NEXUS_NAME, | |
58 | - .owner = THIS_MODULE, | |
59 | - }, | |
60 | -}; | |
61 | - | |
62 | -static int hdpu_slot_id_open(struct inode *inode, struct file *file) | |
63 | -{ | |
64 | - return single_open(file, hdpu_slot_id_read, NULL); | |
65 | -} | |
66 | - | |
67 | -static int hdpu_slot_id_read(struct seq_file *seq, void *offset) | |
68 | -{ | |
69 | - seq_printf(seq, "%d\n", slot_id); | |
70 | - return 0; | |
71 | -} | |
72 | - | |
73 | -static int hdpu_chassis_id_open(struct inode *inode, struct file *file) | |
74 | -{ | |
75 | - return single_open(file, hdpu_chassis_id_read, NULL); | |
76 | -} | |
77 | - | |
78 | -static int hdpu_chassis_id_read(struct seq_file *seq, void *offset) | |
79 | -{ | |
80 | - seq_printf(seq, "%d\n", chassis_id); | |
81 | - return 0; | |
82 | -} | |
83 | - | |
84 | -static int hdpu_nexus_probe(struct platform_device *pdev) | |
85 | -{ | |
86 | - struct resource *res; | |
87 | - int *nexus_id_addr; | |
88 | - | |
89 | - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
90 | - if (!res) { | |
91 | - printk(KERN_ERR "sky_nexus: " | |
92 | - "Invalid memory resource.\n"); | |
93 | - return -EINVAL; | |
94 | - } | |
95 | - nexus_id_addr = ioremap(res->start, | |
96 | - (unsigned long)(res->end - res->start)); | |
97 | - if (nexus_id_addr) { | |
98 | - slot_id = (*nexus_id_addr >> 8) & 0x1f; | |
99 | - chassis_id = *nexus_id_addr & 0xff; | |
100 | - iounmap(nexus_id_addr); | |
101 | - } else { | |
102 | - printk(KERN_ERR "sky_nexus: Could not map slot id\n"); | |
103 | - } | |
104 | - | |
105 | - hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id); | |
106 | - if (!hdpu_slot_id) { | |
107 | - printk(KERN_WARNING "sky_nexus: " | |
108 | - "Unable to create proc dir entry: sky_slot_id\n"); | |
109 | - } | |
110 | - | |
111 | - hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL, | |
112 | - &proc_chassis_id); | |
113 | - if (!hdpu_chassis_id) | |
114 | - printk(KERN_WARNING "sky_nexus: " | |
115 | - "Unable to create proc dir entry: sky_chassis_id\n"); | |
116 | - | |
117 | - return 0; | |
118 | -} | |
119 | - | |
120 | -static int hdpu_nexus_remove(struct platform_device *pdev) | |
121 | -{ | |
122 | - slot_id = -1; | |
123 | - chassis_id = -1; | |
124 | - | |
125 | - remove_proc_entry("sky_slot_id", NULL); | |
126 | - remove_proc_entry("sky_chassis_id", NULL); | |
127 | - | |
128 | - hdpu_slot_id = 0; | |
129 | - hdpu_chassis_id = 0; | |
130 | - | |
131 | - return 0; | |
132 | -} | |
133 | - | |
134 | -static int __init nexus_init(void) | |
135 | -{ | |
136 | - return platform_driver_register(&hdpu_nexus_driver); | |
137 | -} | |
138 | - | |
139 | -static void __exit nexus_exit(void) | |
140 | -{ | |
141 | - platform_driver_unregister(&hdpu_nexus_driver); | |
142 | -} | |
143 | - | |
144 | -module_init(nexus_init); | |
145 | -module_exit(nexus_exit); | |
146 | - | |
147 | -MODULE_AUTHOR("Brian Waite"); | |
148 | -MODULE_LICENSE("GPL"); | |
149 | -MODULE_ALIAS("platform:" HDPU_NEXUS_NAME); |
drivers/serial/mpsc.c
include/linux/hdpu_features.h
1 | -#include <linux/spinlock.h> | |
2 | - | |
3 | -struct cpustate_t { | |
4 | - spinlock_t lock; | |
5 | - int excl; | |
6 | - int open_count; | |
7 | - unsigned char cached_val; | |
8 | - int inited; | |
9 | - unsigned long *set_addr; | |
10 | - unsigned long *clr_addr; | |
11 | -}; | |
12 | - | |
13 | - | |
14 | -#define HDPU_CPUSTATE_NAME "hdpu cpustate" | |
15 | -#define HDPU_NEXUS_NAME "hdpu nexus" | |
16 | - | |
17 | -#define CPUSTATE_KERNEL_MAJOR 0x10 | |
18 | - | |
19 | -#define CPUSTATE_KERNEL_INIT_DRV 0 /* CPU State Driver Initialized */ | |
20 | -#define CPUSTATE_KERNEL_INIT_PCI 1 /* 64360 PCI Busses Init */ | |
21 | -#define CPUSTATE_KERNEL_INIT_REG 2 /* 64360 Bridge Init */ | |
22 | -#define CPUSTATE_KERNEL_CPU1_KICK 3 /* Boot cpu 1 */ | |
23 | -#define CPUSTATE_KERNEL_CPU1_OK 4 /* Cpu 1 has checked in */ | |
24 | -#define CPUSTATE_KERNEL_OK 5 /* Terminal state */ | |
25 | -#define CPUSTATE_KERNEL_RESET 14 /* Board reset via SW*/ | |
26 | -#define CPUSTATE_KERNEL_HALT 15 /* Board halted via SW*/ |
sound/aoa/core/gpio-pmf.c
... | ... | @@ -116,12 +116,9 @@ |
116 | 116 | mutex_destroy(&rt->line_in_notify.mutex); |
117 | 117 | mutex_destroy(&rt->line_out_notify.mutex); |
118 | 118 | |
119 | - if (rt->headphone_notify.gpio_private) | |
120 | - kfree(rt->headphone_notify.gpio_private); | |
121 | - if (rt->line_in_notify.gpio_private) | |
122 | - kfree(rt->line_in_notify.gpio_private); | |
123 | - if (rt->line_out_notify.gpio_private) | |
124 | - kfree(rt->line_out_notify.gpio_private); | |
119 | + kfree(rt->headphone_notify.gpio_private); | |
120 | + kfree(rt->line_in_notify.gpio_private); | |
121 | + kfree(rt->line_out_notify.gpio_private); | |
125 | 122 | } |
126 | 123 | |
127 | 124 | static void pmf_handle_notify_irq(void *data) |