Commit 5dc626358f76e32c4f111904f165bb28e2b447c0

Authored by Linus Torvalds

Merge tag 'pci-v3.18-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

Pull PCI fix from Bjorn Helgaas:
 "This fixes a Tegra20 regression that we introduced during the v3.18
  merge window"

* tag 'pci-v3.18-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci:
  PCI: tegra: Use physical range for I/O mapping

Showing 1 changed file Inline Diff

drivers/pci/host/pci-tegra.c
1 /* 1 /*
2 * PCIe host controller driver for Tegra SoCs 2 * PCIe host controller driver for Tegra SoCs
3 * 3 *
4 * Copyright (c) 2010, CompuLab, Ltd. 4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il> 5 * Author: Mike Rapoport <mike@compulab.co.il>
6 * 6 *
7 * Based on NVIDIA PCIe driver 7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation. 8 * Copyright (c) 2008-2009, NVIDIA Corporation.
9 * 9 *
10 * Bits taken from arch/arm/mach-dove/pcie.c 10 * Bits taken from arch/arm/mach-dove/pcie.c
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT 17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details. 20 * more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License along 22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc., 23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 */ 25 */
26 26
27 #include <linux/clk.h> 27 #include <linux/clk.h>
28 #include <linux/debugfs.h> 28 #include <linux/debugfs.h>
29 #include <linux/delay.h> 29 #include <linux/delay.h>
30 #include <linux/export.h> 30 #include <linux/export.h>
31 #include <linux/interrupt.h> 31 #include <linux/interrupt.h>
32 #include <linux/irq.h> 32 #include <linux/irq.h>
33 #include <linux/irqdomain.h> 33 #include <linux/irqdomain.h>
34 #include <linux/kernel.h> 34 #include <linux/kernel.h>
35 #include <linux/module.h> 35 #include <linux/module.h>
36 #include <linux/msi.h> 36 #include <linux/msi.h>
37 #include <linux/of_address.h> 37 #include <linux/of_address.h>
38 #include <linux/of_pci.h> 38 #include <linux/of_pci.h>
39 #include <linux/of_platform.h> 39 #include <linux/of_platform.h>
40 #include <linux/pci.h> 40 #include <linux/pci.h>
41 #include <linux/phy/phy.h> 41 #include <linux/phy/phy.h>
42 #include <linux/platform_device.h> 42 #include <linux/platform_device.h>
43 #include <linux/reset.h> 43 #include <linux/reset.h>
44 #include <linux/sizes.h> 44 #include <linux/sizes.h>
45 #include <linux/slab.h> 45 #include <linux/slab.h>
46 #include <linux/vmalloc.h> 46 #include <linux/vmalloc.h>
47 #include <linux/regulator/consumer.h> 47 #include <linux/regulator/consumer.h>
48 48
49 #include <soc/tegra/cpuidle.h> 49 #include <soc/tegra/cpuidle.h>
50 #include <soc/tegra/pmc.h> 50 #include <soc/tegra/pmc.h>
51 51
52 #include <asm/mach/irq.h> 52 #include <asm/mach/irq.h>
53 #include <asm/mach/map.h> 53 #include <asm/mach/map.h>
54 #include <asm/mach/pci.h> 54 #include <asm/mach/pci.h>
55 55
56 #define INT_PCI_MSI_NR (8 * 32) 56 #define INT_PCI_MSI_NR (8 * 32)
57 57
58 /* register definitions */ 58 /* register definitions */
59 59
60 #define AFI_AXI_BAR0_SZ 0x00 60 #define AFI_AXI_BAR0_SZ 0x00
61 #define AFI_AXI_BAR1_SZ 0x04 61 #define AFI_AXI_BAR1_SZ 0x04
62 #define AFI_AXI_BAR2_SZ 0x08 62 #define AFI_AXI_BAR2_SZ 0x08
63 #define AFI_AXI_BAR3_SZ 0x0c 63 #define AFI_AXI_BAR3_SZ 0x0c
64 #define AFI_AXI_BAR4_SZ 0x10 64 #define AFI_AXI_BAR4_SZ 0x10
65 #define AFI_AXI_BAR5_SZ 0x14 65 #define AFI_AXI_BAR5_SZ 0x14
66 66
67 #define AFI_AXI_BAR0_START 0x18 67 #define AFI_AXI_BAR0_START 0x18
68 #define AFI_AXI_BAR1_START 0x1c 68 #define AFI_AXI_BAR1_START 0x1c
69 #define AFI_AXI_BAR2_START 0x20 69 #define AFI_AXI_BAR2_START 0x20
70 #define AFI_AXI_BAR3_START 0x24 70 #define AFI_AXI_BAR3_START 0x24
71 #define AFI_AXI_BAR4_START 0x28 71 #define AFI_AXI_BAR4_START 0x28
72 #define AFI_AXI_BAR5_START 0x2c 72 #define AFI_AXI_BAR5_START 0x2c
73 73
74 #define AFI_FPCI_BAR0 0x30 74 #define AFI_FPCI_BAR0 0x30
75 #define AFI_FPCI_BAR1 0x34 75 #define AFI_FPCI_BAR1 0x34
76 #define AFI_FPCI_BAR2 0x38 76 #define AFI_FPCI_BAR2 0x38
77 #define AFI_FPCI_BAR3 0x3c 77 #define AFI_FPCI_BAR3 0x3c
78 #define AFI_FPCI_BAR4 0x40 78 #define AFI_FPCI_BAR4 0x40
79 #define AFI_FPCI_BAR5 0x44 79 #define AFI_FPCI_BAR5 0x44
80 80
81 #define AFI_CACHE_BAR0_SZ 0x48 81 #define AFI_CACHE_BAR0_SZ 0x48
82 #define AFI_CACHE_BAR0_ST 0x4c 82 #define AFI_CACHE_BAR0_ST 0x4c
83 #define AFI_CACHE_BAR1_SZ 0x50 83 #define AFI_CACHE_BAR1_SZ 0x50
84 #define AFI_CACHE_BAR1_ST 0x54 84 #define AFI_CACHE_BAR1_ST 0x54
85 85
86 #define AFI_MSI_BAR_SZ 0x60 86 #define AFI_MSI_BAR_SZ 0x60
87 #define AFI_MSI_FPCI_BAR_ST 0x64 87 #define AFI_MSI_FPCI_BAR_ST 0x64
88 #define AFI_MSI_AXI_BAR_ST 0x68 88 #define AFI_MSI_AXI_BAR_ST 0x68
89 89
90 #define AFI_MSI_VEC0 0x6c 90 #define AFI_MSI_VEC0 0x6c
91 #define AFI_MSI_VEC1 0x70 91 #define AFI_MSI_VEC1 0x70
92 #define AFI_MSI_VEC2 0x74 92 #define AFI_MSI_VEC2 0x74
93 #define AFI_MSI_VEC3 0x78 93 #define AFI_MSI_VEC3 0x78
94 #define AFI_MSI_VEC4 0x7c 94 #define AFI_MSI_VEC4 0x7c
95 #define AFI_MSI_VEC5 0x80 95 #define AFI_MSI_VEC5 0x80
96 #define AFI_MSI_VEC6 0x84 96 #define AFI_MSI_VEC6 0x84
97 #define AFI_MSI_VEC7 0x88 97 #define AFI_MSI_VEC7 0x88
98 98
99 #define AFI_MSI_EN_VEC0 0x8c 99 #define AFI_MSI_EN_VEC0 0x8c
100 #define AFI_MSI_EN_VEC1 0x90 100 #define AFI_MSI_EN_VEC1 0x90
101 #define AFI_MSI_EN_VEC2 0x94 101 #define AFI_MSI_EN_VEC2 0x94
102 #define AFI_MSI_EN_VEC3 0x98 102 #define AFI_MSI_EN_VEC3 0x98
103 #define AFI_MSI_EN_VEC4 0x9c 103 #define AFI_MSI_EN_VEC4 0x9c
104 #define AFI_MSI_EN_VEC5 0xa0 104 #define AFI_MSI_EN_VEC5 0xa0
105 #define AFI_MSI_EN_VEC6 0xa4 105 #define AFI_MSI_EN_VEC6 0xa4
106 #define AFI_MSI_EN_VEC7 0xa8 106 #define AFI_MSI_EN_VEC7 0xa8
107 107
108 #define AFI_CONFIGURATION 0xac 108 #define AFI_CONFIGURATION 0xac
109 #define AFI_CONFIGURATION_EN_FPCI (1 << 0) 109 #define AFI_CONFIGURATION_EN_FPCI (1 << 0)
110 110
111 #define AFI_FPCI_ERROR_MASKS 0xb0 111 #define AFI_FPCI_ERROR_MASKS 0xb0
112 112
113 #define AFI_INTR_MASK 0xb4 113 #define AFI_INTR_MASK 0xb4
114 #define AFI_INTR_MASK_INT_MASK (1 << 0) 114 #define AFI_INTR_MASK_INT_MASK (1 << 0)
115 #define AFI_INTR_MASK_MSI_MASK (1 << 8) 115 #define AFI_INTR_MASK_MSI_MASK (1 << 8)
116 116
117 #define AFI_INTR_CODE 0xb8 117 #define AFI_INTR_CODE 0xb8
118 #define AFI_INTR_CODE_MASK 0xf 118 #define AFI_INTR_CODE_MASK 0xf
119 #define AFI_INTR_INI_SLAVE_ERROR 1 119 #define AFI_INTR_INI_SLAVE_ERROR 1
120 #define AFI_INTR_INI_DECODE_ERROR 2 120 #define AFI_INTR_INI_DECODE_ERROR 2
121 #define AFI_INTR_TARGET_ABORT 3 121 #define AFI_INTR_TARGET_ABORT 3
122 #define AFI_INTR_MASTER_ABORT 4 122 #define AFI_INTR_MASTER_ABORT 4
123 #define AFI_INTR_INVALID_WRITE 5 123 #define AFI_INTR_INVALID_WRITE 5
124 #define AFI_INTR_LEGACY 6 124 #define AFI_INTR_LEGACY 6
125 #define AFI_INTR_FPCI_DECODE_ERROR 7 125 #define AFI_INTR_FPCI_DECODE_ERROR 7
126 #define AFI_INTR_AXI_DECODE_ERROR 8 126 #define AFI_INTR_AXI_DECODE_ERROR 8
127 #define AFI_INTR_FPCI_TIMEOUT 9 127 #define AFI_INTR_FPCI_TIMEOUT 9
128 #define AFI_INTR_PE_PRSNT_SENSE 10 128 #define AFI_INTR_PE_PRSNT_SENSE 10
129 #define AFI_INTR_PE_CLKREQ_SENSE 11 129 #define AFI_INTR_PE_CLKREQ_SENSE 11
130 #define AFI_INTR_CLKCLAMP_SENSE 12 130 #define AFI_INTR_CLKCLAMP_SENSE 12
131 #define AFI_INTR_RDY4PD_SENSE 13 131 #define AFI_INTR_RDY4PD_SENSE 13
132 #define AFI_INTR_P2P_ERROR 14 132 #define AFI_INTR_P2P_ERROR 14
133 133
134 #define AFI_INTR_SIGNATURE 0xbc 134 #define AFI_INTR_SIGNATURE 0xbc
135 #define AFI_UPPER_FPCI_ADDRESS 0xc0 135 #define AFI_UPPER_FPCI_ADDRESS 0xc0
136 #define AFI_SM_INTR_ENABLE 0xc4 136 #define AFI_SM_INTR_ENABLE 0xc4
137 #define AFI_SM_INTR_INTA_ASSERT (1 << 0) 137 #define AFI_SM_INTR_INTA_ASSERT (1 << 0)
138 #define AFI_SM_INTR_INTB_ASSERT (1 << 1) 138 #define AFI_SM_INTR_INTB_ASSERT (1 << 1)
139 #define AFI_SM_INTR_INTC_ASSERT (1 << 2) 139 #define AFI_SM_INTR_INTC_ASSERT (1 << 2)
140 #define AFI_SM_INTR_INTD_ASSERT (1 << 3) 140 #define AFI_SM_INTR_INTD_ASSERT (1 << 3)
141 #define AFI_SM_INTR_INTA_DEASSERT (1 << 4) 141 #define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
142 #define AFI_SM_INTR_INTB_DEASSERT (1 << 5) 142 #define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
143 #define AFI_SM_INTR_INTC_DEASSERT (1 << 6) 143 #define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
144 #define AFI_SM_INTR_INTD_DEASSERT (1 << 7) 144 #define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
145 145
146 #define AFI_AFI_INTR_ENABLE 0xc8 146 #define AFI_AFI_INTR_ENABLE 0xc8
147 #define AFI_INTR_EN_INI_SLVERR (1 << 0) 147 #define AFI_INTR_EN_INI_SLVERR (1 << 0)
148 #define AFI_INTR_EN_INI_DECERR (1 << 1) 148 #define AFI_INTR_EN_INI_DECERR (1 << 1)
149 #define AFI_INTR_EN_TGT_SLVERR (1 << 2) 149 #define AFI_INTR_EN_TGT_SLVERR (1 << 2)
150 #define AFI_INTR_EN_TGT_DECERR (1 << 3) 150 #define AFI_INTR_EN_TGT_DECERR (1 << 3)
151 #define AFI_INTR_EN_TGT_WRERR (1 << 4) 151 #define AFI_INTR_EN_TGT_WRERR (1 << 4)
152 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5) 152 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
153 #define AFI_INTR_EN_AXI_DECERR (1 << 6) 153 #define AFI_INTR_EN_AXI_DECERR (1 << 6)
154 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7) 154 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
155 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8) 155 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
156 156
157 #define AFI_PCIE_CONFIG 0x0f8 157 #define AFI_PCIE_CONFIG 0x0f8
158 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1)) 158 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
159 #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe 159 #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
160 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) 160 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
161 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) 161 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
162 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20) 162 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
163 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20) 163 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
164 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) 164 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
165 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20) 165 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
166 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20) 166 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
167 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) 167 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
168 168
169 #define AFI_FUSE 0x104 169 #define AFI_FUSE 0x104
170 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) 170 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
171 171
172 #define AFI_PEX0_CTRL 0x110 172 #define AFI_PEX0_CTRL 0x110
173 #define AFI_PEX1_CTRL 0x118 173 #define AFI_PEX1_CTRL 0x118
174 #define AFI_PEX2_CTRL 0x128 174 #define AFI_PEX2_CTRL 0x128
175 #define AFI_PEX_CTRL_RST (1 << 0) 175 #define AFI_PEX_CTRL_RST (1 << 0)
176 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) 176 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
177 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3) 177 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
178 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4) 178 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
179 179
180 #define AFI_PLLE_CONTROL 0x160 180 #define AFI_PLLE_CONTROL 0x160
181 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9) 181 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
182 #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1) 182 #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
183 183
184 #define AFI_PEXBIAS_CTRL_0 0x168 184 #define AFI_PEXBIAS_CTRL_0 0x168
185 185
186 #define RP_VEND_XP 0x00000F00 186 #define RP_VEND_XP 0x00000F00
187 #define RP_VEND_XP_DL_UP (1 << 30) 187 #define RP_VEND_XP_DL_UP (1 << 30)
188 188
189 #define RP_PRIV_MISC 0x00000FE0 189 #define RP_PRIV_MISC 0x00000FE0
190 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0) 190 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
191 #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0) 191 #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
192 192
193 #define RP_LINK_CONTROL_STATUS 0x00000090 193 #define RP_LINK_CONTROL_STATUS 0x00000090
194 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 194 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
195 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 195 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
196 196
197 #define PADS_CTL_SEL 0x0000009C 197 #define PADS_CTL_SEL 0x0000009C
198 198
199 #define PADS_CTL 0x000000A0 199 #define PADS_CTL 0x000000A0
200 #define PADS_CTL_IDDQ_1L (1 << 0) 200 #define PADS_CTL_IDDQ_1L (1 << 0)
201 #define PADS_CTL_TX_DATA_EN_1L (1 << 6) 201 #define PADS_CTL_TX_DATA_EN_1L (1 << 6)
202 #define PADS_CTL_RX_DATA_EN_1L (1 << 10) 202 #define PADS_CTL_RX_DATA_EN_1L (1 << 10)
203 203
204 #define PADS_PLL_CTL_TEGRA20 0x000000B8 204 #define PADS_PLL_CTL_TEGRA20 0x000000B8
205 #define PADS_PLL_CTL_TEGRA30 0x000000B4 205 #define PADS_PLL_CTL_TEGRA30 0x000000B4
206 #define PADS_PLL_CTL_RST_B4SM (1 << 1) 206 #define PADS_PLL_CTL_RST_B4SM (1 << 1)
207 #define PADS_PLL_CTL_LOCKDET (1 << 8) 207 #define PADS_PLL_CTL_LOCKDET (1 << 8)
208 #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) 208 #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
209 #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16) 209 #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
210 #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16) 210 #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
211 #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16) 211 #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
212 #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20) 212 #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
213 #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20) 213 #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
214 #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20) 214 #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
215 #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22) 215 #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
216 216
217 #define PADS_REFCLK_CFG0 0x000000C8 217 #define PADS_REFCLK_CFG0 0x000000C8
218 #define PADS_REFCLK_CFG1 0x000000CC 218 #define PADS_REFCLK_CFG1 0x000000CC
219 #define PADS_REFCLK_BIAS 0x000000D0 219 #define PADS_REFCLK_BIAS 0x000000D0
220 220
221 /* 221 /*
222 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit 222 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
223 * entries, one entry per PCIe port. These field definitions and desired 223 * entries, one entry per PCIe port. These field definitions and desired
224 * values aren't in the TRM, but do come from NVIDIA. 224 * values aren't in the TRM, but do come from NVIDIA.
225 */ 225 */
226 #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */ 226 #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
227 #define PADS_REFCLK_CFG_E_TERM_SHIFT 7 227 #define PADS_REFCLK_CFG_E_TERM_SHIFT 7
228 #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */ 228 #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
229 #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ 229 #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
230 230
231 /* Default value provided by HW engineering is 0xfa5c */ 231 /* Default value provided by HW engineering is 0xfa5c */
232 #define PADS_REFCLK_CFG_VALUE \ 232 #define PADS_REFCLK_CFG_VALUE \
233 ( \ 233 ( \
234 (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \ 234 (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
235 (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \ 235 (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
236 (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \ 236 (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
237 (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \ 237 (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
238 ) 238 )
239 239
240 struct tegra_msi { 240 struct tegra_msi {
241 struct msi_chip chip; 241 struct msi_chip chip;
242 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 242 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
243 struct irq_domain *domain; 243 struct irq_domain *domain;
244 unsigned long pages; 244 unsigned long pages;
245 struct mutex lock; 245 struct mutex lock;
246 int irq; 246 int irq;
247 }; 247 };
248 248
249 /* used to differentiate between Tegra SoC generations */ 249 /* used to differentiate between Tegra SoC generations */
250 struct tegra_pcie_soc_data { 250 struct tegra_pcie_soc_data {
251 unsigned int num_ports; 251 unsigned int num_ports;
252 unsigned int msi_base_shift; 252 unsigned int msi_base_shift;
253 u32 pads_pll_ctl; 253 u32 pads_pll_ctl;
254 u32 tx_ref_sel; 254 u32 tx_ref_sel;
255 bool has_pex_clkreq_en; 255 bool has_pex_clkreq_en;
256 bool has_pex_bias_ctrl; 256 bool has_pex_bias_ctrl;
257 bool has_intr_prsnt_sense; 257 bool has_intr_prsnt_sense;
258 bool has_cml_clk; 258 bool has_cml_clk;
259 bool has_gen2; 259 bool has_gen2;
260 }; 260 };
261 261
262 static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip) 262 static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
263 { 263 {
264 return container_of(chip, struct tegra_msi, chip); 264 return container_of(chip, struct tegra_msi, chip);
265 } 265 }
266 266
267 struct tegra_pcie { 267 struct tegra_pcie {
268 struct device *dev; 268 struct device *dev;
269 269
270 void __iomem *pads; 270 void __iomem *pads;
271 void __iomem *afi; 271 void __iomem *afi;
272 int irq; 272 int irq;
273 273
274 struct list_head buses; 274 struct list_head buses;
275 struct resource *cs; 275 struct resource *cs;
276 276
277 struct resource all; 277 struct resource all;
278 struct resource io; 278 struct resource io;
279 struct resource pio;
279 struct resource mem; 280 struct resource mem;
280 struct resource prefetch; 281 struct resource prefetch;
281 struct resource busn; 282 struct resource busn;
282 283
283 struct clk *pex_clk; 284 struct clk *pex_clk;
284 struct clk *afi_clk; 285 struct clk *afi_clk;
285 struct clk *pll_e; 286 struct clk *pll_e;
286 struct clk *cml_clk; 287 struct clk *cml_clk;
287 288
288 struct reset_control *pex_rst; 289 struct reset_control *pex_rst;
289 struct reset_control *afi_rst; 290 struct reset_control *afi_rst;
290 struct reset_control *pcie_xrst; 291 struct reset_control *pcie_xrst;
291 292
292 struct phy *phy; 293 struct phy *phy;
293 294
294 struct tegra_msi msi; 295 struct tegra_msi msi;
295 296
296 struct list_head ports; 297 struct list_head ports;
297 unsigned int num_ports; 298 unsigned int num_ports;
298 u32 xbar_config; 299 u32 xbar_config;
299 300
300 struct regulator_bulk_data *supplies; 301 struct regulator_bulk_data *supplies;
301 unsigned int num_supplies; 302 unsigned int num_supplies;
302 303
303 const struct tegra_pcie_soc_data *soc_data; 304 const struct tegra_pcie_soc_data *soc_data;
304 struct dentry *debugfs; 305 struct dentry *debugfs;
305 }; 306 };
306 307
307 struct tegra_pcie_port { 308 struct tegra_pcie_port {
308 struct tegra_pcie *pcie; 309 struct tegra_pcie *pcie;
309 struct list_head list; 310 struct list_head list;
310 struct resource regs; 311 struct resource regs;
311 void __iomem *base; 312 void __iomem *base;
312 unsigned int index; 313 unsigned int index;
313 unsigned int lanes; 314 unsigned int lanes;
314 }; 315 };
315 316
316 struct tegra_pcie_bus { 317 struct tegra_pcie_bus {
317 struct vm_struct *area; 318 struct vm_struct *area;
318 struct list_head list; 319 struct list_head list;
319 unsigned int nr; 320 unsigned int nr;
320 }; 321 };
321 322
322 static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys) 323 static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
323 { 324 {
324 return sys->private_data; 325 return sys->private_data;
325 } 326 }
326 327
327 static inline void afi_writel(struct tegra_pcie *pcie, u32 value, 328 static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
328 unsigned long offset) 329 unsigned long offset)
329 { 330 {
330 writel(value, pcie->afi + offset); 331 writel(value, pcie->afi + offset);
331 } 332 }
332 333
333 static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset) 334 static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
334 { 335 {
335 return readl(pcie->afi + offset); 336 return readl(pcie->afi + offset);
336 } 337 }
337 338
338 static inline void pads_writel(struct tegra_pcie *pcie, u32 value, 339 static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
339 unsigned long offset) 340 unsigned long offset)
340 { 341 {
341 writel(value, pcie->pads + offset); 342 writel(value, pcie->pads + offset);
342 } 343 }
343 344
344 static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset) 345 static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
345 { 346 {
346 return readl(pcie->pads + offset); 347 return readl(pcie->pads + offset);
347 } 348 }
348 349
349 /* 350 /*
350 * The configuration space mapping on Tegra is somewhat similar to the ECAM 351 * The configuration space mapping on Tegra is somewhat similar to the ECAM
351 * defined by PCIe. However it deviates a bit in how the 4 bits for extended 352 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
352 * register accesses are mapped: 353 * register accesses are mapped:
353 * 354 *
354 * [27:24] extended register number 355 * [27:24] extended register number
355 * [23:16] bus number 356 * [23:16] bus number
356 * [15:11] device number 357 * [15:11] device number
357 * [10: 8] function number 358 * [10: 8] function number
358 * [ 7: 0] register number 359 * [ 7: 0] register number
359 * 360 *
360 * Mapping the whole extended configuration space would require 256 MiB of 361 * Mapping the whole extended configuration space would require 256 MiB of
361 * virtual address space, only a small part of which will actually be used. 362 * virtual address space, only a small part of which will actually be used.
362 * To work around this, a 1 MiB of virtual addresses are allocated per bus 363 * To work around this, a 1 MiB of virtual addresses are allocated per bus
363 * when the bus is first accessed. When the physical range is mapped, the 364 * when the bus is first accessed. When the physical range is mapped, the
364 * the bus number bits are hidden so that the extended register number bits 365 * the bus number bits are hidden so that the extended register number bits
365 * appear as bits [19:16]. Therefore the virtual mapping looks like this: 366 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
366 * 367 *
367 * [19:16] extended register number 368 * [19:16] extended register number
368 * [15:11] device number 369 * [15:11] device number
369 * [10: 8] function number 370 * [10: 8] function number
370 * [ 7: 0] register number 371 * [ 7: 0] register number
371 * 372 *
372 * This is achieved by stitching together 16 chunks of 64 KiB of physical 373 * This is achieved by stitching together 16 chunks of 64 KiB of physical
373 * address space via the MMU. 374 * address space via the MMU.
374 */ 375 */
/*
 * Compute the offset into a per-bus config mapping for the given
 * device/function and register (see the layout comment above).
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	offset = (where & 0xfc) | (PCI_FUNC(devfn) << 8) |
		 (PCI_SLOT(devfn) << 11) | ((where & 0xf00) << 8);

	return offset;
}
380 381
381 static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie, 382 static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
382 unsigned int busnr) 383 unsigned int busnr)
383 { 384 {
384 pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN | 385 pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
385 L_PTE_MT_DEV_SHARED | L_PTE_SHARED; 386 L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
386 phys_addr_t cs = pcie->cs->start; 387 phys_addr_t cs = pcie->cs->start;
387 struct tegra_pcie_bus *bus; 388 struct tegra_pcie_bus *bus;
388 unsigned int i; 389 unsigned int i;
389 int err; 390 int err;
390 391
391 bus = kzalloc(sizeof(*bus), GFP_KERNEL); 392 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
392 if (!bus) 393 if (!bus)
393 return ERR_PTR(-ENOMEM); 394 return ERR_PTR(-ENOMEM);
394 395
395 INIT_LIST_HEAD(&bus->list); 396 INIT_LIST_HEAD(&bus->list);
396 bus->nr = busnr; 397 bus->nr = busnr;
397 398
398 /* allocate 1 MiB of virtual addresses */ 399 /* allocate 1 MiB of virtual addresses */
399 bus->area = get_vm_area(SZ_1M, VM_IOREMAP); 400 bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
400 if (!bus->area) { 401 if (!bus->area) {
401 err = -ENOMEM; 402 err = -ENOMEM;
402 goto free; 403 goto free;
403 } 404 }
404 405
405 /* map each of the 16 chunks of 64 KiB each */ 406 /* map each of the 16 chunks of 64 KiB each */
406 for (i = 0; i < 16; i++) { 407 for (i = 0; i < 16; i++) {
407 unsigned long virt = (unsigned long)bus->area->addr + 408 unsigned long virt = (unsigned long)bus->area->addr +
408 i * SZ_64K; 409 i * SZ_64K;
409 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K; 410 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
410 411
411 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot); 412 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
412 if (err < 0) { 413 if (err < 0) {
413 dev_err(pcie->dev, "ioremap_page_range() failed: %d\n", 414 dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
414 err); 415 err);
415 goto unmap; 416 goto unmap;
416 } 417 }
417 } 418 }
418 419
419 return bus; 420 return bus;
420 421
421 unmap: 422 unmap:
422 vunmap(bus->area->addr); 423 vunmap(bus->area->addr);
423 free: 424 free:
424 kfree(bus); 425 kfree(bus);
425 return ERR_PTR(err); 426 return ERR_PTR(err);
426 } 427 }
427 428
428 /* 429 /*
429 * Look up a virtual address mapping for the specified bus number. If no such 430 * Look up a virtual address mapping for the specified bus number. If no such
430 * mapping exists, try to create one. 431 * mapping exists, try to create one.
431 */ 432 */
432 static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie, 433 static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
433 unsigned int busnr) 434 unsigned int busnr)
434 { 435 {
435 struct tegra_pcie_bus *bus; 436 struct tegra_pcie_bus *bus;
436 437
437 list_for_each_entry(bus, &pcie->buses, list) 438 list_for_each_entry(bus, &pcie->buses, list)
438 if (bus->nr == busnr) 439 if (bus->nr == busnr)
439 return (void __iomem *)bus->area->addr; 440 return (void __iomem *)bus->area->addr;
440 441
441 bus = tegra_pcie_bus_alloc(pcie, busnr); 442 bus = tegra_pcie_bus_alloc(pcie, busnr);
442 if (IS_ERR(bus)) 443 if (IS_ERR(bus))
443 return NULL; 444 return NULL;
444 445
445 list_add_tail(&bus->list, &pcie->buses); 446 list_add_tail(&bus->list, &pcie->buses);
446 447
447 return (void __iomem *)bus->area->addr; 448 return (void __iomem *)bus->area->addr;
448 } 449 }
449 450
450 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus, 451 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
451 unsigned int devfn, 452 unsigned int devfn,
452 int where) 453 int where)
453 { 454 {
454 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata); 455 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
455 void __iomem *addr = NULL; 456 void __iomem *addr = NULL;
456 457
457 if (bus->number == 0) { 458 if (bus->number == 0) {
458 unsigned int slot = PCI_SLOT(devfn); 459 unsigned int slot = PCI_SLOT(devfn);
459 struct tegra_pcie_port *port; 460 struct tegra_pcie_port *port;
460 461
461 list_for_each_entry(port, &pcie->ports, list) { 462 list_for_each_entry(port, &pcie->ports, list) {
462 if (port->index + 1 == slot) { 463 if (port->index + 1 == slot) {
463 addr = port->base + (where & ~3); 464 addr = port->base + (where & ~3);
464 break; 465 break;
465 } 466 }
466 } 467 }
467 } else { 468 } else {
468 addr = tegra_pcie_bus_map(pcie, bus->number); 469 addr = tegra_pcie_bus_map(pcie, bus->number);
469 if (!addr) { 470 if (!addr) {
470 dev_err(pcie->dev, 471 dev_err(pcie->dev,
471 "failed to map cfg. space for bus %u\n", 472 "failed to map cfg. space for bus %u\n",
472 bus->number); 473 bus->number);
473 return NULL; 474 return NULL;
474 } 475 }
475 476
476 addr += tegra_pcie_conf_offset(devfn, where); 477 addr += tegra_pcie_conf_offset(devfn, where);
477 } 478 }
478 479
479 return addr; 480 return addr;
480 } 481 }
481 482
482 static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, 483 static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
483 int where, int size, u32 *value) 484 int where, int size, u32 *value)
484 { 485 {
485 void __iomem *addr; 486 void __iomem *addr;
486 487
487 addr = tegra_pcie_conf_address(bus, devfn, where); 488 addr = tegra_pcie_conf_address(bus, devfn, where);
488 if (!addr) { 489 if (!addr) {
489 *value = 0xffffffff; 490 *value = 0xffffffff;
490 return PCIBIOS_DEVICE_NOT_FOUND; 491 return PCIBIOS_DEVICE_NOT_FOUND;
491 } 492 }
492 493
493 *value = readl(addr); 494 *value = readl(addr);
494 495
495 if (size == 1) 496 if (size == 1)
496 *value = (*value >> (8 * (where & 3))) & 0xff; 497 *value = (*value >> (8 * (where & 3))) & 0xff;
497 else if (size == 2) 498 else if (size == 2)
498 *value = (*value >> (8 * (where & 3))) & 0xffff; 499 *value = (*value >> (8 * (where & 3))) & 0xffff;
499 500
500 return PCIBIOS_SUCCESSFUL; 501 return PCIBIOS_SUCCESSFUL;
501 } 502 }
502 503
503 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, 504 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
504 int where, int size, u32 value) 505 int where, int size, u32 value)
505 { 506 {
506 void __iomem *addr; 507 void __iomem *addr;
507 u32 mask, tmp; 508 u32 mask, tmp;
508 509
509 addr = tegra_pcie_conf_address(bus, devfn, where); 510 addr = tegra_pcie_conf_address(bus, devfn, where);
510 if (!addr) 511 if (!addr)
511 return PCIBIOS_DEVICE_NOT_FOUND; 512 return PCIBIOS_DEVICE_NOT_FOUND;
512 513
513 if (size == 4) { 514 if (size == 4) {
514 writel(value, addr); 515 writel(value, addr);
515 return PCIBIOS_SUCCESSFUL; 516 return PCIBIOS_SUCCESSFUL;
516 } 517 }
517 518
518 if (size == 2) 519 if (size == 2)
519 mask = ~(0xffff << ((where & 0x3) * 8)); 520 mask = ~(0xffff << ((where & 0x3) * 8));
520 else if (size == 1) 521 else if (size == 1)
521 mask = ~(0xff << ((where & 0x3) * 8)); 522 mask = ~(0xff << ((where & 0x3) * 8));
522 else 523 else
523 return PCIBIOS_BAD_REGISTER_NUMBER; 524 return PCIBIOS_BAD_REGISTER_NUMBER;
524 525
525 tmp = readl(addr) & mask; 526 tmp = readl(addr) & mask;
526 tmp |= value << ((where & 0x3) * 8); 527 tmp |= value << ((where & 0x3) * 8);
527 writel(tmp, addr); 528 writel(tmp, addr);
528 529
529 return PCIBIOS_SUCCESSFUL; 530 return PCIBIOS_SUCCESSFUL;
530 } 531 }
531 532
/* Configuration-space accessors handed to the PCI core for this bridge. */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};
536 537
537 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) 538 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
538 { 539 {
539 unsigned long ret = 0; 540 unsigned long ret = 0;
540 541
541 switch (port->index) { 542 switch (port->index) {
542 case 0: 543 case 0:
543 ret = AFI_PEX0_CTRL; 544 ret = AFI_PEX0_CTRL;
544 break; 545 break;
545 546
546 case 1: 547 case 1:
547 ret = AFI_PEX1_CTRL; 548 ret = AFI_PEX1_CTRL;
548 break; 549 break;
549 550
550 case 2: 551 case 2:
551 ret = AFI_PEX2_CTRL; 552 ret = AFI_PEX2_CTRL;
552 break; 553 break;
553 } 554 }
554 555
555 return ret; 556 return ret;
556 } 557 }
557 558
558 static void tegra_pcie_port_reset(struct tegra_pcie_port *port) 559 static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
559 { 560 {
560 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); 561 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
561 unsigned long value; 562 unsigned long value;
562 563
563 /* pulse reset signal */ 564 /* pulse reset signal */
564 value = afi_readl(port->pcie, ctrl); 565 value = afi_readl(port->pcie, ctrl);
565 value &= ~AFI_PEX_CTRL_RST; 566 value &= ~AFI_PEX_CTRL_RST;
566 afi_writel(port->pcie, value, ctrl); 567 afi_writel(port->pcie, value, ctrl);
567 568
568 usleep_range(1000, 2000); 569 usleep_range(1000, 2000);
569 570
570 value = afi_readl(port->pcie, ctrl); 571 value = afi_readl(port->pcie, ctrl);
571 value |= AFI_PEX_CTRL_RST; 572 value |= AFI_PEX_CTRL_RST;
572 afi_writel(port->pcie, value, ctrl); 573 afi_writel(port->pcie, value, ctrl);
573 } 574 }
574 575
575 static void tegra_pcie_port_enable(struct tegra_pcie_port *port) 576 static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
576 { 577 {
577 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data; 578 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
578 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); 579 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
579 unsigned long value; 580 unsigned long value;
580 581
581 /* enable reference clock */ 582 /* enable reference clock */
582 value = afi_readl(port->pcie, ctrl); 583 value = afi_readl(port->pcie, ctrl);
583 value |= AFI_PEX_CTRL_REFCLK_EN; 584 value |= AFI_PEX_CTRL_REFCLK_EN;
584 585
585 if (soc->has_pex_clkreq_en) 586 if (soc->has_pex_clkreq_en)
586 value |= AFI_PEX_CTRL_CLKREQ_EN; 587 value |= AFI_PEX_CTRL_CLKREQ_EN;
587 588
588 value |= AFI_PEX_CTRL_OVERRIDE_EN; 589 value |= AFI_PEX_CTRL_OVERRIDE_EN;
589 590
590 afi_writel(port->pcie, value, ctrl); 591 afi_writel(port->pcie, value, ctrl);
591 592
592 tegra_pcie_port_reset(port); 593 tegra_pcie_port_reset(port);
593 } 594 }
594 595
595 static void tegra_pcie_port_disable(struct tegra_pcie_port *port) 596 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
596 { 597 {
597 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data; 598 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
598 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); 599 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
599 unsigned long value; 600 unsigned long value;
600 601
601 /* assert port reset */ 602 /* assert port reset */
602 value = afi_readl(port->pcie, ctrl); 603 value = afi_readl(port->pcie, ctrl);
603 value &= ~AFI_PEX_CTRL_RST; 604 value &= ~AFI_PEX_CTRL_RST;
604 afi_writel(port->pcie, value, ctrl); 605 afi_writel(port->pcie, value, ctrl);
605 606
606 /* disable reference clock */ 607 /* disable reference clock */
607 value = afi_readl(port->pcie, ctrl); 608 value = afi_readl(port->pcie, ctrl);
608 609
609 if (soc->has_pex_clkreq_en) 610 if (soc->has_pex_clkreq_en)
610 value &= ~AFI_PEX_CTRL_CLKREQ_EN; 611 value &= ~AFI_PEX_CTRL_CLKREQ_EN;
611 612
612 value &= ~AFI_PEX_CTRL_REFCLK_EN; 613 value &= ~AFI_PEX_CTRL_REFCLK_EN;
613 afi_writel(port->pcie, value, ctrl); 614 afi_writel(port->pcie, value, ctrl);
614 } 615 }
615 616
/*
 * Release everything associated with a root port: unmap its register
 * window, give back the memory region, unlink it from the controller's
 * port list and free the port structure itself.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}
626 627
627 static void tegra_pcie_fixup_bridge(struct pci_dev *dev) 628 static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
628 { 629 {
629 u16 reg; 630 u16 reg;
630 631
631 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) { 632 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
632 pci_read_config_word(dev, PCI_COMMAND, &reg); 633 pci_read_config_word(dev, PCI_COMMAND, &reg);
633 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | 634 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
634 PCI_COMMAND_MASTER | PCI_COMMAND_SERR); 635 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
635 pci_write_config_word(dev, PCI_COMMAND, reg); 636 pci_write_config_word(dev, PCI_COMMAND, reg);
636 } 637 }
637 } 638 }
638 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge); 639 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
639 640
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* apply to the root-port device IDs of the supported Tegra generations */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
649 650
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Relaxed Ordering Enable bit in the Device Control register */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
656 657
657 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) 658 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
658 { 659 {
659 struct tegra_pcie *pcie = sys_to_pcie(sys); 660 struct tegra_pcie *pcie = sys_to_pcie(sys);
660 int err; 661 int err;
661 phys_addr_t io_start;
662 662
663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem); 663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
664 if (err < 0) 664 if (err < 0)
665 return err; 665 return err;
666 666
667 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch); 667 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
668 if (err) 668 if (err)
669 return err; 669 return err;
670 670
671 io_start = pci_pio_to_address(pcie->io.start);
672
673 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); 671 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
674 pci_add_resource_offset(&sys->resources, &pcie->prefetch, 672 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
675 sys->mem_offset); 673 sys->mem_offset);
676 pci_add_resource(&sys->resources, &pcie->busn); 674 pci_add_resource(&sys->resources, &pcie->busn);
677 675
678 pci_ioremap_io(nr * SZ_64K, io_start); 676 pci_ioremap_io(pcie->pio.start, pcie->io.start);
679 677
680 return 1; 678 return 1;
681 } 679 }
682 680
683 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) 681 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
684 { 682 {
685 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata); 683 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
686 int irq; 684 int irq;
687 685
688 tegra_cpuidle_pcie_irqs_in_use(); 686 tegra_cpuidle_pcie_irqs_in_use();
689 687
690 irq = of_irq_parse_and_map_pci(pdev, slot, pin); 688 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
691 if (!irq) 689 if (!irq)
692 irq = pcie->irq; 690 irq = pcie->irq;
693 691
694 return irq; 692 return irq;
695 } 693 }
696 694
697 static void tegra_pcie_add_bus(struct pci_bus *bus) 695 static void tegra_pcie_add_bus(struct pci_bus *bus)
698 { 696 {
699 if (IS_ENABLED(CONFIG_PCI_MSI)) { 697 if (IS_ENABLED(CONFIG_PCI_MSI)) {
700 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata); 698 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
701 699
702 bus->msi = &pcie->msi.chip; 700 bus->msi = &pcie->msi.chip;
703 } 701 }
704 } 702 }
705 703
706 static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys) 704 static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
707 { 705 {
708 struct tegra_pcie *pcie = sys_to_pcie(sys); 706 struct tegra_pcie *pcie = sys_to_pcie(sys);
709 struct pci_bus *bus; 707 struct pci_bus *bus;
710 708
711 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys, 709 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
712 &sys->resources); 710 &sys->resources);
713 if (!bus) 711 if (!bus)
714 return NULL; 712 return NULL;
715 713
716 pci_scan_child_bus(bus); 714 pci_scan_child_bus(bus);
717 715
718 return bus; 716 return bus;
719 } 717 }
720 718
721 static irqreturn_t tegra_pcie_isr(int irq, void *arg) 719 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
722 { 720 {
723 const char *err_msg[] = { 721 const char *err_msg[] = {
724 "Unknown", 722 "Unknown",
725 "AXI slave error", 723 "AXI slave error",
726 "AXI decode error", 724 "AXI decode error",
727 "Target abort", 725 "Target abort",
728 "Master abort", 726 "Master abort",
729 "Invalid write", 727 "Invalid write",
730 "Legacy interrupt", 728 "Legacy interrupt",
731 "Response decoding error", 729 "Response decoding error",
732 "AXI response decoding error", 730 "AXI response decoding error",
733 "Transaction timeout", 731 "Transaction timeout",
734 "Slot present pin change", 732 "Slot present pin change",
735 "Slot clock request change", 733 "Slot clock request change",
736 "TMS clock ramp change", 734 "TMS clock ramp change",
737 "TMS ready for power down", 735 "TMS ready for power down",
738 "Peer2Peer error", 736 "Peer2Peer error",
739 }; 737 };
740 struct tegra_pcie *pcie = arg; 738 struct tegra_pcie *pcie = arg;
741 u32 code, signature; 739 u32 code, signature;
742 740
743 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK; 741 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
744 signature = afi_readl(pcie, AFI_INTR_SIGNATURE); 742 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
745 afi_writel(pcie, 0, AFI_INTR_CODE); 743 afi_writel(pcie, 0, AFI_INTR_CODE);
746 744
747 if (code == AFI_INTR_LEGACY) 745 if (code == AFI_INTR_LEGACY)
748 return IRQ_NONE; 746 return IRQ_NONE;
749 747
750 if (code >= ARRAY_SIZE(err_msg)) 748 if (code >= ARRAY_SIZE(err_msg))
751 code = 0; 749 code = 0;
752 750
753 /* 751 /*
754 * do not pollute kernel log with master abort reports since they 752 * do not pollute kernel log with master abort reports since they
755 * happen a lot during enumeration 753 * happen a lot during enumeration
756 */ 754 */
757 if (code == AFI_INTR_MASTER_ABORT) 755 if (code == AFI_INTR_MASTER_ABORT)
758 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code], 756 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
759 signature); 757 signature);
760 else 758 else
761 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code], 759 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
762 signature); 760 signature);
763 761
764 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT || 762 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
765 code == AFI_INTR_FPCI_DECODE_ERROR) { 763 code == AFI_INTR_FPCI_DECODE_ERROR) {
766 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff; 764 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
767 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc); 765 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
768 766
769 if (code == AFI_INTR_MASTER_ABORT) 767 if (code == AFI_INTR_MASTER_ABORT)
770 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address); 768 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
771 else 769 else
772 dev_err(pcie->dev, " FPCI address: %10llx\n", address); 770 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
773 } 771 }
774 772
775 return IRQ_HANDLED; 773 return IRQ_HANDLED;
776 } 774 }
777 775
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
/*
 * Program the AFI address-translation BARs that map AXI (CPU-visible)
 * address ranges onto the FPCI map above.  Each BAR takes an AXI start
 * address, a size in 4 KiB units (hence the ">> 12" shifts) and an FPCI
 * target.  Unused BARs and the MSI translation are cleared here; MSI is
 * set up later only when needed.
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar (physical I/O range, not the PIO range) */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
844 841
845 static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout) 842 static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
846 { 843 {
847 const struct tegra_pcie_soc_data *soc = pcie->soc_data; 844 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
848 u32 value; 845 u32 value;
849 846
850 timeout = jiffies + msecs_to_jiffies(timeout); 847 timeout = jiffies + msecs_to_jiffies(timeout);
851 848
852 while (time_before(jiffies, timeout)) { 849 while (time_before(jiffies, timeout)) {
853 value = pads_readl(pcie, soc->pads_pll_ctl); 850 value = pads_readl(pcie, soc->pads_pll_ctl);
854 if (value & PADS_PLL_CTL_LOCKDET) 851 if (value & PADS_PLL_CTL_LOCKDET)
855 return 0; 852 return 0;
856 } 853 }
857 854
858 return -ETIMEDOUT; 855 return -ETIMEDOUT;
859 } 856 }
860 857
/*
 * Bring up the internal PCIe PHY: select the pad mode, configure and
 * reset the PLL, wait for lock, then enable the TX/RX data paths.
 * The register accesses below are order-sensitive.  Returns 0 on
 * success or a negative error code if the PLL fails to lock.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver (CFG1 only on >2-port SoCs) */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}
921 918
/*
 * Enable the PCIe controller: configure PLL power-down and pad bias,
 * select the lane crossbar configuration and enable only the ports that
 * were found in the device tree, power on the PHY, release the PCIe
 * interface from reset and enable FPCI plus error interrupts.
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down (only when a PHY is provided) */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* ...then re-enable each port present on this board */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* allow Gen2 link speed only where the SoC supports it */
	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* use the generic PHY when provided, the built-in sequence otherwise */
	if (!pcie->phy)
		err = tegra_pcie_phy_enable(pcie);
	else
		err = phy_power_on(pcie->phy);

	if (err < 0) {
		dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
		return err;
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
997 994
/*
 * Power down the controller: power off the PHY, assert all resets,
 * power-gate the PCIe partition and disable the supply regulators.
 * Failures are logged but not propagated since this is teardown.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	int err;

	/* TODO: disable and unprepare clocks? */

	err = phy_power_off(pcie->phy);
	if (err < 0)
		dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
}
1018 1015
/*
 * Power up the controller: assert resets and power-gate the partition to
 * reach a known state, enable the regulators, run the power-up sequence
 * (ungates the partition with the pex clock/reset), then enable the AFI,
 * optional CML and PLLE clocks.  Returns 0 or a negative error code.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
	/* NOTE(review): the error above is logged but not returned — verify
	 * whether continuing without supplies is intentional best-effort. */

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
				err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
1068 1065
/*
 * Look up all clocks this controller needs (devm-managed, so nothing to
 * release on failure).  The "cml" clock exists only on SoCs that set
 * has_cml_clk.  Returns 0 or the PTR_ERR of the first missing clock.
 */
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;

	pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}
1093 1090
1094 static int tegra_pcie_resets_get(struct tegra_pcie *pcie) 1091 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1095 { 1092 {
1096 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex"); 1093 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1097 if (IS_ERR(pcie->pex_rst)) 1094 if (IS_ERR(pcie->pex_rst))
1098 return PTR_ERR(pcie->pex_rst); 1095 return PTR_ERR(pcie->pex_rst);
1099 1096
1100 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi"); 1097 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1101 if (IS_ERR(pcie->afi_rst)) 1098 if (IS_ERR(pcie->afi_rst))
1102 return PTR_ERR(pcie->afi_rst); 1099 return PTR_ERR(pcie->afi_rst);
1103 1100
1104 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x"); 1101 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1105 if (IS_ERR(pcie->pcie_xrst)) 1102 if (IS_ERR(pcie->pcie_xrst))
1106 return PTR_ERR(pcie->pcie_xrst); 1103 return PTR_ERR(pcie->pcie_xrst);
1107 1104
1108 return 0; 1105 return 0;
1109 } 1106 }
1110 1107
/*
 * Acquire everything the host controller needs: clocks, resets, the
 * optional PHY, the "pads" and "afi" register apertures, the extended
 * configuration space window ("cs", requested but remapped on demand)
 * and the legacy interrupt.
 *
 * On success the controller has also been powered up via
 * tegra_pcie_power_on(); on failure after that point the power-up is
 * undone. The devm-managed lookups/mappings need no explicit cleanup.
 *
 * NOTE(review): phy_init() is not undone on the error paths below —
 * phy_exit() only happens later in tegra_pcie_put_resources(); confirm
 * this is intentional.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
		return err;
	}

	/* optional PHY: devm_phy_optional_get() returns NULL if absent */
	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
1197 1194
1198 static int tegra_pcie_put_resources(struct tegra_pcie *pcie) 1195 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1199 { 1196 {
1200 int err; 1197 int err;
1201 1198
1202 if (pcie->irq > 0) 1199 if (pcie->irq > 0)
1203 free_irq(pcie->irq, pcie); 1200 free_irq(pcie->irq, pcie);
1204 1201
1205 tegra_pcie_power_off(pcie); 1202 tegra_pcie_power_off(pcie);
1206 1203
1207 err = phy_exit(pcie->phy); 1204 err = phy_exit(pcie->phy);
1208 if (err < 0) 1205 if (err < 0)
1209 dev_err(pcie->dev, "failed to teardown PHY: %d\n", err); 1206 dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
1210 1207
1211 return 0; 1208 return 0;
1212 } 1209 }
1213 1210
1214 static int tegra_msi_alloc(struct tegra_msi *chip) 1211 static int tegra_msi_alloc(struct tegra_msi *chip)
1215 { 1212 {
1216 int msi; 1213 int msi;
1217 1214
1218 mutex_lock(&chip->lock); 1215 mutex_lock(&chip->lock);
1219 1216
1220 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); 1217 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1221 if (msi < INT_PCI_MSI_NR) 1218 if (msi < INT_PCI_MSI_NR)
1222 set_bit(msi, chip->used); 1219 set_bit(msi, chip->used);
1223 else 1220 else
1224 msi = -ENOSPC; 1221 msi = -ENOSPC;
1225 1222
1226 mutex_unlock(&chip->lock); 1223 mutex_unlock(&chip->lock);
1227 1224
1228 return msi; 1225 return msi;
1229 } 1226 }
1230 1227
1231 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq) 1228 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1232 { 1229 {
1233 struct device *dev = chip->chip.dev; 1230 struct device *dev = chip->chip.dev;
1234 1231
1235 mutex_lock(&chip->lock); 1232 mutex_lock(&chip->lock);
1236 1233
1237 if (!test_bit(irq, chip->used)) 1234 if (!test_bit(irq, chip->used))
1238 dev_err(dev, "trying to free unused MSI#%lu\n", irq); 1235 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1239 else 1236 else
1240 clear_bit(irq, chip->used); 1237 clear_bit(irq, chip->used);
1241 1238
1242 mutex_unlock(&chip->lock); 1239 mutex_unlock(&chip->lock);
1243 } 1240 }
1244 1241
/*
 * Handler for the aggregating "msi" interrupt: the AFI latches incoming
 * MSI writes into eight 32-bit vector registers (AFI_MSI_VEC0..7). Scan
 * each register, ack every pending bit and dispatch the corresponding
 * Linux IRQ. Returns IRQ_HANDLED if at least one vector was serviced.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			/* global vector number across all eight registers */
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1285 1282
1286 static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 1283 static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1287 struct msi_desc *desc) 1284 struct msi_desc *desc)
1288 { 1285 {
1289 struct tegra_msi *msi = to_tegra_msi(chip); 1286 struct tegra_msi *msi = to_tegra_msi(chip);
1290 struct msi_msg msg; 1287 struct msi_msg msg;
1291 unsigned int irq; 1288 unsigned int irq;
1292 int hwirq; 1289 int hwirq;
1293 1290
1294 hwirq = tegra_msi_alloc(msi); 1291 hwirq = tegra_msi_alloc(msi);
1295 if (hwirq < 0) 1292 if (hwirq < 0)
1296 return hwirq; 1293 return hwirq;
1297 1294
1298 irq = irq_create_mapping(msi->domain, hwirq); 1295 irq = irq_create_mapping(msi->domain, hwirq);
1299 if (!irq) { 1296 if (!irq) {
1300 tegra_msi_free(msi, hwirq); 1297 tegra_msi_free(msi, hwirq);
1301 return -EINVAL; 1298 return -EINVAL;
1302 } 1299 }
1303 1300
1304 irq_set_msi_desc(irq, desc); 1301 irq_set_msi_desc(irq, desc);
1305 1302
1306 msg.address_lo = virt_to_phys((void *)msi->pages); 1303 msg.address_lo = virt_to_phys((void *)msi->pages);
1307 /* 32 bit address only */ 1304 /* 32 bit address only */
1308 msg.address_hi = 0; 1305 msg.address_hi = 0;
1309 msg.data = hwirq; 1306 msg.data = hwirq;
1310 1307
1311 write_msi_msg(irq, &msg); 1308 write_msi_msg(irq, &msg);
1312 1309
1313 return 0; 1310 return 0;
1314 } 1311 }
1315 1312
1316 static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 1313 static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1317 { 1314 {
1318 struct tegra_msi *msi = to_tegra_msi(chip); 1315 struct tegra_msi *msi = to_tegra_msi(chip);
1319 struct irq_data *d = irq_get_irq_data(irq); 1316 struct irq_data *d = irq_get_irq_data(irq);
1320 irq_hw_number_t hwirq = irqd_to_hwirq(d); 1317 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1321 1318
1322 irq_dispose_mapping(irq); 1319 irq_dispose_mapping(irq);
1323 tegra_msi_free(msi, hwirq); 1320 tegra_msi_free(msi, hwirq);
1324 } 1321 }
1325 1322
/*
 * IRQ chip backing each individual MSI vector; masking is delegated to
 * the generic MSI helpers, which toggle the per-vector enable bit in
 * the endpoint's MSI capability.
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
1333 1330
/*
 * IRQ domain .map callback: associate a newly created virtual IRQ with
 * the MSI irq_chip and the owning tegra_msi (passed as host_data).
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	/* mark the descriptor usable (ARM-specific flag handling) */
	set_irq_flags(irq, IRQF_VALID);

	/* inform cpuidle that PCIe interrupts are now in use */
	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1345 1342
/* only .map is needed; unmapping is done explicitly during teardown */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1349 1346
1350 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) 1347 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1351 { 1348 {
1352 struct platform_device *pdev = to_platform_device(pcie->dev); 1349 struct platform_device *pdev = to_platform_device(pcie->dev);
1353 const struct tegra_pcie_soc_data *soc = pcie->soc_data; 1350 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1354 struct tegra_msi *msi = &pcie->msi; 1351 struct tegra_msi *msi = &pcie->msi;
1355 unsigned long base; 1352 unsigned long base;
1356 int err; 1353 int err;
1357 u32 reg; 1354 u32 reg;
1358 1355
1359 mutex_init(&msi->lock); 1356 mutex_init(&msi->lock);
1360 1357
1361 msi->chip.dev = pcie->dev; 1358 msi->chip.dev = pcie->dev;
1362 msi->chip.setup_irq = tegra_msi_setup_irq; 1359 msi->chip.setup_irq = tegra_msi_setup_irq;
1363 msi->chip.teardown_irq = tegra_msi_teardown_irq; 1360 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1364 1361
1365 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR, 1362 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1366 &msi_domain_ops, &msi->chip); 1363 &msi_domain_ops, &msi->chip);
1367 if (!msi->domain) { 1364 if (!msi->domain) {
1368 dev_err(&pdev->dev, "failed to create IRQ domain\n"); 1365 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1369 return -ENOMEM; 1366 return -ENOMEM;
1370 } 1367 }
1371 1368
1372 err = platform_get_irq_byname(pdev, "msi"); 1369 err = platform_get_irq_byname(pdev, "msi");
1373 if (err < 0) { 1370 if (err < 0) {
1374 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err); 1371 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1375 goto err; 1372 goto err;
1376 } 1373 }
1377 1374
1378 msi->irq = err; 1375 msi->irq = err;
1379 1376
1380 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0, 1377 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1381 tegra_msi_irq_chip.name, pcie); 1378 tegra_msi_irq_chip.name, pcie);
1382 if (err < 0) { 1379 if (err < 0) {
1383 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); 1380 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1384 goto err; 1381 goto err;
1385 } 1382 }
1386 1383
1387 /* setup AFI/FPCI range */ 1384 /* setup AFI/FPCI range */
1388 msi->pages = __get_free_pages(GFP_KERNEL, 0); 1385 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1389 base = virt_to_phys((void *)msi->pages); 1386 base = virt_to_phys((void *)msi->pages);
1390 1387
1391 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); 1388 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1392 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST); 1389 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1393 /* this register is in 4K increments */ 1390 /* this register is in 4K increments */
1394 afi_writel(pcie, 1, AFI_MSI_BAR_SZ); 1391 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1395 1392
1396 /* enable all MSI vectors */ 1393 /* enable all MSI vectors */
1397 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0); 1394 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1398 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1); 1395 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1399 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2); 1396 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1400 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3); 1397 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1401 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4); 1398 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1402 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5); 1399 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1403 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6); 1400 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1404 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7); 1401 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1405 1402
1406 /* and unmask the MSI interrupt */ 1403 /* and unmask the MSI interrupt */
1407 reg = afi_readl(pcie, AFI_INTR_MASK); 1404 reg = afi_readl(pcie, AFI_INTR_MASK);
1408 reg |= AFI_INTR_MASK_MSI_MASK; 1405 reg |= AFI_INTR_MASK_MSI_MASK;
1409 afi_writel(pcie, reg, AFI_INTR_MASK); 1406 afi_writel(pcie, reg, AFI_INTR_MASK);
1410 1407
1411 return 0; 1408 return 0;
1412 1409
1413 err: 1410 err:
1414 irq_domain_remove(msi->domain); 1411 irq_domain_remove(msi->domain);
1415 return err; 1412 return err;
1416 } 1413 }
1417 1414
/*
 * Tear down MSI support: mask the aggregating interrupt, disable all
 * hardware vectors, release the MSI target page and the interrupt, and
 * dispose of all remaining virtual IRQ mappings before removing the
 * domain. Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	/* release the page that endpoints targeted with MSI writes */
	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* dispose of any mappings still present before domain removal */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1454 1451
1455 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes, 1452 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1456 u32 *xbar) 1453 u32 *xbar)
1457 { 1454 {
1458 struct device_node *np = pcie->dev->of_node; 1455 struct device_node *np = pcie->dev->of_node;
1459 1456
1460 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { 1457 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1461 switch (lanes) { 1458 switch (lanes) {
1462 case 0x0000104: 1459 case 0x0000104:
1463 dev_info(pcie->dev, "4x1, 1x1 configuration\n"); 1460 dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1464 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1; 1461 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1465 return 0; 1462 return 0;
1466 1463
1467 case 0x0000102: 1464 case 0x0000102:
1468 dev_info(pcie->dev, "2x1, 1x1 configuration\n"); 1465 dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1469 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1; 1466 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1470 return 0; 1467 return 0;
1471 } 1468 }
1472 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { 1469 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1473 switch (lanes) { 1470 switch (lanes) {
1474 case 0x00000204: 1471 case 0x00000204:
1475 dev_info(pcie->dev, "4x1, 2x1 configuration\n"); 1472 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1476 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420; 1473 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1477 return 0; 1474 return 0;
1478 1475
1479 case 0x00020202: 1476 case 0x00020202:
1480 dev_info(pcie->dev, "2x3 configuration\n"); 1477 dev_info(pcie->dev, "2x3 configuration\n");
1481 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222; 1478 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1482 return 0; 1479 return 0;
1483 1480
1484 case 0x00010104: 1481 case 0x00010104:
1485 dev_info(pcie->dev, "4x1, 1x2 configuration\n"); 1482 dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1486 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411; 1483 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1487 return 0; 1484 return 0;
1488 } 1485 }
1489 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { 1486 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1490 switch (lanes) { 1487 switch (lanes) {
1491 case 0x00000004: 1488 case 0x00000004:
1492 dev_info(pcie->dev, "single-mode configuration\n"); 1489 dev_info(pcie->dev, "single-mode configuration\n");
1493 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE; 1490 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1494 return 0; 1491 return 0;
1495 1492
1496 case 0x00000202: 1493 case 0x00000202:
1497 dev_info(pcie->dev, "dual-mode configuration\n"); 1494 dev_info(pcie->dev, "dual-mode configuration\n");
1498 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL; 1495 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1499 return 0; 1496 return 0;
1500 } 1497 }
1501 } 1498 }
1502 1499
1503 return -EINVAL; 1500 return -EINVAL;
1504 } 1501 }
1505 1502
1506 /* 1503 /*
1507 * Check whether a given set of supplies is available in a device tree node. 1504 * Check whether a given set of supplies is available in a device tree node.
1508 * This is used to check whether the new or the legacy device tree bindings 1505 * This is used to check whether the new or the legacy device tree bindings
1509 * should be used. 1506 * should be used.
1510 */ 1507 */
1511 static bool of_regulator_bulk_available(struct device_node *np, 1508 static bool of_regulator_bulk_available(struct device_node *np,
1512 struct regulator_bulk_data *supplies, 1509 struct regulator_bulk_data *supplies,
1513 unsigned int num_supplies) 1510 unsigned int num_supplies)
1514 { 1511 {
1515 char property[32]; 1512 char property[32];
1516 unsigned int i; 1513 unsigned int i;
1517 1514
1518 for (i = 0; i < num_supplies; i++) { 1515 for (i = 0; i < num_supplies; i++) {
1519 snprintf(property, 32, "%s-supply", supplies[i].supply); 1516 snprintf(property, 32, "%s-supply", supplies[i].supply);
1520 1517
1521 if (of_find_property(np, property, NULL) == NULL) 1518 if (of_find_property(np, property, NULL) == NULL)
1522 return false; 1519 return false;
1523 } 1520 }
1524 1521
1525 return true; 1522 return true;
1526 } 1523 }
1527 1524
1528 /* 1525 /*
1529 * Old versions of the device tree binding for this device used a set of power 1526 * Old versions of the device tree binding for this device used a set of power
1530 * supplies that didn't match the hardware inputs. This happened to work for a 1527 * supplies that didn't match the hardware inputs. This happened to work for a
1531 * number of cases but is not future proof. However to preserve backwards- 1528 * number of cases but is not future proof. However to preserve backwards-
1532 * compatibility with old device trees, this function will try to use the old 1529 * compatibility with old device trees, this function will try to use the old
1533 * set of supplies. 1530 * set of supplies.
1534 */ 1531 */
1535 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) 1532 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1536 { 1533 {
1537 struct device_node *np = pcie->dev->of_node; 1534 struct device_node *np = pcie->dev->of_node;
1538 1535
1539 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) 1536 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1540 pcie->num_supplies = 3; 1537 pcie->num_supplies = 3;
1541 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) 1538 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1542 pcie->num_supplies = 2; 1539 pcie->num_supplies = 2;
1543 1540
1544 if (pcie->num_supplies == 0) { 1541 if (pcie->num_supplies == 0) {
1545 dev_err(pcie->dev, "device %s not supported in legacy mode\n", 1542 dev_err(pcie->dev, "device %s not supported in legacy mode\n",
1546 np->full_name); 1543 np->full_name);
1547 return -ENODEV; 1544 return -ENODEV;
1548 } 1545 }
1549 1546
1550 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, 1547 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1551 sizeof(*pcie->supplies), 1548 sizeof(*pcie->supplies),
1552 GFP_KERNEL); 1549 GFP_KERNEL);
1553 if (!pcie->supplies) 1550 if (!pcie->supplies)
1554 return -ENOMEM; 1551 return -ENOMEM;
1555 1552
1556 pcie->supplies[0].supply = "pex-clk"; 1553 pcie->supplies[0].supply = "pex-clk";
1557 pcie->supplies[1].supply = "vdd"; 1554 pcie->supplies[1].supply = "vdd";
1558 1555
1559 if (pcie->num_supplies > 2) 1556 if (pcie->num_supplies > 2)
1560 pcie->supplies[2].supply = "avdd"; 1557 pcie->supplies[2].supply = "avdd";
1561 1558
1562 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies, 1559 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1563 pcie->supplies); 1560 pcie->supplies);
1564 } 1561 }
1565 1562
1566 /* 1563 /*
1567 * Obtains the list of regulators required for a particular generation of the 1564 * Obtains the list of regulators required for a particular generation of the
1568 * IP block. 1565 * IP block.
1569 * 1566 *
1570 * This would've been nice to do simply by providing static tables for use 1567 * This would've been nice to do simply by providing static tables for use
1571 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky 1568 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1572 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB) 1569 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1573 * and either seems to be optional depending on which ports are being used. 1570 * and either seems to be optional depending on which ports are being used.
1574 */ 1571 */
1575 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) 1572 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1576 { 1573 {
1577 struct device_node *np = pcie->dev->of_node; 1574 struct device_node *np = pcie->dev->of_node;
1578 unsigned int i = 0; 1575 unsigned int i = 0;
1579 1576
1580 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { 1577 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1581 pcie->num_supplies = 7; 1578 pcie->num_supplies = 7;
1582 1579
1583 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, 1580 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1584 sizeof(*pcie->supplies), 1581 sizeof(*pcie->supplies),
1585 GFP_KERNEL); 1582 GFP_KERNEL);
1586 if (!pcie->supplies) 1583 if (!pcie->supplies)
1587 return -ENOMEM; 1584 return -ENOMEM;
1588 1585
1589 pcie->supplies[i++].supply = "avddio-pex"; 1586 pcie->supplies[i++].supply = "avddio-pex";
1590 pcie->supplies[i++].supply = "dvddio-pex"; 1587 pcie->supplies[i++].supply = "dvddio-pex";
1591 pcie->supplies[i++].supply = "avdd-pex-pll"; 1588 pcie->supplies[i++].supply = "avdd-pex-pll";
1592 pcie->supplies[i++].supply = "hvdd-pex"; 1589 pcie->supplies[i++].supply = "hvdd-pex";
1593 pcie->supplies[i++].supply = "hvdd-pex-pll-e"; 1590 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1594 pcie->supplies[i++].supply = "vddio-pex-ctl"; 1591 pcie->supplies[i++].supply = "vddio-pex-ctl";
1595 pcie->supplies[i++].supply = "avdd-pll-erefe"; 1592 pcie->supplies[i++].supply = "avdd-pll-erefe";
1596 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { 1593 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1597 bool need_pexa = false, need_pexb = false; 1594 bool need_pexa = false, need_pexb = false;
1598 1595
1599 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */ 1596 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1600 if (lane_mask & 0x0f) 1597 if (lane_mask & 0x0f)
1601 need_pexa = true; 1598 need_pexa = true;
1602 1599
1603 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */ 1600 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1604 if (lane_mask & 0x30) 1601 if (lane_mask & 0x30)
1605 need_pexb = true; 1602 need_pexb = true;
1606 1603
1607 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) + 1604 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1608 (need_pexb ? 2 : 0); 1605 (need_pexb ? 2 : 0);
1609 1606
1610 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, 1607 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1611 sizeof(*pcie->supplies), 1608 sizeof(*pcie->supplies),
1612 GFP_KERNEL); 1609 GFP_KERNEL);
1613 if (!pcie->supplies) 1610 if (!pcie->supplies)
1614 return -ENOMEM; 1611 return -ENOMEM;
1615 1612
1616 pcie->supplies[i++].supply = "avdd-pex-pll"; 1613 pcie->supplies[i++].supply = "avdd-pex-pll";
1617 pcie->supplies[i++].supply = "hvdd-pex"; 1614 pcie->supplies[i++].supply = "hvdd-pex";
1618 pcie->supplies[i++].supply = "vddio-pex-ctl"; 1615 pcie->supplies[i++].supply = "vddio-pex-ctl";
1619 pcie->supplies[i++].supply = "avdd-plle"; 1616 pcie->supplies[i++].supply = "avdd-plle";
1620 1617
1621 if (need_pexa) { 1618 if (need_pexa) {
1622 pcie->supplies[i++].supply = "avdd-pexa"; 1619 pcie->supplies[i++].supply = "avdd-pexa";
1623 pcie->supplies[i++].supply = "vdd-pexa"; 1620 pcie->supplies[i++].supply = "vdd-pexa";
1624 } 1621 }
1625 1622
1626 if (need_pexb) { 1623 if (need_pexb) {
1627 pcie->supplies[i++].supply = "avdd-pexb"; 1624 pcie->supplies[i++].supply = "avdd-pexb";
1628 pcie->supplies[i++].supply = "vdd-pexb"; 1625 pcie->supplies[i++].supply = "vdd-pexb";
1629 } 1626 }
1630 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { 1627 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1631 pcie->num_supplies = 5; 1628 pcie->num_supplies = 5;
1632 1629
1633 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, 1630 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1634 sizeof(*pcie->supplies), 1631 sizeof(*pcie->supplies),
1635 GFP_KERNEL); 1632 GFP_KERNEL);
1636 if (!pcie->supplies) 1633 if (!pcie->supplies)
1637 return -ENOMEM; 1634 return -ENOMEM;
1638 1635
1639 pcie->supplies[0].supply = "avdd-pex"; 1636 pcie->supplies[0].supply = "avdd-pex";
1640 pcie->supplies[1].supply = "vdd-pex"; 1637 pcie->supplies[1].supply = "vdd-pex";
1641 pcie->supplies[2].supply = "avdd-pex-pll"; 1638 pcie->supplies[2].supply = "avdd-pex-pll";
1642 pcie->supplies[3].supply = "avdd-plle"; 1639 pcie->supplies[3].supply = "avdd-plle";
1643 pcie->supplies[4].supply = "vddio-pex-clk"; 1640 pcie->supplies[4].supply = "vddio-pex-clk";
1644 } 1641 }
1645 1642
1646 if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies, 1643 if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
1647 pcie->num_supplies)) 1644 pcie->num_supplies))
1648 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies, 1645 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1649 pcie->supplies); 1646 pcie->supplies);
1650 1647
1651 /* 1648 /*
1652 * If not all regulators are available for this new scheme, assume 1649 * If not all regulators are available for this new scheme, assume
1653 * that the device tree complies with an older version of the device 1650 * that the device tree complies with an older version of the device
1654 * tree binding. 1651 * tree binding.
1655 */ 1652 */
1656 dev_info(pcie->dev, "using legacy DT binding for power supplies\n"); 1653 dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
1657 1654
1658 devm_kfree(pcie->dev, pcie->supplies); 1655 devm_kfree(pcie->dev, pcie->supplies);
1659 pcie->num_supplies = 0; 1656 pcie->num_supplies = 0;
1660 1657
1661 return tegra_pcie_get_legacy_regulators(pcie); 1658 return tegra_pcie_get_legacy_regulators(pcie);
1662 } 1659 }
1663 1660
/*
 * Parse the controller's device tree node: the "ranges" property (I/O,
 * prefetchable and non-prefetchable memory windows), the bus range and
 * the per-root-port child nodes.  Returns 0 on success or a negative
 * errno on failure.
 */
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 lanes = 0, mask = 0;
	unsigned int lane = 0;
	struct resource res;
	int err;

	/*
	 * pcie->all accumulates the union of all windows; seed it with an
	 * empty range (start > end) so the min/max updates below work.
	 */
	memset(&pcie->all, 0, sizeof(pcie->all));
	pcie->all.flags = IORESOURCE_MEM;
	pcie->all.name = np->full_name;
	pcie->all.start = ~0;
	pcie->all.end = 0;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, np, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			/* pio keeps the I/O-space view of the window */
			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = np->full_name;

			/*
			 * The Tegra PCIe host bridge uses this to program the
			 * mapping of the I/O space to the physical address,
			 * so we override the .start and .end fields here that
			 * of_pci_range_to_resource() converted to I/O space.
			 * We also set the IORESOURCE_MEM type to clarify that
			 * the resource is in the physical memory space.
			 */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			/* let the union below use the physical addresses */
			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "prefetchable";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "non-prefetchable";
			}
			break;
		}

		/* grow pcie->all to cover this window */
		if (res.start <= pcie->all.start)
			pcie->all.start = res.start;

		if (res.end >= pcie->all.end)
			pcie->all.end = res.end;
	}

	/* claim the union of all windows in the global iomem tree */
	err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
	if (err < 0)
		return err;

	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
			err);
		/* no (or bad) bus-range property: fall back to 0-255 */
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		/* root ports are addressed by device number, starting at 1 */
		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		/* one byte per port in the lane configuration word */
		lanes |= value << (index << 3);

		/* disabled ports still consume their lanes */
		if (!of_device_is_available(port)) {
			lane += value;
			continue;
		}

		/* mark the lanes used by this enabled port in the mask */
		mask |= ((1 << value) - 1) << lane;
		lane += value;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	err = tegra_pcie_get_regulators(pcie, mask);
	if (err < 0)
		return err;

	return 0;
}
1808 1820
/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
/*
 * Wait for the root port's link to train, in two phases: first for the
 * data link layer to come up (RP_VEND_XP_DL_UP), then for the link to
 * report active (RP_LINK_CONTROL_STATUS).  Each phase polls up to
 * TEGRA_PCIE_LINKUP_TIMEOUT times; on timeout the port is reset and the
 * whole sequence retried up to 3 times.  Returns true if the link came
 * up, false otherwise.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	unsigned int retries = 3;
	unsigned long value;

	/* override presence detection */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* phase 1: wait for the data link layer */
		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(port->pcie->dev, "link %u down, retrying\n",
				port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* phase 2: wait for the link to become active */
		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
1861 1873
/*
 * Bring up all root ports and register the host bridge with the ARM PCI
 * core.  Ports whose link does not train are disabled and removed from
 * the port list (hence the _safe list iterator).  Always returns 0.
 */
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		/* link didn't come up: drop the port from the list */
		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.add_bus = tegra_pcie_add_bus;
	hw.scan = tegra_pcie_scan_bus;
	hw.ops = &tegra_pcie_ops;

	/* hand off to the ARM PCI core; this triggers bus enumeration */
	pci_common_init_dev(pcie->dev, &hw);

	return 0;
}
1896 1908
/* Tegra20: two root ports; no CLKREQ, bias control, PRSNT sense or CML clock */
static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
};

/* Tegra30: three root ports; adds CLKREQ, bias, PRSNT sense and CML clock */
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
};

/* Tegra124: like Tegra30 but only two root ports and Gen2-capable */
static const struct tegra_pcie_soc_data tegra124_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
};

/*
 * NOTE(review): entries are listed newest-SoC first; matching is in table
 * order, which matters if a device tree lists multiple compatibles.
 */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1940 1952
1941 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos) 1953 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
1942 { 1954 {
1943 struct tegra_pcie *pcie = s->private; 1955 struct tegra_pcie *pcie = s->private;
1944 1956
1945 if (list_empty(&pcie->ports)) 1957 if (list_empty(&pcie->ports))
1946 return NULL; 1958 return NULL;
1947 1959
1948 seq_printf(s, "Index Status\n"); 1960 seq_printf(s, "Index Status\n");
1949 1961
1950 return seq_list_start(&pcie->ports, *pos); 1962 return seq_list_start(&pcie->ports, *pos);
1951 } 1963 }
1952 1964
/* seq_file .next: advance to the following root port in the list. */
static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}
1959 1971
/* seq_file .stop: nothing to release, iteration holds no extra state. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
1963 1975
1964 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v) 1976 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
1965 { 1977 {
1966 bool up = false, active = false; 1978 bool up = false, active = false;
1967 struct tegra_pcie_port *port; 1979 struct tegra_pcie_port *port;
1968 unsigned int value; 1980 unsigned int value;
1969 1981
1970 port = list_entry(v, struct tegra_pcie_port, list); 1982 port = list_entry(v, struct tegra_pcie_port, list);
1971 1983
1972 value = readl(port->base + RP_VEND_XP); 1984 value = readl(port->base + RP_VEND_XP);
1973 1985
1974 if (value & RP_VEND_XP_DL_UP) 1986 if (value & RP_VEND_XP_DL_UP)
1975 up = true; 1987 up = true;
1976 1988
1977 value = readl(port->base + RP_LINK_CONTROL_STATUS); 1989 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1978 1990
1979 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) 1991 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1980 active = true; 1992 active = true;
1981 1993
1982 seq_printf(s, "%2u ", port->index); 1994 seq_printf(s, "%2u ", port->index);
1983 1995
1984 if (up) 1996 if (up)
1985 seq_printf(s, "up"); 1997 seq_printf(s, "up");
1986 1998
1987 if (active) { 1999 if (active) {
1988 if (up) 2000 if (up)
1989 seq_printf(s, ", "); 2001 seq_printf(s, ", ");
1990 2002
1991 seq_printf(s, "active"); 2003 seq_printf(s, "active");
1992 } 2004 }
1993 2005
1994 seq_printf(s, "\n"); 2006 seq_printf(s, "\n");
1995 return 0; 2007 return 0;
1996 } 2008 }
1997 2009
/* seq_file iterator over the controller's list of root ports */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
2004 2016
2005 static int tegra_pcie_ports_open(struct inode *inode, struct file *file) 2017 static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2006 { 2018 {
2007 struct tegra_pcie *pcie = inode->i_private; 2019 struct tegra_pcie *pcie = inode->i_private;
2008 struct seq_file *s; 2020 struct seq_file *s;
2009 int err; 2021 int err;
2010 2022
2011 err = seq_open(file, &tegra_pcie_ports_seq_ops); 2023 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2012 if (err) 2024 if (err)
2013 return err; 2025 return err;
2014 2026
2015 s = file->private_data; 2027 s = file->private_data;
2016 s->private = pcie; 2028 s->private = pcie;
2017 2029
2018 return 0; 2030 return 0;
2019 } 2031 }
2020 2032
/* file operations for the read-only debugfs "ports" entry */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
2028 2040
/*
 * Create the debugfs hierarchy: a top-level "pcie" directory containing
 * a read-only "ports" file that reports per-port link status.  Returns
 * 0 on success or -ENOMEM, cleaning up the directory on failure.
 */
static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
{
	struct dentry *file;

	pcie->debugfs = debugfs_create_dir("pcie", NULL);
	if (!pcie->debugfs)
		return -ENOMEM;

	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
				   pcie, &tegra_pcie_ports_ops);
	if (!file)
		goto remove;

	return 0;

remove:
	debugfs_remove_recursive(pcie->debugfs);
	/* NULL marks debugfs as unavailable for later teardown paths */
	pcie->debugfs = NULL;
	return -ENOMEM;
}
2049 2061
/*
 * Platform driver probe: parse the device tree, claim resources, bring
 * up the controller and AFI translations, optionally enable MSI, then
 * enable the root ports and register the host bridge.  Unwinds MSI and
 * resources via gotos on failure.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_pcie *pcie;
	int err;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->soc_data = match->data;	/* per-SoC feature table */
	pcie->dev = &pdev->dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		/* debugfs is diagnostic only; failure is logged, not fatal */
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
				err);
	}

	platform_set_drvdata(pdev, pcie);
	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
2121 2133
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		/* no .remove callback, so forbid unbinding via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);
2132 2144