Commit f229793a05f1039d9eba4286d9039403924cd1da
1 parent
11ea97fb3c
Exists in
smarc_8mq-imx_v2020.04_5.4.24_2.1.0
and in
1 other branch
MLK-24192-3 pci: pcie_imx: Enable some LPCG clocks for iMX8
Align with the Linux kernel's clock enablement: enable the phy_per and misc_per clocks on all i.MX8 variants, and additionally enable the pciex2_per and pcie_phy_pclk clocks for the PCIEB controller on i.MX8QM. Signed-off-by: Ye Li <ye.li@nxp.com> Signed-off-by: Peng Fan <peng.fan@nxp.com> (cherry picked from commit 97b8d1c613f56b539328e421ba205f2221fc658e)
Showing 1 changed file with 105 additions and 39 deletions Inline Diff
drivers/pci/pcie_imx.c
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | 2 | /* |
3 | * Freescale i.MX6 PCI Express Root-Complex driver | 3 | * Freescale i.MX6 PCI Express Root-Complex driver |
4 | * | 4 | * |
5 | * Copyright (C) 2013 Marek Vasut <marex@denx.de> | 5 | * Copyright (C) 2013 Marek Vasut <marex@denx.de> |
6 | * | 6 | * |
7 | * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. | 7 | * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. |
8 | * Copyright 2019 NXP | 8 | * Copyright 2019 NXP |
9 | * | 9 | * |
10 | * Based on upstream Linux kernel driver: | 10 | * Based on upstream Linux kernel driver: |
11 | * pci-imx6.c: Sean Cross <xobs@kosagi.com> | 11 | * pci-imx6.c: Sean Cross <xobs@kosagi.com> |
12 | * pcie-designware.c: Jingoo Han <jg1.han@samsung.com> | 12 | * pcie-designware.c: Jingoo Han <jg1.han@samsung.com> |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <common.h> | 15 | #include <common.h> |
16 | #include <init.h> | 16 | #include <init.h> |
17 | #include <malloc.h> | 17 | #include <malloc.h> |
18 | #include <pci.h> | 18 | #include <pci.h> |
19 | #if CONFIG_IS_ENABLED(CLK) | 19 | #if CONFIG_IS_ENABLED(CLK) |
20 | #include <clk.h> | 20 | #include <clk.h> |
21 | #else | 21 | #else |
22 | #include <asm/arch/clock.h> | 22 | #include <asm/arch/clock.h> |
23 | #endif | 23 | #endif |
24 | #include <asm/arch/iomux.h> | 24 | #include <asm/arch/iomux.h> |
25 | #ifdef CONFIG_MX6 | 25 | #ifdef CONFIG_MX6 |
26 | #include <asm/arch/crm_regs.h> | 26 | #include <asm/arch/crm_regs.h> |
27 | #endif | 27 | #endif |
28 | #include <asm/gpio.h> | 28 | #include <asm/gpio.h> |
29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
30 | #include <dm.h> | 30 | #include <dm.h> |
31 | #include <linux/sizes.h> | 31 | #include <linux/sizes.h> |
32 | #include <linux/ioport.h> | 32 | #include <linux/ioport.h> |
33 | #include <errno.h> | 33 | #include <errno.h> |
34 | #include <asm/arch/sys_proto.h> | 34 | #include <asm/arch/sys_proto.h> |
35 | #include <syscon.h> | 35 | #include <syscon.h> |
36 | #include <regmap.h> | 36 | #include <regmap.h> |
37 | #include <asm-generic/gpio.h> | 37 | #include <asm-generic/gpio.h> |
38 | #include <dt-bindings/soc/imx8_hsio.h> | 38 | #include <dt-bindings/soc/imx8_hsio.h> |
39 | #include <power/regulator.h> | 39 | #include <power/regulator.h> |
40 | #include <dm/device_compat.h> | 40 | #include <dm/device_compat.h> |
41 | 41 | ||
/* Supported i.MX PCIe controller variants; drives SoC-specific paths below. */
enum imx_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX8QM,
	IMX8QXP,
};
49 | 49 | ||
50 | #define PCI_ACCESS_READ 0 | 50 | #define PCI_ACCESS_READ 0 |
51 | #define PCI_ACCESS_WRITE 1 | 51 | #define PCI_ACCESS_WRITE 1 |
52 | 52 | ||
53 | #ifdef CONFIG_MX6SX | 53 | #ifdef CONFIG_MX6SX |
54 | #define MX6_DBI_ADDR 0x08ffc000 | 54 | #define MX6_DBI_ADDR 0x08ffc000 |
55 | #define MX6_IO_ADDR 0x08f80000 | 55 | #define MX6_IO_ADDR 0x08f80000 |
56 | #define MX6_MEM_ADDR 0x08000000 | 56 | #define MX6_MEM_ADDR 0x08000000 |
57 | #define MX6_ROOT_ADDR 0x08f00000 | 57 | #define MX6_ROOT_ADDR 0x08f00000 |
58 | #else | 58 | #else |
59 | #define MX6_DBI_ADDR 0x01ffc000 | 59 | #define MX6_DBI_ADDR 0x01ffc000 |
60 | #define MX6_IO_ADDR 0x01f80000 | 60 | #define MX6_IO_ADDR 0x01f80000 |
61 | #define MX6_MEM_ADDR 0x01000000 | 61 | #define MX6_MEM_ADDR 0x01000000 |
62 | #define MX6_ROOT_ADDR 0x01f00000 | 62 | #define MX6_ROOT_ADDR 0x01f00000 |
63 | #endif | 63 | #endif |
64 | #define MX6_DBI_SIZE 0x4000 | 64 | #define MX6_DBI_SIZE 0x4000 |
65 | #define MX6_IO_SIZE 0x10000 | 65 | #define MX6_IO_SIZE 0x10000 |
66 | #define MX6_MEM_SIZE 0xf00000 | 66 | #define MX6_MEM_SIZE 0xf00000 |
67 | #define MX6_ROOT_SIZE 0x80000 | 67 | #define MX6_ROOT_SIZE 0x80000 |
68 | 68 | ||
69 | /* PCIe Port Logic registers (memory-mapped) */ | 69 | /* PCIe Port Logic registers (memory-mapped) */ |
70 | #define PL_OFFSET 0x700 | 70 | #define PL_OFFSET 0x700 |
71 | #define PCIE_PL_PFLR (PL_OFFSET + 0x08) | 71 | #define PCIE_PL_PFLR (PL_OFFSET + 0x08) |
72 | #define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) | 72 | #define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) |
73 | #define PCIE_PL_PFLR_FORCE_LINK (1 << 15) | 73 | #define PCIE_PL_PFLR_FORCE_LINK (1 << 15) |
74 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) | 74 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) |
75 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) | 75 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) |
76 | #define PCIE_PHY_DEBUG_R1_LINK_UP (1 << 4) | 76 | #define PCIE_PHY_DEBUG_R1_LINK_UP (1 << 4) |
77 | #define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (1 << 29) | 77 | #define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (1 << 29) |
78 | 78 | ||
79 | #define PCIE_PORT_LINK_CONTROL 0x710 | 79 | #define PCIE_PORT_LINK_CONTROL 0x710 |
80 | #define PORT_LINK_MODE_MASK (0x3f << 16) | 80 | #define PORT_LINK_MODE_MASK (0x3f << 16) |
81 | #define PORT_LINK_MODE_1_LANES (0x1 << 16) | 81 | #define PORT_LINK_MODE_1_LANES (0x1 << 16) |
82 | #define PORT_LINK_MODE_2_LANES (0x3 << 16) | 82 | #define PORT_LINK_MODE_2_LANES (0x3 << 16) |
83 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) | 83 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) |
84 | #define PORT_LINK_MODE_8_LANES (0xf << 16) | 84 | #define PORT_LINK_MODE_8_LANES (0xf << 16) |
85 | 85 | ||
86 | 86 | ||
87 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | 87 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C |
88 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | 88 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) |
89 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) | 89 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) |
90 | #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) | 90 | #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) |
91 | #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) | 91 | #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) |
92 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) | 92 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) |
93 | #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) | 93 | #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) |
94 | 94 | ||
95 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) | 95 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) |
96 | #define PCIE_PHY_CTRL_DATA_LOC 0 | 96 | #define PCIE_PHY_CTRL_DATA_LOC 0 |
97 | #define PCIE_PHY_CTRL_CAP_ADR_LOC 16 | 97 | #define PCIE_PHY_CTRL_CAP_ADR_LOC 16 |
98 | #define PCIE_PHY_CTRL_CAP_DAT_LOC 17 | 98 | #define PCIE_PHY_CTRL_CAP_DAT_LOC 17 |
99 | #define PCIE_PHY_CTRL_WR_LOC 18 | 99 | #define PCIE_PHY_CTRL_WR_LOC 18 |
100 | #define PCIE_PHY_CTRL_RD_LOC 19 | 100 | #define PCIE_PHY_CTRL_RD_LOC 19 |
101 | 101 | ||
102 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) | 102 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) |
103 | #define PCIE_PHY_STAT_DATA_LOC 0 | 103 | #define PCIE_PHY_STAT_DATA_LOC 0 |
104 | #define PCIE_PHY_STAT_ACK_LOC 16 | 104 | #define PCIE_PHY_STAT_ACK_LOC 16 |
105 | 105 | ||
106 | /* PHY registers (not memory-mapped) */ | 106 | /* PHY registers (not memory-mapped) */ |
107 | #define PCIE_PHY_RX_ASIC_OUT 0x100D | 107 | #define PCIE_PHY_RX_ASIC_OUT 0x100D |
108 | 108 | ||
109 | #define PHY_RX_OVRD_IN_LO 0x1005 | 109 | #define PHY_RX_OVRD_IN_LO 0x1005 |
110 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) | 110 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) |
111 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) | 111 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) |
112 | 112 | ||
113 | #define PCIE_PHY_PUP_REQ (1 << 7) | 113 | #define PCIE_PHY_PUP_REQ (1 << 7) |
114 | 114 | ||
115 | /* iATU registers */ | 115 | /* iATU registers */ |
116 | #define PCIE_ATU_VIEWPORT 0x900 | 116 | #define PCIE_ATU_VIEWPORT 0x900 |
117 | #define PCIE_ATU_REGION_INBOUND (0x1 << 31) | 117 | #define PCIE_ATU_REGION_INBOUND (0x1 << 31) |
118 | #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) | 118 | #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) |
119 | #define PCIE_ATU_REGION_INDEX2 (0x2 << 0) | 119 | #define PCIE_ATU_REGION_INDEX2 (0x2 << 0) |
120 | #define PCIE_ATU_REGION_INDEX1 (0x1 << 0) | 120 | #define PCIE_ATU_REGION_INDEX1 (0x1 << 0) |
121 | #define PCIE_ATU_REGION_INDEX0 (0x0 << 0) | 121 | #define PCIE_ATU_REGION_INDEX0 (0x0 << 0) |
122 | #define PCIE_ATU_CR1 0x904 | 122 | #define PCIE_ATU_CR1 0x904 |
123 | #define PCIE_ATU_TYPE_MEM (0x0 << 0) | 123 | #define PCIE_ATU_TYPE_MEM (0x0 << 0) |
124 | #define PCIE_ATU_TYPE_IO (0x2 << 0) | 124 | #define PCIE_ATU_TYPE_IO (0x2 << 0) |
125 | #define PCIE_ATU_TYPE_CFG0 (0x4 << 0) | 125 | #define PCIE_ATU_TYPE_CFG0 (0x4 << 0) |
126 | #define PCIE_ATU_TYPE_CFG1 (0x5 << 0) | 126 | #define PCIE_ATU_TYPE_CFG1 (0x5 << 0) |
127 | #define PCIE_ATU_CR2 0x908 | 127 | #define PCIE_ATU_CR2 0x908 |
128 | #define PCIE_ATU_ENABLE (0x1 << 31) | 128 | #define PCIE_ATU_ENABLE (0x1 << 31) |
129 | #define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) | 129 | #define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) |
130 | #define PCIE_ATU_LOWER_BASE 0x90C | 130 | #define PCIE_ATU_LOWER_BASE 0x90C |
131 | #define PCIE_ATU_UPPER_BASE 0x910 | 131 | #define PCIE_ATU_UPPER_BASE 0x910 |
132 | #define PCIE_ATU_LIMIT 0x914 | 132 | #define PCIE_ATU_LIMIT 0x914 |
133 | #define PCIE_ATU_LOWER_TARGET 0x918 | 133 | #define PCIE_ATU_LOWER_TARGET 0x918 |
134 | #define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) | 134 | #define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) |
135 | #define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) | 135 | #define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) |
136 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) | 136 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) |
137 | #define PCIE_ATU_UPPER_TARGET 0x91C | 137 | #define PCIE_ATU_UPPER_TARGET 0x91C |
138 | 138 | ||
139 | #define PCIE_MISC_CTRL (PL_OFFSET + 0x1BC) | 139 | #define PCIE_MISC_CTRL (PL_OFFSET + 0x1BC) |
140 | #define PCIE_MISC_DBI_RO_WR_EN BIT(0) | 140 | #define PCIE_MISC_DBI_RO_WR_EN BIT(0) |
141 | 141 | ||
142 | /* iMX8 HSIO registers */ | 142 | /* iMX8 HSIO registers */ |
143 | #define IMX8QM_LPCG_PHYX2_OFFSET 0x00000 | 143 | #define IMX8QM_LPCG_PHYX2_OFFSET 0x00000 |
144 | #define IMX8QM_CSR_PHYX2_OFFSET 0x90000 | 144 | #define IMX8QM_CSR_PHYX2_OFFSET 0x90000 |
145 | #define IMX8QM_CSR_PHYX1_OFFSET 0xA0000 | 145 | #define IMX8QM_CSR_PHYX1_OFFSET 0xA0000 |
146 | #define IMX8QM_CSR_PHYX_STTS0_OFFSET 0x4 | 146 | #define IMX8QM_CSR_PHYX_STTS0_OFFSET 0x4 |
147 | #define IMX8QM_CSR_PCIEA_OFFSET 0xB0000 | 147 | #define IMX8QM_CSR_PCIEA_OFFSET 0xB0000 |
148 | #define IMX8QM_CSR_PCIEB_OFFSET 0xC0000 | 148 | #define IMX8QM_CSR_PCIEB_OFFSET 0xC0000 |
149 | #define IMX8QM_CSR_PCIE_CTRL1_OFFSET 0x4 | 149 | #define IMX8QM_CSR_PCIE_CTRL1_OFFSET 0x4 |
150 | #define IMX8QM_CSR_PCIE_CTRL2_OFFSET 0x8 | 150 | #define IMX8QM_CSR_PCIE_CTRL2_OFFSET 0x8 |
151 | #define IMX8QM_CSR_PCIE_STTS0_OFFSET 0xC | 151 | #define IMX8QM_CSR_PCIE_STTS0_OFFSET 0xC |
152 | #define IMX8QM_CSR_MISC_OFFSET 0xE0000 | 152 | #define IMX8QM_CSR_MISC_OFFSET 0xE0000 |
153 | 153 | ||
154 | #define IMX8QM_LPCG_PHY_PCG0 BIT(1) | 154 | #define IMX8QM_LPCG_PHY_PCG0 BIT(1) |
155 | #define IMX8QM_LPCG_PHY_PCG1 BIT(5) | 155 | #define IMX8QM_LPCG_PHY_PCG1 BIT(5) |
156 | 156 | ||
157 | #define IMX8QM_CTRL_LTSSM_ENABLE BIT(4) | 157 | #define IMX8QM_CTRL_LTSSM_ENABLE BIT(4) |
158 | #define IMX8QM_CTRL_READY_ENTR_L23 BIT(5) | 158 | #define IMX8QM_CTRL_READY_ENTR_L23 BIT(5) |
159 | #define IMX8QM_CTRL_PM_XMT_TURNOFF BIT(9) | 159 | #define IMX8QM_CTRL_PM_XMT_TURNOFF BIT(9) |
160 | #define IMX8QM_CTRL_BUTTON_RST_N BIT(21) | 160 | #define IMX8QM_CTRL_BUTTON_RST_N BIT(21) |
161 | #define IMX8QM_CTRL_PERST_N BIT(22) | 161 | #define IMX8QM_CTRL_PERST_N BIT(22) |
162 | #define IMX8QM_CTRL_POWER_UP_RST_N BIT(23) | 162 | #define IMX8QM_CTRL_POWER_UP_RST_N BIT(23) |
163 | 163 | ||
164 | #define IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2 BIT(13) | 164 | #define IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2 BIT(13) |
165 | #define IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST BIT(19) | 165 | #define IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST BIT(19) |
166 | #define IMX8QM_STTS0_LANE0_TX_PLL_LOCK BIT(4) | 166 | #define IMX8QM_STTS0_LANE0_TX_PLL_LOCK BIT(4) |
167 | #define IMX8QM_STTS0_LANE1_TX_PLL_LOCK BIT(12) | 167 | #define IMX8QM_STTS0_LANE1_TX_PLL_LOCK BIT(12) |
168 | 168 | ||
169 | #define IMX8QM_PCIE_TYPE_MASK (0xF << 24) | 169 | #define IMX8QM_PCIE_TYPE_MASK (0xF << 24) |
170 | 170 | ||
171 | #define IMX8QM_PHYX2_CTRL0_APB_MASK 0x3 | 171 | #define IMX8QM_PHYX2_CTRL0_APB_MASK 0x3 |
172 | #define IMX8QM_PHY_APB_RSTN_0 BIT(0) | 172 | #define IMX8QM_PHY_APB_RSTN_0 BIT(0) |
173 | #define IMX8QM_PHY_APB_RSTN_1 BIT(1) | 173 | #define IMX8QM_PHY_APB_RSTN_1 BIT(1) |
174 | 174 | ||
175 | #define IMX8QM_MISC_IOB_RXENA BIT(0) | 175 | #define IMX8QM_MISC_IOB_RXENA BIT(0) |
176 | #define IMX8QM_MISC_IOB_TXENA BIT(1) | 176 | #define IMX8QM_MISC_IOB_TXENA BIT(1) |
177 | #define IMX8QM_CSR_MISC_IOB_A_0_TXOE BIT(2) | 177 | #define IMX8QM_CSR_MISC_IOB_A_0_TXOE BIT(2) |
178 | #define IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK (0x3 << 3) | 178 | #define IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK (0x3 << 3) |
179 | #define IMX8QM_CSR_MISC_IOB_A_0_M1M0_2 BIT(4) | 179 | #define IMX8QM_CSR_MISC_IOB_A_0_M1M0_2 BIT(4) |
180 | #define IMX8QM_MISC_PHYX1_EPCS_SEL BIT(12) | 180 | #define IMX8QM_MISC_PHYX1_EPCS_SEL BIT(12) |
181 | #define IMX8QM_MISC_PCIE_AB_SELECT BIT(13) | 181 | #define IMX8QM_MISC_PCIE_AB_SELECT BIT(13) |
182 | 182 | ||
183 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_MASK (0xF << 13) | 183 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_MASK (0xF << 13) |
184 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_0 BIT(13) | 184 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_0 BIT(13) |
185 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_1 BIT(14) | 185 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_1 BIT(14) |
186 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_2 BIT(15) | 186 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_2 BIT(15) |
187 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_3 BIT(16) | 187 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_3 BIT(16) |
188 | 188 | ||
189 | #define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 | 189 | #define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 |
190 | 190 | ||
#ifdef DEBUG

#ifdef DEBUG_STRESS_WR /* warm-reset stress tests */
#define SNVS_LPGRP 0x020cc068
#endif

#define DBGF(x...) printf(x)

/*
 * Dump the IOMUXC GPR and CCM analog registers used during PCIe bring-up.
 * @contain_pcie_reg: when non-zero, also dump two PCIe DBI debug registers
 * (offsets 0x728/0x72c) — only meaningful once the DBI space is accessible.
 * Implemented for i.MX6 only (CONFIG_MX6); compiles to a no-op otherwise.
 */
static void print_regs(int contain_pcie_reg)
{
#ifdef CONFIG_MX6
	u32 val;
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	struct mxc_ccm_reg *ccm_regs = (struct mxc_ccm_reg *)CCM_BASE_ADDR;
	val = readl(&iomuxc_regs->gpr[1]);
	DBGF("GPR01 a:0x%08x v:0x%08x\n", (u32)&iomuxc_regs->gpr[1], val);
	val = readl(&iomuxc_regs->gpr[5]);
	DBGF("GPR05 a:0x%08x v:0x%08x\n", (u32)&iomuxc_regs->gpr[5], val);
	val = readl(&iomuxc_regs->gpr[8]);
	DBGF("GPR08 a:0x%08x v:0x%08x\n", (u32)&iomuxc_regs->gpr[8], val);
	val = readl(&iomuxc_regs->gpr[12]);
	DBGF("GPR12 a:0x%08x v:0x%08x\n", (u32)&iomuxc_regs->gpr[12], val);
	val = readl(&ccm_regs->analog_pll_enet);
	DBGF("PLL06 a:0x%08x v:0x%08x\n", (u32)&ccm_regs->analog_pll_enet, val);
	val = readl(&ccm_regs->ana_misc1);
	DBGF("MISC1 a:0x%08x v:0x%08x\n", (u32)&ccm_regs->ana_misc1, val);
	if (contain_pcie_reg) {
		val = readl(MX6_DBI_ADDR + 0x728);
		DBGF("dbr0 offset 0x728 %08x\n", val);
		val = readl(MX6_DBI_ADDR + 0x72c);
		DBGF("dbr1 offset 0x72c %08x\n", val);
	}
#endif
}
#else
#define DBGF(x...)
static void print_regs(int contain_pcie_reg) {}
#endif
229 | 229 | ||
/* Per-controller private state for the i.MX PCIe root-complex driver. */
struct imx_pcie_priv {
	void __iomem *dbi_base;		/* DesignWare DBI register space */
	void __iomem *cfg_base;		/* config-space access window */
	void __iomem *cfg1_base;	/* second config window (type-1 accesses) */
	enum imx_pcie_variants variant;
	struct regmap *iomuxc_gpr;	/* IOMUXC GPR syscon regmap (DM builds) */
	u32 hsio_cfg;			/* i.MX8 HSIO subsystem configuration */
	u32 ctrl_id;			/* controller index (e.g. PCIEA/PCIEB) */
	u32 ext_osc;			/* non-zero: external reference oscillator */
	u32 cpu_base;
	u32 lanes;			/* number of lanes (see imx_pcie_setup_ctrl) */
	u32 cfg_size;
	int cpu_addr_offset;
	struct gpio_desc clkreq_gpio;
	struct gpio_desc dis_gpio;
	struct gpio_desc reset_gpio;
	struct gpio_desc power_on_gpio;

	struct pci_region *io;
	struct pci_region *mem;
	struct pci_region *pref;

#if CONFIG_IS_ENABLED(CLK)
	struct clk pcie_bus;
	struct clk pcie_phy;
	struct clk pcie_phy_pclk;	/* added by this commit: i.MX8QM PCIEB PHY pclk */
	struct clk pcie_inbound_axi;
	struct clk pcie_per;
	struct clk pciex2_per;		/* added by this commit: i.MX8QM PCIEB x2 per clock */
	struct clk phy_per;		/* LPCG phy_per clock (all i.MX8) */
	struct clk misc_per;		/* LPCG misc_per clock (all i.MX8) */
	struct clk pcie;
	struct clk pcie_ext_src;
#endif

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	struct udevice *epdev_on;	/* regulator powering the endpoint */
	struct udevice *pcie_bus_regulator;
	struct udevice *pcie_phy_regulator;
#endif
};
269 | 271 | ||
270 | /* | 272 | /* |
271 | * PHY access functions | 273 | * PHY access functions |
272 | */ | 274 | */ |
273 | static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val) | 275 | static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val) |
274 | { | 276 | { |
275 | u32 val; | 277 | u32 val; |
276 | u32 max_iterations = 10; | 278 | u32 max_iterations = 10; |
277 | u32 wait_counter = 0; | 279 | u32 wait_counter = 0; |
278 | 280 | ||
279 | do { | 281 | do { |
280 | val = readl(dbi_base + PCIE_PHY_STAT); | 282 | val = readl(dbi_base + PCIE_PHY_STAT); |
281 | val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; | 283 | val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; |
282 | wait_counter++; | 284 | wait_counter++; |
283 | 285 | ||
284 | if (val == exp_val) | 286 | if (val == exp_val) |
285 | return 0; | 287 | return 0; |
286 | 288 | ||
287 | udelay(1); | 289 | udelay(1); |
288 | } while (wait_counter < max_iterations); | 290 | } while (wait_counter < max_iterations); |
289 | 291 | ||
290 | return -ETIMEDOUT; | 292 | return -ETIMEDOUT; |
291 | } | 293 | } |
292 | 294 | ||
293 | static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) | 295 | static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) |
294 | { | 296 | { |
295 | u32 val; | 297 | u32 val; |
296 | int ret; | 298 | int ret; |
297 | 299 | ||
298 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | 300 | val = addr << PCIE_PHY_CTRL_DATA_LOC; |
299 | writel(val, dbi_base + PCIE_PHY_CTRL); | 301 | writel(val, dbi_base + PCIE_PHY_CTRL); |
300 | 302 | ||
301 | val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); | 303 | val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); |
302 | writel(val, dbi_base + PCIE_PHY_CTRL); | 304 | writel(val, dbi_base + PCIE_PHY_CTRL); |
303 | 305 | ||
304 | ret = pcie_phy_poll_ack(dbi_base, 1); | 306 | ret = pcie_phy_poll_ack(dbi_base, 1); |
305 | if (ret) | 307 | if (ret) |
306 | return ret; | 308 | return ret; |
307 | 309 | ||
308 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | 310 | val = addr << PCIE_PHY_CTRL_DATA_LOC; |
309 | writel(val, dbi_base + PCIE_PHY_CTRL); | 311 | writel(val, dbi_base + PCIE_PHY_CTRL); |
310 | 312 | ||
311 | ret = pcie_phy_poll_ack(dbi_base, 0); | 313 | ret = pcie_phy_poll_ack(dbi_base, 0); |
312 | if (ret) | 314 | if (ret) |
313 | return ret; | 315 | return ret; |
314 | 316 | ||
315 | return 0; | 317 | return 0; |
316 | } | 318 | } |
317 | 319 | ||
318 | /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ | 320 | /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ |
319 | static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data) | 321 | static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data) |
320 | { | 322 | { |
321 | u32 val, phy_ctl; | 323 | u32 val, phy_ctl; |
322 | int ret; | 324 | int ret; |
323 | 325 | ||
324 | ret = pcie_phy_wait_ack(dbi_base, addr); | 326 | ret = pcie_phy_wait_ack(dbi_base, addr); |
325 | if (ret) | 327 | if (ret) |
326 | return ret; | 328 | return ret; |
327 | 329 | ||
328 | /* assert Read signal */ | 330 | /* assert Read signal */ |
329 | phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; | 331 | phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; |
330 | writel(phy_ctl, dbi_base + PCIE_PHY_CTRL); | 332 | writel(phy_ctl, dbi_base + PCIE_PHY_CTRL); |
331 | 333 | ||
332 | ret = pcie_phy_poll_ack(dbi_base, 1); | 334 | ret = pcie_phy_poll_ack(dbi_base, 1); |
333 | if (ret) | 335 | if (ret) |
334 | return ret; | 336 | return ret; |
335 | 337 | ||
336 | val = readl(dbi_base + PCIE_PHY_STAT); | 338 | val = readl(dbi_base + PCIE_PHY_STAT); |
337 | *data = val & 0xffff; | 339 | *data = val & 0xffff; |
338 | 340 | ||
339 | /* deassert Read signal */ | 341 | /* deassert Read signal */ |
340 | writel(0x00, dbi_base + PCIE_PHY_CTRL); | 342 | writel(0x00, dbi_base + PCIE_PHY_CTRL); |
341 | 343 | ||
342 | ret = pcie_phy_poll_ack(dbi_base, 0); | 344 | ret = pcie_phy_poll_ack(dbi_base, 0); |
343 | if (ret) | 345 | if (ret) |
344 | return ret; | 346 | return ret; |
345 | 347 | ||
346 | return 0; | 348 | return 0; |
347 | } | 349 | } |
348 | 350 | ||
/*
 * Write @data to a 16-bit PCIe PHY control register at @addr via the
 * indirect control/status handshake (registers are not memory-mapped).
 * Each step waits for the ack bit to toggle; returns 0 on success or
 * -ETIMEDOUT from the failing poll. The exact write ordering implements
 * the PHY's capture-data / write-strobe protocol — do not reorder.
 */
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* present the data on the control register */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* leave the control register idle */
	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}
402 | 404 | ||
#if !CONFIG_IS_ENABLED(DM_PCI)
/* Non-DM build: read the IOMUXC GPR at byte @offset by direct MMIO. */
void imx_pcie_gpr_read(struct imx_pcie_priv *priv, uint offset, uint *valp)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	/* gpr[] is indexed by word, offset is in bytes */
	*valp = readl(&iomuxc_regs->gpr[offset >> 2]);
}

/* Non-DM build: read-modify-write the GPR at byte @offset (clear @mask, set @val). */
void imx_pcie_gpr_update_bits(struct imx_pcie_priv *priv, uint offset, uint mask, uint val)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	clrsetbits_32(&iomuxc_regs->gpr[offset >> 2], mask, val);
}

#else
/* DM build: go through the syscon regmap held in priv->iomuxc_gpr. */
void imx_pcie_gpr_read(struct imx_pcie_priv *priv, uint offset, uint *valp)
{
	regmap_read(priv->iomuxc_gpr, offset, valp);
}

void imx_pcie_gpr_update_bits(struct imx_pcie_priv *priv, uint offset, uint mask, uint val)
{
	regmap_update_bits(priv->iomuxc_gpr, offset, mask, val);
}

#endif
428 | 430 | ||
/*
 * Check link state via the PHY debug registers and work around a stuck
 * gen2 speed change by pulsing the PHY RX overrides.
 *
 * NOTE(review): returns -EAGAIN (non-zero) when the link is up and not
 * in training, and 0 otherwise — callers appear to treat any non-zero
 * value as "link up"; confirm against the call sites before changing.
 */
static int imx6_pcie_link_up(struct imx_pcie_priv *priv)
{
	u32 rc, ltssm;
	int rx_valid, temp;

	/* link is debug bit 36, debug register 1 starts at bit 32 */
	rc = readl(priv->dbi_base + PCIE_PHY_DEBUG_R1);
	if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) &&
	    !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))
		return -EAGAIN;

	/*
	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
	 * If (MAC/LTSSM.state == Recovery.RcvrLock)
	 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
	 * to gen2 is stuck
	 */
	pcie_phy_read(priv->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
	ltssm = readl(priv->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;

	/* RX is seeing valid symbols: not stuck, nothing to do. */
	if (rx_valid & 0x01)
		return 0;

	/* 0x0d is presumably the Recovery.RcvrLock LTSSM state (see above). */
	if (ltssm != 0x0d)
		return 0;

	printf("transition to gen2 is stuck, reset PHY!\n");

	/* Force the RX data/PLL overrides on ... */
	pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
	temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp);

	udelay(3000);

	/* ... then release them, effectively pulsing an RX reset. */
	pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
	temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp);

	return 0;
}
470 | 472 | ||
471 | /* Fix class value */ | 473 | /* Fix class value */ |
472 | static void imx_pcie_fix_class(struct imx_pcie_priv *priv) | 474 | static void imx_pcie_fix_class(struct imx_pcie_priv *priv) |
473 | { | 475 | { |
474 | writew(PCI_CLASS_BRIDGE_PCI, priv->dbi_base + PCI_CLASS_DEVICE); | 476 | writew(PCI_CLASS_BRIDGE_PCI, priv->dbi_base + PCI_CLASS_DEVICE); |
475 | } | 477 | } |
476 | 478 | ||
477 | /* Clear multi-function bit */ | 479 | /* Clear multi-function bit */ |
478 | static void imx_pcie_clear_multifunction(struct imx_pcie_priv *priv) | 480 | static void imx_pcie_clear_multifunction(struct imx_pcie_priv *priv) |
479 | { | 481 | { |
480 | writeb(PCI_HEADER_TYPE_BRIDGE, priv->dbi_base + PCI_HEADER_TYPE); | 482 | writeb(PCI_HEADER_TYPE_BRIDGE, priv->dbi_base + PCI_HEADER_TYPE); |
481 | } | 483 | } |
482 | 484 | ||
483 | static void imx_pcie_setup_ctrl(struct imx_pcie_priv *priv) | 485 | static void imx_pcie_setup_ctrl(struct imx_pcie_priv *priv) |
484 | { | 486 | { |
485 | u32 val; | 487 | u32 val; |
486 | 488 | ||
487 | writel(PCIE_MISC_DBI_RO_WR_EN, priv->dbi_base + PCIE_MISC_CTRL); | 489 | writel(PCIE_MISC_DBI_RO_WR_EN, priv->dbi_base + PCIE_MISC_CTRL); |
488 | 490 | ||
489 | /* Set the number of lanes */ | 491 | /* Set the number of lanes */ |
490 | val = readl(priv->dbi_base + PCIE_PORT_LINK_CONTROL); | 492 | val = readl(priv->dbi_base + PCIE_PORT_LINK_CONTROL); |
491 | val &= ~PORT_LINK_MODE_MASK; | 493 | val &= ~PORT_LINK_MODE_MASK; |
492 | switch (priv->lanes) { | 494 | switch (priv->lanes) { |
493 | case 1: | 495 | case 1: |
494 | val |= PORT_LINK_MODE_1_LANES; | 496 | val |= PORT_LINK_MODE_1_LANES; |
495 | break; | 497 | break; |
496 | case 2: | 498 | case 2: |
497 | val |= PORT_LINK_MODE_2_LANES; | 499 | val |= PORT_LINK_MODE_2_LANES; |
498 | break; | 500 | break; |
499 | case 4: | 501 | case 4: |
500 | val |= PORT_LINK_MODE_4_LANES; | 502 | val |= PORT_LINK_MODE_4_LANES; |
501 | break; | 503 | break; |
502 | case 8: | 504 | case 8: |
503 | val |= PORT_LINK_MODE_8_LANES; | 505 | val |= PORT_LINK_MODE_8_LANES; |
504 | break; | 506 | break; |
505 | default: | 507 | default: |
506 | printf("num-lanes %u: invalid value\n", priv->lanes); | 508 | printf("num-lanes %u: invalid value\n", priv->lanes); |
507 | return; | 509 | return; |
508 | } | 510 | } |
509 | writel(val, priv->dbi_base + PCIE_PORT_LINK_CONTROL); | 511 | writel(val, priv->dbi_base + PCIE_PORT_LINK_CONTROL); |
510 | 512 | ||
511 | /* Set link width speed control register */ | 513 | /* Set link width speed control register */ |
512 | val = readl(priv->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | 514 | val = readl(priv->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); |
513 | val &= ~PORT_LOGIC_LINK_WIDTH_MASK; | 515 | val &= ~PORT_LOGIC_LINK_WIDTH_MASK; |
514 | switch (priv->lanes) { | 516 | switch (priv->lanes) { |
515 | case 1: | 517 | case 1: |
516 | val |= PORT_LOGIC_LINK_WIDTH_1_LANES; | 518 | val |= PORT_LOGIC_LINK_WIDTH_1_LANES; |
517 | break; | 519 | break; |
518 | case 2: | 520 | case 2: |
519 | val |= PORT_LOGIC_LINK_WIDTH_2_LANES; | 521 | val |= PORT_LOGIC_LINK_WIDTH_2_LANES; |
520 | break; | 522 | break; |
521 | case 4: | 523 | case 4: |
522 | val |= PORT_LOGIC_LINK_WIDTH_4_LANES; | 524 | val |= PORT_LOGIC_LINK_WIDTH_4_LANES; |
523 | break; | 525 | break; |
524 | case 8: | 526 | case 8: |
525 | val |= PORT_LOGIC_LINK_WIDTH_8_LANES; | 527 | val |= PORT_LOGIC_LINK_WIDTH_8_LANES; |
526 | break; | 528 | break; |
527 | } | 529 | } |
528 | writel(val, priv->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | 530 | writel(val, priv->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); |
529 | 531 | ||
530 | /* setup RC BARs */ | 532 | /* setup RC BARs */ |
531 | writel(0, priv->dbi_base + PCI_BASE_ADDRESS_0); | 533 | writel(0, priv->dbi_base + PCI_BASE_ADDRESS_0); |
532 | writel(0, priv->dbi_base + PCI_BASE_ADDRESS_1); | 534 | writel(0, priv->dbi_base + PCI_BASE_ADDRESS_1); |
533 | 535 | ||
534 | /* setup bus numbers */ | 536 | /* setup bus numbers */ |
535 | val = readl(priv->dbi_base + PCI_PRIMARY_BUS); | 537 | val = readl(priv->dbi_base + PCI_PRIMARY_BUS); |
536 | val &= 0xff000000; | 538 | val &= 0xff000000; |
537 | val |= 0x00ff0100; | 539 | val |= 0x00ff0100; |
538 | writel(val, priv->dbi_base + PCI_PRIMARY_BUS); | 540 | writel(val, priv->dbi_base + PCI_PRIMARY_BUS); |
539 | 541 | ||
540 | /* setup command register */ | 542 | /* setup command register */ |
541 | val = readl(priv->dbi_base + PCI_COMMAND); | 543 | val = readl(priv->dbi_base + PCI_COMMAND); |
542 | val &= 0xffff0000; | 544 | val &= 0xffff0000; |
543 | val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | | 545 | val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | |
544 | PCI_COMMAND_MASTER; | 546 | PCI_COMMAND_MASTER; |
545 | writel(val, priv->dbi_base + PCI_COMMAND); | 547 | writel(val, priv->dbi_base + PCI_COMMAND); |
546 | 548 | ||
547 | imx_pcie_fix_class(priv); | 549 | imx_pcie_fix_class(priv); |
548 | imx_pcie_clear_multifunction(priv); | 550 | imx_pcie_clear_multifunction(priv); |
549 | 551 | ||
550 | writel(0, priv->dbi_base + PCIE_MISC_CTRL); | 552 | writel(0, priv->dbi_base + PCIE_MISC_CTRL); |
551 | } | 553 | } |
552 | 554 | ||
553 | static void imx_pcie_atu_outbound_set(struct imx_pcie_priv *priv, int idx, int type, | 555 | static void imx_pcie_atu_outbound_set(struct imx_pcie_priv *priv, int idx, int type, |
554 | u64 phys, u64 bus_addr, u32 size) | 556 | u64 phys, u64 bus_addr, u32 size) |
555 | { | 557 | { |
556 | writel(PCIE_ATU_REGION_OUTBOUND | idx, priv->dbi_base + PCIE_ATU_VIEWPORT); | 558 | writel(PCIE_ATU_REGION_OUTBOUND | idx, priv->dbi_base + PCIE_ATU_VIEWPORT); |
557 | writel((u32)(phys + priv->cpu_addr_offset), priv->dbi_base + PCIE_ATU_LOWER_BASE); | 559 | writel((u32)(phys + priv->cpu_addr_offset), priv->dbi_base + PCIE_ATU_LOWER_BASE); |
558 | writel((phys + priv->cpu_addr_offset) >> 32, priv->dbi_base + PCIE_ATU_UPPER_BASE); | 560 | writel((phys + priv->cpu_addr_offset) >> 32, priv->dbi_base + PCIE_ATU_UPPER_BASE); |
559 | writel((u32)(phys + priv->cpu_addr_offset) + size - 1, priv->dbi_base + PCIE_ATU_LIMIT); | 561 | writel((u32)(phys + priv->cpu_addr_offset) + size - 1, priv->dbi_base + PCIE_ATU_LIMIT); |
560 | writel((u32)bus_addr, priv->dbi_base + PCIE_ATU_LOWER_TARGET); | 562 | writel((u32)bus_addr, priv->dbi_base + PCIE_ATU_LOWER_TARGET); |
561 | writel(bus_addr >> 32, priv->dbi_base + PCIE_ATU_UPPER_TARGET); | 563 | writel(bus_addr >> 32, priv->dbi_base + PCIE_ATU_UPPER_TARGET); |
562 | writel(type, priv->dbi_base + PCIE_ATU_CR1); | 564 | writel(type, priv->dbi_base + PCIE_ATU_CR1); |
563 | writel(PCIE_ATU_ENABLE, priv->dbi_base + PCIE_ATU_CR2); | 565 | writel(PCIE_ATU_ENABLE, priv->dbi_base + PCIE_ATU_CR2); |
564 | } | 566 | } |
565 | 567 | ||
566 | /* | 568 | /* |
567 | * iATU region setup | 569 | * iATU region setup |
568 | */ | 570 | */ |
569 | static int imx_pcie_regions_setup(struct imx_pcie_priv *priv) | 571 | static int imx_pcie_regions_setup(struct imx_pcie_priv *priv) |
570 | { | 572 | { |
571 | if (priv->io) | 573 | if (priv->io) |
572 | /* ATU : OUTBOUND : IO */ | 574 | /* ATU : OUTBOUND : IO */ |
573 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX2, | 575 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX2, |
574 | PCIE_ATU_TYPE_IO, | 576 | PCIE_ATU_TYPE_IO, |
575 | priv->io->phys_start, | 577 | priv->io->phys_start, |
576 | priv->io->bus_start, | 578 | priv->io->bus_start, |
577 | priv->io->size); | 579 | priv->io->size); |
578 | 580 | ||
579 | if (priv->mem) | 581 | if (priv->mem) |
580 | /* ATU : OUTBOUND : MEM */ | 582 | /* ATU : OUTBOUND : MEM */ |
581 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX0, | 583 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX0, |
582 | PCIE_ATU_TYPE_MEM, | 584 | PCIE_ATU_TYPE_MEM, |
583 | priv->mem->phys_start, | 585 | priv->mem->phys_start, |
584 | priv->mem->bus_start, | 586 | priv->mem->bus_start, |
585 | priv->mem->size); | 587 | priv->mem->size); |
586 | 588 | ||
587 | 589 | ||
588 | return 0; | 590 | return 0; |
589 | } | 591 | } |
590 | 592 | ||
591 | /* | 593 | /* |
592 | * PCI Express accessors | 594 | * PCI Express accessors |
593 | */ | 595 | */ |
594 | static void __iomem *get_bus_address(struct imx_pcie_priv *priv, | 596 | static void __iomem *get_bus_address(struct imx_pcie_priv *priv, |
595 | pci_dev_t d, int where) | 597 | pci_dev_t d, int where) |
596 | { | 598 | { |
597 | void __iomem *va_address; | 599 | void __iomem *va_address; |
598 | 600 | ||
599 | if (PCI_BUS(d) == 0) { | 601 | if (PCI_BUS(d) == 0) { |
600 | /* Outbound TLP matched primary interface of the bridge */ | 602 | /* Outbound TLP matched primary interface of the bridge */ |
601 | va_address = priv->dbi_base; | 603 | va_address = priv->dbi_base; |
602 | } else { | 604 | } else { |
603 | if (PCI_BUS(d) < 2) { | 605 | if (PCI_BUS(d) < 2) { |
604 | /* Outbound TLP matched secondary interface of the bridge changes to CFG0 */ | 606 | /* Outbound TLP matched secondary interface of the bridge changes to CFG0 */ |
605 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX1, | 607 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX1, |
606 | PCIE_ATU_TYPE_CFG0, | 608 | PCIE_ATU_TYPE_CFG0, |
607 | (ulong)priv->cfg_base, | 609 | (ulong)priv->cfg_base, |
608 | d << 8, | 610 | d << 8, |
609 | priv->cfg_size >> 1); | 611 | priv->cfg_size >> 1); |
610 | va_address = priv->cfg_base; | 612 | va_address = priv->cfg_base; |
611 | } else { | 613 | } else { |
612 | /* Outbound TLP matched the bus behind the bridge uses type CFG1 */ | 614 | /* Outbound TLP matched the bus behind the bridge uses type CFG1 */ |
613 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX1, | 615 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX1, |
614 | PCIE_ATU_TYPE_CFG1, | 616 | PCIE_ATU_TYPE_CFG1, |
615 | (ulong)priv->cfg1_base, | 617 | (ulong)priv->cfg1_base, |
616 | d << 8, | 618 | d << 8, |
617 | priv->cfg_size >> 1); | 619 | priv->cfg_size >> 1); |
618 | va_address = priv->cfg1_base; | 620 | va_address = priv->cfg1_base; |
619 | } | 621 | } |
620 | } | 622 | } |
621 | 623 | ||
622 | va_address += (where & ~0x3); | 624 | va_address += (where & ~0x3); |
623 | 625 | ||
624 | return va_address; | 626 | return va_address; |
625 | 627 | ||
626 | } | 628 | } |
627 | 629 | ||
628 | static int imx_pcie_addr_valid(pci_dev_t d) | 630 | static int imx_pcie_addr_valid(pci_dev_t d) |
629 | { | 631 | { |
630 | if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 0)) | 632 | if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 0)) |
631 | return -EINVAL; | 633 | return -EINVAL; |
632 | /* ARI forward is not enabled, so non-zero device at downstream must be blocked */ | 634 | /* ARI forward is not enabled, so non-zero device at downstream must be blocked */ |
633 | if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0)) | 635 | if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0)) |
634 | return -EINVAL; | 636 | return -EINVAL; |
635 | return 0; | 637 | return 0; |
636 | } | 638 | } |
637 | 639 | ||
638 | /* | 640 | /* |
639 | * Replace the original ARM DABT handler with a simple jump-back one. | 641 | * Replace the original ARM DABT handler with a simple jump-back one. |
640 | * | 642 | * |
641 | * The problem here is that if we have a PCIe bridge attached to this PCIe | 643 | * The problem here is that if we have a PCIe bridge attached to this PCIe |
642 | * controller, but no PCIe device is connected to the bridges' downstream | 644 | * controller, but no PCIe device is connected to the bridges' downstream |
643 | * port, the attempt to read/write from/to the config space will produce | 645 | * port, the attempt to read/write from/to the config space will produce |
644 | * a DABT. This is a behavior of the controller and can not be disabled | 646 | * a DABT. This is a behavior of the controller and can not be disabled |
 * unfortunately.
646 | * | 648 | * |
647 | * To work around the problem, we backup the current DABT handler address | 649 | * To work around the problem, we backup the current DABT handler address |
648 | * and replace it with our own DABT handler, which only bounces right back | 650 | * and replace it with our own DABT handler, which only bounces right back |
649 | * into the code. | 651 | * into the code. |
650 | */ | 652 | */ |
/*
 * Install (set == true) or restore (set == false) the bounce DABT
 * handler described in the comment above. Only needed on i.MX6; on
 * other SoCs this is a no-op.
 */
static void imx_pcie_fix_dabt_handler(bool set)
{
#ifdef CONFIG_MX6
	/* Address of the data-abort slot in the exception vector table */
	extern uint32_t *_data_abort;
	uint32_t *data_abort_addr = (uint32_t *)&_data_abort;

	/*
	 * 0xe25ef004 is the ARM encoding of "subs pc, lr, #4": return
	 * from the data abort straight to the instruction after the one
	 * that faulted, effectively ignoring the abort.
	 */
	static const uint32_t data_abort_bounce_handler = 0xe25ef004;
	uint32_t data_abort_bounce_addr = (uint32_t)&data_abort_bounce_handler;

	/* Saved original vector entry; valid between set and restore */
	static uint32_t data_abort_backup;

	if (set) {
		data_abort_backup = *data_abort_addr;
		*data_abort_addr = data_abort_bounce_addr;
	} else {
		*data_abort_addr = data_abort_backup;
	}
#endif
}
670 | 672 | ||
/*
 * Read a 32-bit value from the config space of device @d at register
 * offset @where into @val. Invalid addresses are answered with
 * 0xffffffff (master abort semantics) rather than an error, so bus
 * enumeration can proceed. Always returns 0.
 */
static int imx_pcie_read_cfg(struct imx_pcie_priv *priv, pci_dev_t d,
			     int where, u32 *val)
{
	void __iomem *va_address;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret) {
		/* Unreachable device: report all-ones like real hardware */
		*val = 0xffffffff;
		return 0;
	}

	va_address = get_bus_address(priv, d, where);

	/*
	 * Read the PCIe config space. We must replace the DABT handler
	 * here in case we got data abort from the PCIe controller, see
	 * imx_pcie_fix_dabt_handler() description. Note that writing the
	 * "val" with valid value is also imperative here as in case we
	 * did got DABT, the val would contain random value.
	 */
	imx_pcie_fix_dabt_handler(true);
	/* Pre-seed *val so an aborted read still yields all-ones */
	writel(0xffffffff, val);
	*val = readl(va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}
699 | 701 | ||
/*
 * Write the 32-bit value @val to the config space of device @d at
 * register offset @where. Returns -EINVAL for addresses rejected by
 * imx_pcie_addr_valid(), 0 otherwise.
 */
static int imx_pcie_write_cfg(struct imx_pcie_priv *priv, pci_dev_t d,
			      int where, u32 val)
{
	void __iomem *va_address = NULL;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret)
		return ret;

	va_address = get_bus_address(priv, d, where);

	/*
	 * Write the PCIe config space. We must replace the DABT handler
	 * here in case we got data abort from the PCIe controller, see
	 * imx_pcie_fix_dabt_handler() description.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(val, va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}
723 | 725 | ||
724 | static int imx8_pcie_assert_core_reset(struct imx_pcie_priv *priv, | 726 | static int imx8_pcie_assert_core_reset(struct imx_pcie_priv *priv, |
725 | bool prepare_for_boot) | 727 | bool prepare_for_boot) |
726 | { | 728 | { |
727 | u32 val; | 729 | u32 val; |
728 | 730 | ||
729 | switch (priv->variant) { | 731 | switch (priv->variant) { |
730 | case IMX8QXP: | 732 | case IMX8QXP: |
731 | val = IMX8QM_CSR_PCIEB_OFFSET; | 733 | val = IMX8QM_CSR_PCIEB_OFFSET; |
732 | imx_pcie_gpr_update_bits(priv, | 734 | imx_pcie_gpr_update_bits(priv, |
733 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 735 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
734 | IMX8QM_CTRL_BUTTON_RST_N, | 736 | IMX8QM_CTRL_BUTTON_RST_N, |
735 | IMX8QM_CTRL_BUTTON_RST_N); | 737 | IMX8QM_CTRL_BUTTON_RST_N); |
736 | imx_pcie_gpr_update_bits(priv, | 738 | imx_pcie_gpr_update_bits(priv, |
737 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 739 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
738 | IMX8QM_CTRL_PERST_N, | 740 | IMX8QM_CTRL_PERST_N, |
739 | IMX8QM_CTRL_PERST_N); | 741 | IMX8QM_CTRL_PERST_N); |
740 | imx_pcie_gpr_update_bits(priv, | 742 | imx_pcie_gpr_update_bits(priv, |
741 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 743 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
742 | IMX8QM_CTRL_POWER_UP_RST_N, | 744 | IMX8QM_CTRL_POWER_UP_RST_N, |
743 | IMX8QM_CTRL_POWER_UP_RST_N); | 745 | IMX8QM_CTRL_POWER_UP_RST_N); |
744 | break; | 746 | break; |
745 | case IMX8QM: | 747 | case IMX8QM: |
746 | val = IMX8QM_CSR_PCIEA_OFFSET + priv->ctrl_id * SZ_64K; | 748 | val = IMX8QM_CSR_PCIEA_OFFSET + priv->ctrl_id * SZ_64K; |
747 | imx_pcie_gpr_update_bits(priv, | 749 | imx_pcie_gpr_update_bits(priv, |
748 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 750 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
749 | IMX8QM_CTRL_BUTTON_RST_N, | 751 | IMX8QM_CTRL_BUTTON_RST_N, |
750 | IMX8QM_CTRL_BUTTON_RST_N); | 752 | IMX8QM_CTRL_BUTTON_RST_N); |
751 | imx_pcie_gpr_update_bits(priv, | 753 | imx_pcie_gpr_update_bits(priv, |
752 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 754 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
753 | IMX8QM_CTRL_PERST_N, | 755 | IMX8QM_CTRL_PERST_N, |
754 | IMX8QM_CTRL_PERST_N); | 756 | IMX8QM_CTRL_PERST_N); |
755 | imx_pcie_gpr_update_bits(priv, | 757 | imx_pcie_gpr_update_bits(priv, |
756 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 758 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
757 | IMX8QM_CTRL_POWER_UP_RST_N, | 759 | IMX8QM_CTRL_POWER_UP_RST_N, |
758 | IMX8QM_CTRL_POWER_UP_RST_N); | 760 | IMX8QM_CTRL_POWER_UP_RST_N); |
759 | break; | 761 | break; |
760 | default: | 762 | default: |
761 | break; | 763 | break; |
762 | } | 764 | } |
763 | 765 | ||
764 | return 0; | 766 | return 0; |
765 | } | 767 | } |
766 | 768 | ||
/*
 * Configure the i.MX8QM/QXP HSIO crossbar for the selected PHY/lane
 * muxing (priv->hsio_cfg), select the reference clock source and mark
 * the controller as a PCIe device type via the CSR block. No-op on
 * other variants. Always returns 0.
 */
static int imx8_pcie_init_phy(struct imx_pcie_priv *priv)
{
	u32 tmp, val;

	if (priv->variant == IMX8QM
	    || priv->variant == IMX8QXP) {
		switch (priv->hsio_cfg) {
		case PCIEAX2SATA:
			/*
			 * bit 0 rx ena 1.
			 * bit12 PHY_X1_EPCS_SEL 1.
			 * bit13 phy_ab_select 0.
			 */
			/* Release APB resets for both lanes of PHYX2 */
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_PHYX2_OFFSET,
				IMX8QM_PHYX2_CTRL0_APB_MASK,
				IMX8QM_PHY_APB_RSTN_0
				| IMX8QM_PHY_APB_RSTN_1);

			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_PHYX1_EPCS_SEL,
				IMX8QM_MISC_PHYX1_EPCS_SEL);
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_PCIE_AB_SELECT,
				0);
			break;

		case PCIEAX1PCIEBX1SATA:
			/* Release APB resets for both PHYX2 lanes */
			tmp = IMX8QM_PHY_APB_RSTN_1;
			tmp |= IMX8QM_PHY_APB_RSTN_0;
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_PHYX2_OFFSET,
				IMX8QM_PHYX2_CTRL0_APB_MASK, tmp);

			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_PHYX1_EPCS_SEL,
				IMX8QM_MISC_PHYX1_EPCS_SEL);
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_PCIE_AB_SELECT,
				IMX8QM_MISC_PCIE_AB_SELECT);

			/* Split the two lanes between the two controllers */
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_PHYX2_OFFSET,
				HW_PHYX2_CTRL0_PIPE_LN2LK_MASK,
				HW_PHYX2_CTRL0_PIPE_LN2LK_3 | HW_PHYX2_CTRL0_PIPE_LN2LK_0);

			break;

		case PCIEAX2PCIEBX1:
			/*
			 * bit 0 rx ena 1.
			 * bit12 PHY_X1_EPCS_SEL 0.
			 * bit13 phy_ab_select 1.
			 */
			/* ctrl_id != 0 is PCIEB on PHYX1; PCIEA uses PHYX2 */
			if (priv->ctrl_id)
				imx_pcie_gpr_update_bits(priv,
					IMX8QM_CSR_PHYX1_OFFSET,
					IMX8QM_PHY_APB_RSTN_0,
					IMX8QM_PHY_APB_RSTN_0);
			else
				imx_pcie_gpr_update_bits(priv,
					IMX8QM_CSR_PHYX2_OFFSET,
					IMX8QM_PHYX2_CTRL0_APB_MASK,
					IMX8QM_PHY_APB_RSTN_0
					| IMX8QM_PHY_APB_RSTN_1);

			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_PHYX1_EPCS_SEL,
				0);
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_PCIE_AB_SELECT,
				IMX8QM_MISC_PCIE_AB_SELECT);
			break;
		}

		if (priv->ext_osc) {
			/* External oscillator: enable the pad receiver only */
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_IOB_RXENA,
				IMX8QM_MISC_IOB_RXENA);
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_IOB_TXENA,
				0);
		} else {
			/* Use the internal PLL as the reference clock */
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_IOB_RXENA,
				0);
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_MISC_IOB_TXENA,
				IMX8QM_MISC_IOB_TXENA);
			/* Drive the refclk out through the IOB pad */
			imx_pcie_gpr_update_bits(priv,
				IMX8QM_CSR_MISC_OFFSET,
				IMX8QM_CSR_MISC_IOB_A_0_TXOE
				| IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK,
				IMX8QM_CSR_MISC_IOB_A_0_TXOE
				| IMX8QM_CSR_MISC_IOB_A_0_M1M0_2);
		}

		/* Device type 0x4 = root complex — TODO confirm vs CSR doc */
		val = IMX8QM_CSR_PCIEA_OFFSET
			+ priv->ctrl_id * SZ_64K;
		imx_pcie_gpr_update_bits(priv,
			val, IMX8QM_PCIE_TYPE_MASK,
			0x4 << 24);

		/* Let the PHY settings settle */
		mdelay(10);
	}

	return 0;
}
886 | 888 | ||
/*
 * Poll the HSIO status registers until the TX PLL(s) of the lane(s)
 * used by this controller report lock, then ungate the matching PHY
 * LPCG clocks. Only the PCIEAX1PCIEBX1SATA and PCIEAX2PCIEBX1 muxing
 * configurations are polled; other configurations (and non-8QM/8QXP
 * variants) fall through with retries == 0 and report success.
 *
 * Returns 0 on lock, -ENODEV when the PLL never locks within
 * PHY_PLL_LOCK_WAIT_MAX_RETRIES polls.
 */
static int imx8_pcie_wait_for_phy_pll_lock(struct imx_pcie_priv *priv)
{
	u32 val, tmp, orig;
	unsigned int retries = 0;

	if (priv->variant == IMX8QXP
	    || priv->variant == IMX8QM) {
		for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES;
		     retries++) {
			if (priv->hsio_cfg == PCIEAX1PCIEBX1SATA) {
				/* Lane lock bits live in PHYX2 STTS0 (+0x4) */
				imx_pcie_gpr_read(priv,
					IMX8QM_CSR_PHYX2_OFFSET + 0x4,
					&tmp);
				if (priv->ctrl_id == 0) /* pciea 1 lanes */
					orig = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
				else /* pcieb 1 lanes */
					orig = IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
				tmp &= orig;
				if (tmp == orig) {
					/* Locked: ungate both PHY clocks */
					imx_pcie_gpr_update_bits(priv,
						IMX8QM_LPCG_PHYX2_OFFSET,
						IMX8QM_LPCG_PHY_PCG0
						| IMX8QM_LPCG_PHY_PCG1,
						IMX8QM_LPCG_PHY_PCG0
						| IMX8QM_LPCG_PHY_PCG1);
					break;
				}
			}

			if (priv->hsio_cfg == PCIEAX2PCIEBX1) {
				/* Per-controller PHY CSR block, 64K apart */
				val = IMX8QM_CSR_PHYX2_OFFSET
					+ priv->ctrl_id * SZ_64K;
				imx_pcie_gpr_read(priv,
					val + IMX8QM_CSR_PHYX_STTS0_OFFSET,
					&tmp);
				orig = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
				if (priv->ctrl_id == 0) /* pciea 2 lanes */
					orig |= IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
				tmp &= orig;
				if (tmp == orig) {
					/* Locked: ungate this PHY's clock */
					val = IMX8QM_CSR_PHYX2_OFFSET
						+ priv->ctrl_id * SZ_64K;
					imx_pcie_gpr_update_bits(priv,
						val, IMX8QM_LPCG_PHY_PCG0,
						IMX8QM_LPCG_PHY_PCG0);
					break;
				}
			}
			udelay(10);
		}
	}

	if (retries >= PHY_PLL_LOCK_WAIT_MAX_RETRIES) {
		printf("pcie phy pll can't be locked.\n");
		return -ENODEV;
	} else {
		debug("pcie phy pll is locked.\n");
		return 0;
	}
}
947 | 949 | ||
948 | static int imx8_pcie_deassert_core_reset(struct imx_pcie_priv *priv) | 950 | static int imx8_pcie_deassert_core_reset(struct imx_pcie_priv *priv) |
949 | { | 951 | { |
950 | int ret, i; | 952 | int ret, i; |
951 | u32 val, tmp; | 953 | u32 val, tmp; |
952 | 954 | ||
953 | #if CONFIG_IS_ENABLED(CLK) | 955 | #if CONFIG_IS_ENABLED(CLK) |
954 | ret = clk_enable(&priv->pcie); | 956 | ret = clk_enable(&priv->pcie); |
955 | if (ret) { | 957 | if (ret) { |
956 | printf("unable to enable pcie clock\n"); | 958 | printf("unable to enable pcie clock\n"); |
957 | return ret; | 959 | return ret; |
958 | } | 960 | } |
959 | 961 | ||
960 | ret = clk_enable(&priv->pcie_phy); | 962 | ret = clk_enable(&priv->pcie_phy); |
961 | if (ret) { | 963 | if (ret) { |
962 | printf("unable to enable pcie_phy clock\n"); | 964 | printf("unable to enable pcie_phy clock\n"); |
963 | goto err_pcie; | 965 | goto err_pcie; |
964 | } | 966 | } |
965 | #endif | ||
966 | 967 | ||
967 | if (priv->variant == IMX8QM | 968 | ret = clk_enable(&priv->pcie_bus); |
968 | || priv->variant == IMX8QXP) { | 969 | if (ret) { |
970 | printf("unable to enable pcie_bus clock\n"); | ||
971 | goto err_pcie_phy; | ||
972 | } | ||
969 | 973 | ||
970 | #if CONFIG_IS_ENABLED(CLK) | 974 | ret = clk_enable(&priv->pcie_inbound_axi); |
971 | ret = clk_enable(&priv->pcie_inbound_axi); | 975 | if (ret) { |
976 | printf("unable to enable pcie_axi clock\n"); | ||
977 | goto err_pcie_bus; | ||
978 | } | ||
979 | ret = clk_enable(&priv->pcie_per); | ||
980 | if (ret) { | ||
981 | printf("unable to enable pcie_per clock\n"); | ||
982 | goto err_pcie_inbound_axi; | ||
983 | } | ||
984 | |||
985 | ret = clk_enable(&priv->phy_per); | ||
986 | if (ret) { | ||
987 | printf("unable to enable phy_per clock\n"); | ||
988 | goto err_pcie_per; | ||
989 | } | ||
990 | |||
991 | ret = clk_enable(&priv->misc_per); | ||
992 | if (ret) { | ||
993 | printf("unable to enable misc_per clock\n"); | ||
994 | goto err_phy_per; | ||
995 | } | ||
996 | |||
997 | if (priv->variant == IMX8QM && priv->ctrl_id == 1) { | ||
998 | ret = clk_enable(&priv->pcie_phy_pclk); | ||
972 | if (ret) { | 999 | if (ret) { |
973 | printf("unable to enable pcie_axi clock\n"); | 1000 | printf("unable to enable pcie_phy_pclk clock\n"); |
974 | goto err_pcie_phy; | 1001 | goto err_misc_per; |
975 | } | 1002 | } |
976 | ret = clk_enable(&priv->pcie_per); | 1003 | |
1004 | ret = clk_enable(&priv->pciex2_per); | ||
977 | if (ret) { | 1005 | if (ret) { |
978 | printf("unable to enable pcie_per clock\n"); | 1006 | printf("unable to enable pciex2_per clock\n"); |
979 | clk_disable(&priv->pcie_inbound_axi); | 1007 | clk_disable(&priv->pcie_phy_pclk); |
980 | goto err_pcie_phy; | 1008 | goto err_misc_per; |
981 | } | 1009 | } |
1010 | } | ||
982 | #endif | 1011 | #endif |
983 | /* allow the clocks to stabilize */ | 1012 | /* allow the clocks to stabilize */ |
984 | udelay(200); | 1013 | udelay(200); |
985 | 1014 | ||
986 | /* bit19 PM_REQ_CORE_RST of pciex#_stts0 should be cleared. */ | 1015 | /* bit19 PM_REQ_CORE_RST of pciex#_stts0 should be cleared. */ |
987 | for (i = 0; i < 100; i++) { | 1016 | for (i = 0; i < 100; i++) { |
988 | val = IMX8QM_CSR_PCIEA_OFFSET | 1017 | val = IMX8QM_CSR_PCIEA_OFFSET |
989 | + priv->ctrl_id * SZ_64K; | 1018 | + priv->ctrl_id * SZ_64K; |
990 | imx_pcie_gpr_read(priv, | 1019 | imx_pcie_gpr_read(priv, |
991 | val + IMX8QM_CSR_PCIE_STTS0_OFFSET, | 1020 | val + IMX8QM_CSR_PCIE_STTS0_OFFSET, |
992 | &tmp); | 1021 | &tmp); |
993 | if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) == 0) | 1022 | if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) == 0) |
994 | break; | 1023 | break; |
995 | udelay(10); | 1024 | udelay(10); |
996 | } | 1025 | } |
997 | 1026 | ||
998 | if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) != 0) | 1027 | if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) != 0) |
999 | printf("ERROR PM_REQ_CORE_RST is still set.\n"); | 1028 | printf("ERROR PM_REQ_CORE_RST is still set.\n"); |
1000 | 1029 | ||
1001 | /* wait for phy pll lock firstly. */ | 1030 | /* wait for phy pll lock firstly. */ |
1002 | if (imx8_pcie_wait_for_phy_pll_lock(priv)) { | 1031 | if (imx8_pcie_wait_for_phy_pll_lock(priv)) { |
1003 | ret = -ENODEV; | 1032 | ret = -ENODEV; |
1004 | goto err_ref_clk;; | 1033 | goto err_ref_clk;; |
1005 | } | 1034 | } |
1006 | 1035 | ||
1007 | if (dm_gpio_is_valid(&priv->reset_gpio)) { | 1036 | if (dm_gpio_is_valid(&priv->reset_gpio)) { |
1008 | dm_gpio_set_value(&priv->reset_gpio, 1); | 1037 | dm_gpio_set_value(&priv->reset_gpio, 1); |
1009 | mdelay(20); | 1038 | mdelay(20); |
1010 | dm_gpio_set_value(&priv->reset_gpio, 0); | 1039 | dm_gpio_set_value(&priv->reset_gpio, 0); |
1011 | mdelay(20); | 1040 | mdelay(20); |
1012 | } | ||
1013 | |||
1014 | return 0; | ||
1015 | } | 1041 | } |
1016 | 1042 | ||
1043 | return 0; | ||
1044 | |||
1017 | err_ref_clk: | 1045 | err_ref_clk: |
1018 | #if CONFIG_IS_ENABLED(CLK) | 1046 | #if CONFIG_IS_ENABLED(CLK) |
1047 | if (priv->variant == IMX8QM && priv->ctrl_id == 1) { | ||
1048 | clk_disable(&priv->pciex2_per); | ||
1049 | clk_disable(&priv->pcie_phy_pclk); | ||
1050 | } | ||
1051 | err_misc_per: | ||
1052 | clk_disable(&priv->misc_per); | ||
1053 | err_phy_per: | ||
1054 | clk_disable(&priv->phy_per); | ||
1055 | err_pcie_per: | ||
1019 | clk_disable(&priv->pcie_per); | 1056 | clk_disable(&priv->pcie_per); |
1057 | err_pcie_inbound_axi: | ||
1020 | clk_disable(&priv->pcie_inbound_axi); | 1058 | clk_disable(&priv->pcie_inbound_axi); |
1059 | err_pcie_bus: | ||
1060 | clk_disable(&priv->pcie_bus); | ||
1021 | err_pcie_phy: | 1061 | err_pcie_phy: |
1022 | clk_disable(&priv->pcie_phy); | 1062 | clk_disable(&priv->pcie_phy); |
1023 | err_pcie: | 1063 | err_pcie: |
1024 | clk_disable(&priv->pcie); | 1064 | clk_disable(&priv->pcie); |
1025 | #endif | 1065 | #endif |
1026 | 1066 | ||
1027 | return ret; | 1067 | return ret; |
1028 | } | 1068 | } |
1029 | 1069 | ||
1030 | #ifdef CONFIG_MX6 | 1070 | #ifdef CONFIG_MX6 |
1031 | /* | 1071 | /* |
1032 | * Initial bus setup | 1072 | * Initial bus setup |
1033 | */ | 1073 | */ |
1034 | static int imx6_pcie_assert_core_reset(struct imx_pcie_priv *priv, | 1074 | static int imx6_pcie_assert_core_reset(struct imx_pcie_priv *priv, |
1035 | bool prepare_for_boot) | 1075 | bool prepare_for_boot) |
1036 | { | 1076 | { |
1037 | if (priv->variant == IMX6QP) | 1077 | if (priv->variant == IMX6QP) |
1038 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_PCIE_SW_RST, IOMUXC_GPR1_PCIE_SW_RST); | 1078 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_PCIE_SW_RST, IOMUXC_GPR1_PCIE_SW_RST); |
1039 | 1079 | ||
1040 | #if defined(CONFIG_MX6SX) | 1080 | #if defined(CONFIG_MX6SX) |
1041 | if (priv->variant == IMX6SX) { | 1081 | if (priv->variant == IMX6SX) { |
1042 | struct gpc *gpc_regs = (struct gpc *)GPC_BASE_ADDR; | 1082 | struct gpc *gpc_regs = (struct gpc *)GPC_BASE_ADDR; |
1043 | 1083 | ||
1044 | /* SSP_EN is not used on MX6SX anymore */ | 1084 | /* SSP_EN is not used on MX6SX anymore */ |
1045 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_TEST_POWERDOWN, IOMUXC_GPR12_TEST_POWERDOWN); | 1085 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_TEST_POWERDOWN, IOMUXC_GPR12_TEST_POWERDOWN); |
1046 | /* Force PCIe PHY reset */ | 1086 | /* Force PCIe PHY reset */ |
1047 | imx_pcie_gpr_update_bits(priv, 20, IOMUXC_GPR5_PCIE_BTNRST, IOMUXC_GPR5_PCIE_BTNRST); | 1087 | imx_pcie_gpr_update_bits(priv, 20, IOMUXC_GPR5_PCIE_BTNRST, IOMUXC_GPR5_PCIE_BTNRST); |
1048 | /* Power up PCIe PHY */ | 1088 | /* Power up PCIe PHY */ |
1049 | setbits_le32(&gpc_regs->cntr, PCIE_PHY_PUP_REQ); | 1089 | setbits_le32(&gpc_regs->cntr, PCIE_PHY_PUP_REQ); |
1050 | pcie_power_up(); | 1090 | pcie_power_up(); |
1051 | 1091 | ||
1052 | return 0; | 1092 | return 0; |
1053 | } | 1093 | } |
1054 | #endif | 1094 | #endif |
1055 | /* | 1095 | /* |
1056 | * If the bootloader already enabled the link we need some special | 1096 | * If the bootloader already enabled the link we need some special |
1057 | * handling to get the core back into a state where it is safe to | 1097 | * handling to get the core back into a state where it is safe to |
1058 | * touch it for configuration. As there is no dedicated reset signal | 1098 | * touch it for configuration. As there is no dedicated reset signal |
1059 | * wired up for MX6QDL, we need to manually force LTSSM into "detect" | 1099 | * wired up for MX6QDL, we need to manually force LTSSM into "detect" |
1060 | * state before completely disabling LTSSM, which is a prerequisite | 1100 | * state before completely disabling LTSSM, which is a prerequisite |
1061 | * for core configuration. | 1101 | * for core configuration. |
1062 | * | 1102 | * |
1063 | * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong | 1103 | * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong |
1064 | * indication that the bootloader activated the link. | 1104 | * indication that the bootloader activated the link. |
1065 | */ | 1105 | */ |
1066 | if (priv->variant == IMX6Q && prepare_for_boot) { | 1106 | if (priv->variant == IMX6Q && prepare_for_boot) { |
1067 | u32 val, gpr1, gpr12; | 1107 | u32 val, gpr1, gpr12; |
1068 | 1108 | ||
1069 | imx_pcie_gpr_read(priv, 4, &gpr1); | 1109 | imx_pcie_gpr_read(priv, 4, &gpr1); |
1070 | imx_pcie_gpr_read(priv, 48, &gpr12); | 1110 | imx_pcie_gpr_read(priv, 48, &gpr12); |
1071 | if ((gpr1 & IOMUXC_GPR1_PCIE_REF_CLK_EN) && | 1111 | if ((gpr1 & IOMUXC_GPR1_PCIE_REF_CLK_EN) && |
1072 | (gpr12 & IOMUXC_GPR12_PCIE_CTL_2)) { | 1112 | (gpr12 & IOMUXC_GPR12_PCIE_CTL_2)) { |
1073 | val = readl(priv->dbi_base + PCIE_PL_PFLR); | 1113 | val = readl(priv->dbi_base + PCIE_PL_PFLR); |
1074 | val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; | 1114 | val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; |
1075 | val |= PCIE_PL_PFLR_FORCE_LINK; | 1115 | val |= PCIE_PL_PFLR_FORCE_LINK; |
1076 | 1116 | ||
1077 | imx_pcie_fix_dabt_handler(true); | 1117 | imx_pcie_fix_dabt_handler(true); |
1078 | writel(val, priv->dbi_base + PCIE_PL_PFLR); | 1118 | writel(val, priv->dbi_base + PCIE_PL_PFLR); |
1079 | imx_pcie_fix_dabt_handler(false); | 1119 | imx_pcie_fix_dabt_handler(false); |
1080 | 1120 | ||
1081 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_PCIE_CTL_2, 0); | 1121 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_PCIE_CTL_2, 0); |
1082 | } | 1122 | } |
1083 | } | 1123 | } |
1084 | 1124 | ||
1085 | if (priv->variant == IMX6QP || priv->variant == IMX6Q) { | 1125 | if (priv->variant == IMX6QP || priv->variant == IMX6Q) { |
1086 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_TEST_POWERDOWN, | 1126 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_TEST_POWERDOWN, |
1087 | IOMUXC_GPR1_TEST_POWERDOWN); | 1127 | IOMUXC_GPR1_TEST_POWERDOWN); |
1088 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_REF_SSP_EN, 0); | 1128 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_REF_SSP_EN, 0); |
1089 | } | 1129 | } |
1090 | 1130 | ||
1091 | return 0; | 1131 | return 0; |
1092 | } | 1132 | } |
1093 | 1133 | ||
1094 | static int imx6_pcie_init_phy(struct imx_pcie_priv *priv) | 1134 | static int imx6_pcie_init_phy(struct imx_pcie_priv *priv) |
1095 | { | 1135 | { |
1096 | #ifndef DEBUG | 1136 | #ifndef DEBUG |
1097 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, 0); | 1137 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, 0); |
1098 | #endif | 1138 | #endif |
1099 | 1139 | ||
1100 | imx_pcie_gpr_update_bits(priv, 48, | 1140 | imx_pcie_gpr_update_bits(priv, 48, |
1101 | IOMUXC_GPR12_DEVICE_TYPE_MASK, | 1141 | IOMUXC_GPR12_DEVICE_TYPE_MASK, |
1102 | IOMUXC_GPR12_DEVICE_TYPE_RC); | 1142 | IOMUXC_GPR12_DEVICE_TYPE_RC); |
1103 | imx_pcie_gpr_update_bits(priv, 48, | 1143 | imx_pcie_gpr_update_bits(priv, 48, |
1104 | IOMUXC_GPR12_LOS_LEVEL_MASK, | 1144 | IOMUXC_GPR12_LOS_LEVEL_MASK, |
1105 | IOMUXC_GPR12_LOS_LEVEL_9); | 1145 | IOMUXC_GPR12_LOS_LEVEL_9); |
1106 | 1146 | ||
1107 | if (priv->variant == IMX6SX) { | 1147 | if (priv->variant == IMX6SX) { |
1108 | imx_pcie_gpr_update_bits(priv, 48, | 1148 | imx_pcie_gpr_update_bits(priv, 48, |
1109 | IOMUXC_GPR12_RX_EQ_MASK, | 1149 | IOMUXC_GPR12_RX_EQ_MASK, |
1110 | IOMUXC_GPR12_RX_EQ_2); | 1150 | IOMUXC_GPR12_RX_EQ_2); |
1111 | } | 1151 | } |
1112 | 1152 | ||
1113 | imx_pcie_gpr_update_bits(priv, 32, 0xffffffff, | 1153 | imx_pcie_gpr_update_bits(priv, 32, 0xffffffff, |
1114 | (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) | | 1154 | (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) | |
1115 | (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) | | 1155 | (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) | |
1116 | (20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) | | 1156 | (20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) | |
1117 | (127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) | | 1157 | (127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) | |
1118 | (127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET)); | 1158 | (127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET)); |
1119 | 1159 | ||
1120 | return 0; | 1160 | return 0; |
1121 | } | 1161 | } |
1122 | 1162 | ||
1123 | __weak int imx6_pcie_toggle_power(void) | 1163 | __weak int imx6_pcie_toggle_power(void) |
1124 | { | 1164 | { |
1125 | #ifdef CONFIG_PCIE_IMX_POWER_GPIO | 1165 | #ifdef CONFIG_PCIE_IMX_POWER_GPIO |
1126 | gpio_request(CONFIG_PCIE_IMX_POWER_GPIO, "pcie_power"); | 1166 | gpio_request(CONFIG_PCIE_IMX_POWER_GPIO, "pcie_power"); |
1127 | gpio_direction_output(CONFIG_PCIE_IMX_POWER_GPIO, 0); | 1167 | gpio_direction_output(CONFIG_PCIE_IMX_POWER_GPIO, 0); |
1128 | mdelay(20); | 1168 | mdelay(20); |
1129 | gpio_set_value(CONFIG_PCIE_IMX_POWER_GPIO, 1); | 1169 | gpio_set_value(CONFIG_PCIE_IMX_POWER_GPIO, 1); |
1130 | mdelay(20); | 1170 | mdelay(20); |
1131 | gpio_free(CONFIG_PCIE_IMX_POWER_GPIO); | 1171 | gpio_free(CONFIG_PCIE_IMX_POWER_GPIO); |
1132 | #endif | 1172 | #endif |
1133 | return 0; | 1173 | return 0; |
1134 | } | 1174 | } |
1135 | 1175 | ||
1136 | __weak int imx6_pcie_toggle_reset(void) | 1176 | __weak int imx6_pcie_toggle_reset(void) |
1137 | { | 1177 | { |
1138 | /* | 1178 | /* |
1139 | * See 'PCI EXPRESS BASE SPECIFICATION, REV 3.0, SECTION 6.6.1' | 1179 | * See 'PCI EXPRESS BASE SPECIFICATION, REV 3.0, SECTION 6.6.1' |
1140 | * for detailed understanding of the PCIe CR reset logic. | 1180 | * for detailed understanding of the PCIe CR reset logic. |
1141 | * | 1181 | * |
1142 | * The PCIe #PERST reset line _MUST_ be connected, otherwise your | 1182 | * The PCIe #PERST reset line _MUST_ be connected, otherwise your |
1143 | * design does not conform to the specification. You must wait at | 1183 | * design does not conform to the specification. You must wait at |
1144 | * least 20 ms after de-asserting the #PERST so the EP device can | 1184 | * least 20 ms after de-asserting the #PERST so the EP device can |
1145 | * do self-initialisation. | 1185 | * do self-initialisation. |
1146 | * | 1186 | * |
1147 | * In case your #PERST pin is connected to a plain GPIO pin of the | 1187 | * In case your #PERST pin is connected to a plain GPIO pin of the |
1148 | * CPU, you can define CONFIG_PCIE_IMX_PERST_GPIO in your board's | 1188 | * CPU, you can define CONFIG_PCIE_IMX_PERST_GPIO in your board's |
1149 | * configuration file and the condition below will handle the rest | 1189 | * configuration file and the condition below will handle the rest |
1150 | * of the reset toggling. | 1190 | * of the reset toggling. |
1151 | * | 1191 | * |
1152 | * In case your #PERST toggling logic is more complex, for example | 1192 | * In case your #PERST toggling logic is more complex, for example |
1153 | * connected via CPLD or somesuch, you can override this function | 1193 | * connected via CPLD or somesuch, you can override this function |
1154 | * in your board file and implement reset logic as needed. You must | 1194 | * in your board file and implement reset logic as needed. You must |
1155 | * not forget to wait at least 20 ms after de-asserting #PERST in | 1195 | * not forget to wait at least 20 ms after de-asserting #PERST in |
1156 | * this case either though. | 1196 | * this case either though. |
1157 | * | 1197 | * |
1158 | * In case your #PERST line of the PCIe EP device is not connected | 1198 | * In case your #PERST line of the PCIe EP device is not connected |
1159 | * at all, your design is broken and you should fix your design, | 1199 | * at all, your design is broken and you should fix your design, |
1160 | * otherwise you will observe problems like for example the link | 1200 | * otherwise you will observe problems like for example the link |
1161 | * not coming up after rebooting the system back from running Linux | 1201 | * not coming up after rebooting the system back from running Linux |
1162 | * that uses the PCIe as well OR the PCIe link might not come up in | 1202 | * that uses the PCIe as well OR the PCIe link might not come up in |
1163 | * Linux at all in the first place since it's in some non-reset | 1203 | * Linux at all in the first place since it's in some non-reset |
1164 | * state due to being previously used in U-Boot. | 1204 | * state due to being previously used in U-Boot. |
1165 | */ | 1205 | */ |
1166 | #ifdef CONFIG_PCIE_IMX_PERST_GPIO | 1206 | #ifdef CONFIG_PCIE_IMX_PERST_GPIO |
1167 | gpio_request(CONFIG_PCIE_IMX_PERST_GPIO, "pcie_reset"); | 1207 | gpio_request(CONFIG_PCIE_IMX_PERST_GPIO, "pcie_reset"); |
1168 | gpio_direction_output(CONFIG_PCIE_IMX_PERST_GPIO, 0); | 1208 | gpio_direction_output(CONFIG_PCIE_IMX_PERST_GPIO, 0); |
1169 | mdelay(20); | 1209 | mdelay(20); |
1170 | gpio_set_value(CONFIG_PCIE_IMX_PERST_GPIO, 1); | 1210 | gpio_set_value(CONFIG_PCIE_IMX_PERST_GPIO, 1); |
1171 | mdelay(20); | 1211 | mdelay(20); |
1172 | gpio_free(CONFIG_PCIE_IMX_PERST_GPIO); | 1212 | gpio_free(CONFIG_PCIE_IMX_PERST_GPIO); |
1173 | #else | 1213 | #else |
1174 | puts("WARNING: Make sure the PCIe #PERST line is connected!\n"); | 1214 | puts("WARNING: Make sure the PCIe #PERST line is connected!\n"); |
1175 | #endif | 1215 | #endif |
1176 | 1216 | ||
1177 | return 0; | 1217 | return 0; |
1178 | } | 1218 | } |
1179 | 1219 | ||
1180 | static int imx6_pcie_deassert_core_reset(struct imx_pcie_priv *priv) | 1220 | static int imx6_pcie_deassert_core_reset(struct imx_pcie_priv *priv) |
1181 | { | 1221 | { |
1182 | #if !CONFIG_IS_ENABLED(DM_PCI) | 1222 | #if !CONFIG_IS_ENABLED(DM_PCI) |
1183 | imx6_pcie_toggle_power(); | 1223 | imx6_pcie_toggle_power(); |
1184 | #endif | 1224 | #endif |
1185 | 1225 | ||
1186 | enable_pcie_clock(); | 1226 | enable_pcie_clock(); |
1187 | 1227 | ||
1188 | if (priv->variant == IMX6QP) | 1228 | if (priv->variant == IMX6QP) |
1189 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_PCIE_SW_RST, 0); | 1229 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_PCIE_SW_RST, 0); |
1190 | 1230 | ||
1191 | /* | 1231 | /* |
1192 | * Wait for the clock to settle a bit, when the clock are sourced | 1232 | * Wait for the clock to settle a bit, when the clock are sourced |
1193 | * from the CPU, we need about 30 ms to settle. | 1233 | * from the CPU, we need about 30 ms to settle. |
1194 | */ | 1234 | */ |
1195 | mdelay(50); | 1235 | mdelay(50); |
1196 | 1236 | ||
1197 | if (priv->variant == IMX6SX) { | 1237 | if (priv->variant == IMX6SX) { |
1198 | /* SSP_EN is not used on MX6SX anymore */ | 1238 | /* SSP_EN is not used on MX6SX anymore */ |
1199 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_TEST_POWERDOWN, 0); | 1239 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_TEST_POWERDOWN, 0); |
1200 | /* Clear PCIe PHY reset bit */ | 1240 | /* Clear PCIe PHY reset bit */ |
1201 | imx_pcie_gpr_update_bits(priv, 20, IOMUXC_GPR5_PCIE_BTNRST, 0); | 1241 | imx_pcie_gpr_update_bits(priv, 20, IOMUXC_GPR5_PCIE_BTNRST, 0); |
1202 | } else { | 1242 | } else { |
1203 | /* Enable PCIe */ | 1243 | /* Enable PCIe */ |
1204 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_TEST_POWERDOWN, 0); | 1244 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_TEST_POWERDOWN, 0); |
1205 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_REF_SSP_EN, IOMUXC_GPR1_REF_SSP_EN); | 1245 | imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_REF_SSP_EN, IOMUXC_GPR1_REF_SSP_EN); |
1206 | } | 1246 | } |
1207 | 1247 | ||
1208 | #if !CONFIG_IS_ENABLED(DM_PCI) | 1248 | #if !CONFIG_IS_ENABLED(DM_PCI) |
1209 | imx6_pcie_toggle_reset(); | 1249 | imx6_pcie_toggle_reset(); |
1210 | #else | 1250 | #else |
1211 | if (dm_gpio_is_valid(&priv->reset_gpio)) { | 1251 | if (dm_gpio_is_valid(&priv->reset_gpio)) { |
1212 | dm_gpio_set_value(&priv->reset_gpio, 1); | 1252 | dm_gpio_set_value(&priv->reset_gpio, 1); |
1213 | mdelay(20); | 1253 | mdelay(20); |
1214 | dm_gpio_set_value(&priv->reset_gpio, 0); | 1254 | dm_gpio_set_value(&priv->reset_gpio, 0); |
1215 | mdelay(20); | 1255 | mdelay(20); |
1216 | } | 1256 | } |
1217 | #endif | 1257 | #endif |
1218 | 1258 | ||
1219 | return 0; | 1259 | return 0; |
1220 | } | 1260 | } |
1221 | #endif | 1261 | #endif |
1222 | 1262 | ||
1223 | static int imx_pcie_assert_core_reset(struct imx_pcie_priv *priv, | 1263 | static int imx_pcie_assert_core_reset(struct imx_pcie_priv *priv, |
1224 | bool prepare_for_boot) | 1264 | bool prepare_for_boot) |
1225 | { | 1265 | { |
1226 | switch (priv->variant) { | 1266 | switch (priv->variant) { |
1227 | #ifdef CONFIG_MX6 | 1267 | #ifdef CONFIG_MX6 |
1228 | case IMX6Q: | 1268 | case IMX6Q: |
1229 | case IMX6QP: | 1269 | case IMX6QP: |
1230 | case IMX6SX: | 1270 | case IMX6SX: |
1231 | return imx6_pcie_assert_core_reset(priv, prepare_for_boot); | 1271 | return imx6_pcie_assert_core_reset(priv, prepare_for_boot); |
1232 | #endif | 1272 | #endif |
1233 | case IMX8QM: | 1273 | case IMX8QM: |
1234 | case IMX8QXP: | 1274 | case IMX8QXP: |
1235 | return imx8_pcie_assert_core_reset(priv, prepare_for_boot); | 1275 | return imx8_pcie_assert_core_reset(priv, prepare_for_boot); |
1236 | default: | 1276 | default: |
1237 | return -EPERM; | 1277 | return -EPERM; |
1238 | } | 1278 | } |
1239 | } | 1279 | } |
1240 | 1280 | ||
1241 | static int imx_pcie_init_phy(struct imx_pcie_priv *priv) | 1281 | static int imx_pcie_init_phy(struct imx_pcie_priv *priv) |
1242 | { | 1282 | { |
1243 | switch (priv->variant) { | 1283 | switch (priv->variant) { |
1244 | #ifdef CONFIG_MX6 | 1284 | #ifdef CONFIG_MX6 |
1245 | case IMX6Q: | 1285 | case IMX6Q: |
1246 | case IMX6QP: | 1286 | case IMX6QP: |
1247 | case IMX6SX: | 1287 | case IMX6SX: |
1248 | return imx6_pcie_init_phy(priv); | 1288 | return imx6_pcie_init_phy(priv); |
1249 | #endif | 1289 | #endif |
1250 | case IMX8QM: | 1290 | case IMX8QM: |
1251 | case IMX8QXP: | 1291 | case IMX8QXP: |
1252 | return imx8_pcie_init_phy(priv); | 1292 | return imx8_pcie_init_phy(priv); |
1253 | default: | 1293 | default: |
1254 | return -EPERM; | 1294 | return -EPERM; |
1255 | } | 1295 | } |
1256 | } | 1296 | } |
1257 | 1297 | ||
1258 | static int imx_pcie_deassert_core_reset(struct imx_pcie_priv *priv) | 1298 | static int imx_pcie_deassert_core_reset(struct imx_pcie_priv *priv) |
1259 | { | 1299 | { |
1260 | switch (priv->variant) { | 1300 | switch (priv->variant) { |
1261 | #ifdef CONFIG_MX6 | 1301 | #ifdef CONFIG_MX6 |
1262 | case IMX6Q: | 1302 | case IMX6Q: |
1263 | case IMX6QP: | 1303 | case IMX6QP: |
1264 | case IMX6SX: | 1304 | case IMX6SX: |
1265 | return imx6_pcie_deassert_core_reset(priv); | 1305 | return imx6_pcie_deassert_core_reset(priv); |
1266 | #endif | 1306 | #endif |
1267 | case IMX8QM: | 1307 | case IMX8QM: |
1268 | case IMX8QXP: | 1308 | case IMX8QXP: |
1269 | return imx8_pcie_deassert_core_reset(priv); | 1309 | return imx8_pcie_deassert_core_reset(priv); |
1270 | default: | 1310 | default: |
1271 | return -EPERM; | 1311 | return -EPERM; |
1272 | } | 1312 | } |
1273 | } | 1313 | } |
1274 | 1314 | ||
1275 | static void imx_pcie_ltssm_enable(struct imx_pcie_priv *priv, bool enable) | 1315 | static void imx_pcie_ltssm_enable(struct imx_pcie_priv *priv, bool enable) |
1276 | { | 1316 | { |
1277 | u32 val; | 1317 | u32 val; |
1278 | 1318 | ||
1279 | switch (priv->variant) { | 1319 | switch (priv->variant) { |
1280 | #ifdef CONFIG_MX6 | 1320 | #ifdef CONFIG_MX6 |
1281 | case IMX6Q: | 1321 | case IMX6Q: |
1282 | case IMX6SX: | 1322 | case IMX6SX: |
1283 | case IMX6QP: | 1323 | case IMX6QP: |
1284 | if (enable) | 1324 | if (enable) |
1285 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, | 1325 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, |
1286 | IOMUXC_GPR12_APPS_LTSSM_ENABLE); /* LTSSM enable, starting link. */ | 1326 | IOMUXC_GPR12_APPS_LTSSM_ENABLE); /* LTSSM enable, starting link. */ |
1287 | else | 1327 | else |
1288 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, 0); | 1328 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, 0); |
1289 | 1329 | ||
1290 | break; | 1330 | break; |
1291 | #endif | 1331 | #endif |
1292 | case IMX8QXP: | 1332 | case IMX8QXP: |
1293 | case IMX8QM: | 1333 | case IMX8QM: |
1294 | /* Bit4 of the CTRL2 */ | 1334 | /* Bit4 of the CTRL2 */ |
1295 | val = IMX8QM_CSR_PCIEA_OFFSET | 1335 | val = IMX8QM_CSR_PCIEA_OFFSET |
1296 | + priv->ctrl_id * SZ_64K; | 1336 | + priv->ctrl_id * SZ_64K; |
1297 | if (enable) { | 1337 | if (enable) { |
1298 | imx_pcie_gpr_update_bits(priv, | 1338 | imx_pcie_gpr_update_bits(priv, |
1299 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 1339 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
1300 | IMX8QM_CTRL_LTSSM_ENABLE, | 1340 | IMX8QM_CTRL_LTSSM_ENABLE, |
1301 | IMX8QM_CTRL_LTSSM_ENABLE); | 1341 | IMX8QM_CTRL_LTSSM_ENABLE); |
1302 | } else { | 1342 | } else { |
1303 | imx_pcie_gpr_update_bits(priv, | 1343 | imx_pcie_gpr_update_bits(priv, |
1304 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 1344 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
1305 | IMX8QM_CTRL_LTSSM_ENABLE, | 1345 | IMX8QM_CTRL_LTSSM_ENABLE, |
1306 | 0); | 1346 | 0); |
1307 | } | 1347 | } |
1308 | break; | 1348 | break; |
1309 | default: | 1349 | default: |
1310 | break; | 1350 | break; |
1311 | } | 1351 | } |
1312 | 1352 | ||
1313 | } | 1353 | } |
1314 | 1354 | ||
1315 | 1355 | ||
1316 | static int imx_pcie_link_up(struct imx_pcie_priv *priv) | 1356 | static int imx_pcie_link_up(struct imx_pcie_priv *priv) |
1317 | { | 1357 | { |
1318 | uint32_t tmp; | 1358 | uint32_t tmp; |
1319 | int count = 0; | 1359 | int count = 0; |
1320 | 1360 | ||
1321 | imx_pcie_assert_core_reset(priv, false); | 1361 | imx_pcie_assert_core_reset(priv, false); |
1322 | imx_pcie_init_phy(priv); | 1362 | imx_pcie_init_phy(priv); |
1323 | imx_pcie_deassert_core_reset(priv); | 1363 | imx_pcie_deassert_core_reset(priv); |
1324 | 1364 | ||
1325 | imx_pcie_setup_ctrl(priv); | 1365 | imx_pcie_setup_ctrl(priv); |
1326 | imx_pcie_regions_setup(priv); | 1366 | imx_pcie_regions_setup(priv); |
1327 | 1367 | ||
1328 | /* | 1368 | /* |
1329 | * By default, the subordinate is set equally to the secondary | 1369 | * By default, the subordinate is set equally to the secondary |
1330 | * bus (0x01) when the RC boots. | 1370 | * bus (0x01) when the RC boots. |
1331 | * This means that theoretically, only bus 1 is reachable from the RC. | 1371 | * This means that theoretically, only bus 1 is reachable from the RC. |
1332 | * Force the PCIe RC subordinate to 0xff, otherwise no downstream | 1372 | * Force the PCIe RC subordinate to 0xff, otherwise no downstream |
1333 | * devices will be detected if the enumeration is applied strictly. | 1373 | * devices will be detected if the enumeration is applied strictly. |
1334 | */ | 1374 | */ |
1335 | tmp = readl(priv->dbi_base + 0x18); | 1375 | tmp = readl(priv->dbi_base + 0x18); |
1336 | tmp |= (0xff << 16); | 1376 | tmp |= (0xff << 16); |
1337 | writel(tmp, priv->dbi_base + 0x18); | 1377 | writel(tmp, priv->dbi_base + 0x18); |
1338 | 1378 | ||
1339 | /* | 1379 | /* |
1340 | * FIXME: Force the PCIe RC to Gen1 operation | 1380 | * FIXME: Force the PCIe RC to Gen1 operation |
1341 | * The RC must be forced into Gen1 mode before bringing the link | 1381 | * The RC must be forced into Gen1 mode before bringing the link |
1342 | * up, otherwise no downstream devices are detected. After the | 1382 | * up, otherwise no downstream devices are detected. After the |
1343 | * link is up, a managed Gen1->Gen2 transition can be initiated. | 1383 | * link is up, a managed Gen1->Gen2 transition can be initiated. |
1344 | */ | 1384 | */ |
1345 | tmp = readl(priv->dbi_base + 0x7c); | 1385 | tmp = readl(priv->dbi_base + 0x7c); |
1346 | tmp &= ~0xf; | 1386 | tmp &= ~0xf; |
1347 | tmp |= 0x1; | 1387 | tmp |= 0x1; |
1348 | writel(tmp, priv->dbi_base + 0x7c); | 1388 | writel(tmp, priv->dbi_base + 0x7c); |
1349 | 1389 | ||
1350 | /* LTSSM enable, starting link. */ | 1390 | /* LTSSM enable, starting link. */ |
1351 | imx_pcie_ltssm_enable(priv, true); | 1391 | imx_pcie_ltssm_enable(priv, true); |
1352 | 1392 | ||
1353 | while (!imx6_pcie_link_up(priv)) { | 1393 | while (!imx6_pcie_link_up(priv)) { |
1354 | udelay(10); | 1394 | udelay(10); |
1355 | count++; | 1395 | count++; |
1356 | if (count == 1000) { | 1396 | if (count == 1000) { |
1357 | print_regs(1); | 1397 | print_regs(1); |
1358 | /* link down, try reset ep, and re-try link here */ | 1398 | /* link down, try reset ep, and re-try link here */ |
1359 | DBGF("pcie link is down, reset ep, then retry!\n"); | 1399 | DBGF("pcie link is down, reset ep, then retry!\n"); |
1360 | 1400 | ||
1361 | #if CONFIG_IS_ENABLED(DM_PCI) | 1401 | #if CONFIG_IS_ENABLED(DM_PCI) |
1362 | if (dm_gpio_is_valid(&priv->reset_gpio)) { | 1402 | if (dm_gpio_is_valid(&priv->reset_gpio)) { |
1363 | dm_gpio_set_value(&priv->reset_gpio, 1); | 1403 | dm_gpio_set_value(&priv->reset_gpio, 1); |
1364 | mdelay(20); | 1404 | mdelay(20); |
1365 | dm_gpio_set_value(&priv->reset_gpio, 0); | 1405 | dm_gpio_set_value(&priv->reset_gpio, 0); |
1366 | mdelay(20); | 1406 | mdelay(20); |
1367 | } | 1407 | } |
1368 | #elif defined(CONFIG_MX6) | 1408 | #elif defined(CONFIG_MX6) |
1369 | imx6_pcie_toggle_reset(); | 1409 | imx6_pcie_toggle_reset(); |
1370 | #endif | 1410 | #endif |
1371 | continue; | 1411 | continue; |
1372 | } | 1412 | } |
1373 | #ifdef DEBUG | 1413 | #ifdef DEBUG |
1374 | else if (count >= 2000) { | 1414 | else if (count >= 2000) { |
1375 | print_regs(1); | 1415 | print_regs(1); |
1376 | /* link is down, stop here */ | 1416 | /* link is down, stop here */ |
1377 | env_set("bootcmd", "sleep 2;"); | 1417 | env_set("bootcmd", "sleep 2;"); |
1378 | DBGF("pcie link is down, stop here!\n"); | 1418 | DBGF("pcie link is down, stop here!\n"); |
1379 | imx_pcie_ltssm_enable(priv, false); | 1419 | imx_pcie_ltssm_enable(priv, false); |
1380 | return -EINVAL; | 1420 | return -EINVAL; |
1381 | } | 1421 | } |
1382 | #endif | 1422 | #endif |
1383 | if (count >= 4000) { | 1423 | if (count >= 4000) { |
1384 | #ifdef CONFIG_PCI_SCAN_SHOW | 1424 | #ifdef CONFIG_PCI_SCAN_SHOW |
1385 | puts("PCI: pcie phy link never came up\n"); | 1425 | puts("PCI: pcie phy link never came up\n"); |
1386 | #endif | 1426 | #endif |
1387 | debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", | 1427 | debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", |
1388 | readl(priv->dbi_base + PCIE_PHY_DEBUG_R0), | 1428 | readl(priv->dbi_base + PCIE_PHY_DEBUG_R0), |
1389 | readl(priv->dbi_base + PCIE_PHY_DEBUG_R1)); | 1429 | readl(priv->dbi_base + PCIE_PHY_DEBUG_R1)); |
1390 | imx_pcie_ltssm_enable(priv, false); | 1430 | imx_pcie_ltssm_enable(priv, false); |
1391 | return -EINVAL; | 1431 | return -EINVAL; |
1392 | } | 1432 | } |
1393 | } | 1433 | } |
1394 | 1434 | ||
1395 | return 0; | 1435 | return 0; |
1396 | } | 1436 | } |
1397 | 1437 | ||
1398 | #if !CONFIG_IS_ENABLED(DM_PCI) | 1438 | #if !CONFIG_IS_ENABLED(DM_PCI) |
1399 | static struct imx_pcie_priv imx_pcie_priv = { | 1439 | static struct imx_pcie_priv imx_pcie_priv = { |
1400 | .dbi_base = (void __iomem *)MX6_DBI_ADDR, | 1440 | .dbi_base = (void __iomem *)MX6_DBI_ADDR, |
1401 | .cfg_base = (void __iomem *)MX6_ROOT_ADDR, | 1441 | .cfg_base = (void __iomem *)MX6_ROOT_ADDR, |
1402 | .cfg1_base = (void __iomem *)(MX6_ROOT_ADDR + MX6_ROOT_SIZE / 2), | 1442 | .cfg1_base = (void __iomem *)(MX6_ROOT_ADDR + MX6_ROOT_SIZE / 2), |
1403 | .cfg_size = MX6_ROOT_SIZE, | 1443 | .cfg_size = MX6_ROOT_SIZE, |
1404 | .lanes = 1, | 1444 | .lanes = 1, |
1405 | }; | 1445 | }; |
1406 | 1446 | ||
1407 | static struct imx_pcie_priv *priv = &imx_pcie_priv; | 1447 | static struct imx_pcie_priv *priv = &imx_pcie_priv; |
1408 | 1448 | ||
1409 | 1449 | ||
1410 | static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d, | 1450 | static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d, |
1411 | int where, u32 *val) | 1451 | int where, u32 *val) |
1412 | { | 1452 | { |
1413 | struct imx_pcie_priv *priv = hose->priv_data; | 1453 | struct imx_pcie_priv *priv = hose->priv_data; |
1414 | 1454 | ||
1415 | return imx_pcie_read_cfg(priv, d, where, val); | 1455 | return imx_pcie_read_cfg(priv, d, where, val); |
1416 | } | 1456 | } |
1417 | 1457 | ||
1418 | static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d, | 1458 | static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d, |
1419 | int where, u32 val) | 1459 | int where, u32 val) |
1420 | { | 1460 | { |
1421 | struct imx_pcie_priv *priv = hose->priv_data; | 1461 | struct imx_pcie_priv *priv = hose->priv_data; |
1422 | 1462 | ||
1423 | return imx_pcie_write_cfg(priv, d, where, val); | 1463 | return imx_pcie_write_cfg(priv, d, where, val); |
1424 | } | 1464 | } |
1425 | 1465 | ||
/*
 * Legacy (non-DM) entry point: set up the i.MX6 PCIe root complex and
 * register it as a U-Boot PCI controller.
 *
 * NOTE(review): relies on a file-scope 'priv' defined earlier in this
 * file, and uses a function-local static pci_controller, so it is only
 * safe to call once.
 */
void imx_pcie_init(void)
{
	/* Static instance of the controller. */
	static struct pci_controller pcc;
	struct pci_controller *hose = &pcc;
	int ret;
#ifdef DEBUG_STRESS_WR
	/*
	 * Link-bringup stress counters kept in an SNVS scratch register:
	 * low 16 bits count attempts, high 16 bits count successes.
	 */
	u32 dbg_reg_addr = SNVS_LPGRP;
	u32 dbg_reg = readl(dbg_reg_addr) + 1;
#endif

	memset(&pcc, 0, sizeof(pcc));

	/* Pick the SoC variant; only i.MX6 flavours use this legacy path. */
	if (is_mx6sx())
		priv->variant = IMX6SX;
	else if (is_mx6dqp())
		priv->variant = IMX6QP;
	else
		priv->variant = IMX6Q;

	hose->priv_data = priv;

	/* PCI I/O space */
	pci_set_region(&hose->regions[0],
		       0, MX6_IO_ADDR,
		       MX6_IO_SIZE, PCI_REGION_IO);

	/* PCI memory space */
	pci_set_region(&hose->regions[1],
		       MX6_MEM_ADDR, MX6_MEM_ADDR,
		       MX6_MEM_SIZE, PCI_REGION_MEM);

	/* System memory space */
	pci_set_region(&hose->regions[2],
		       MMDC0_ARB_BASE_ADDR, MMDC0_ARB_BASE_ADDR,
		       0xefffffff, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	priv->io = &hose->regions[0];
	priv->mem = &hose->regions[1];

	hose->region_count = 3;

	/* Dword accesses are native; byte/word go via the dword helpers. */
	pci_set_ops(hose,
		    pci_hose_read_config_byte_via_dword,
		    pci_hose_read_config_word_via_dword,
		    imx_pcie_read_config,
		    pci_hose_write_config_byte_via_dword,
		    pci_hose_write_config_word_via_dword,
		    imx_pcie_write_config);

	/* Start the controller. */
	ret = imx_pcie_link_up(priv);

	/* Register and scan the bus only when the link actually came up. */
	if (!ret) {
		pci_register_hose(hose);
		hose->last_busno = pci_hose_scan(hose);
#ifdef DEBUG_STRESS_WR
		dbg_reg += 1<<16;
#endif
	}
#ifdef DEBUG_STRESS_WR
	writel(dbg_reg, dbg_reg_addr);
	DBGF("PCIe Successes/Attempts: %d/%d\n",
	     dbg_reg >> 16, dbg_reg & 0xffff);
#endif
}
1492 | 1532 | ||
/*
 * Legacy (non-DM) teardown: put the PCIe core back into reset so the OS
 * takes over from a known-clean state.
 * NOTE(review): uses the file-scope 'priv' and imx6_pcie_assert_core_reset()
 * defined earlier in this file (the DM path calls
 * imx_pcie_assert_core_reset() — verify both names exist).
 */
void imx_pcie_remove(void)
{
	imx6_pcie_assert_core_reset(priv, true);
}
1497 | 1537 | ||
/* Probe function. */
/*
 * Board-level PCI hook invoked by U-Boot's generic PCI init; delegates
 * to the legacy controller bring-up above.
 */
void pci_init_board(void)
{
	imx_pcie_init();
}
1503 | 1543 | ||
/* Never skip any device during bus enumeration on this platform. */
int pci_skip_dev(struct pci_controller *hose, pci_dev_t dev)
{
	return 0;
}
1508 | 1548 | ||
1509 | #else | 1549 | #else |
1510 | static int imx_pcie_dm_read_config(const struct udevice *dev, pci_dev_t bdf, | 1550 | static int imx_pcie_dm_read_config(const struct udevice *dev, pci_dev_t bdf, |
1511 | uint offset, ulong *value, | 1551 | uint offset, ulong *value, |
1512 | enum pci_size_t size) | 1552 | enum pci_size_t size) |
1513 | { | 1553 | { |
1514 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1554 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1515 | u32 tmpval; | 1555 | u32 tmpval; |
1516 | int ret; | 1556 | int ret; |
1517 | 1557 | ||
1518 | ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); | 1558 | ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); |
1519 | if (ret) | 1559 | if (ret) |
1520 | return ret; | 1560 | return ret; |
1521 | 1561 | ||
1522 | *value = pci_conv_32_to_size(tmpval, offset, size); | 1562 | *value = pci_conv_32_to_size(tmpval, offset, size); |
1523 | return 0; | 1563 | return 0; |
1524 | } | 1564 | } |
1525 | 1565 | ||
1526 | static int imx_pcie_dm_write_config(struct udevice *dev, pci_dev_t bdf, | 1566 | static int imx_pcie_dm_write_config(struct udevice *dev, pci_dev_t bdf, |
1527 | uint offset, ulong value, | 1567 | uint offset, ulong value, |
1528 | enum pci_size_t size) | 1568 | enum pci_size_t size) |
1529 | { | 1569 | { |
1530 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1570 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1531 | u32 tmpval, newval; | 1571 | u32 tmpval, newval; |
1532 | int ret; | 1572 | int ret; |
1533 | 1573 | ||
1534 | ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); | 1574 | ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); |
1535 | if (ret) | 1575 | if (ret) |
1536 | return ret; | 1576 | return ret; |
1537 | 1577 | ||
1538 | newval = pci_conv_size_to_32(tmpval, value, offset, size); | 1578 | newval = pci_conv_size_to_32(tmpval, value, offset, size); |
1539 | return imx_pcie_write_cfg(priv, bdf, offset, newval); | 1579 | return imx_pcie_write_cfg(priv, bdf, offset, newval); |
1540 | } | 1580 | } |
1541 | 1581 | ||
1542 | static int imx_pcie_dm_probe(struct udevice *dev) | 1582 | static int imx_pcie_dm_probe(struct udevice *dev) |
1543 | { | 1583 | { |
1544 | int ret = 0; | 1584 | int ret = 0; |
1545 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1585 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1546 | 1586 | ||
1547 | #if CONFIG_IS_ENABLED(DM_REGULATOR) | 1587 | #if CONFIG_IS_ENABLED(DM_REGULATOR) |
1548 | ret = device_get_supply_regulator(dev, "epdev_on", &priv->epdev_on); | 1588 | ret = device_get_supply_regulator(dev, "epdev_on", &priv->epdev_on); |
1549 | if (ret) { | 1589 | if (ret) { |
1550 | priv->epdev_on = NULL; | 1590 | priv->epdev_on = NULL; |
1551 | dev_dbg(dev, "no epdev_on\n"); | 1591 | dev_dbg(dev, "no epdev_on\n"); |
1552 | } else { | 1592 | } else { |
1553 | ret = regulator_set_enable(priv->epdev_on, true); | 1593 | ret = regulator_set_enable(priv->epdev_on, true); |
1554 | if (ret) { | 1594 | if (ret) { |
1555 | dev_err(dev, "fail to enable epdev_on\n"); | 1595 | dev_err(dev, "fail to enable epdev_on\n"); |
1556 | return ret; | 1596 | return ret; |
1557 | } | 1597 | } |
1558 | } | 1598 | } |
1559 | 1599 | ||
1560 | mdelay(100); | 1600 | mdelay(100); |
1561 | #endif | 1601 | #endif |
1562 | 1602 | ||
1563 | /* Enable the osc clk */ | 1603 | /* Enable the osc clk */ |
1564 | ret = gpio_request_by_name(dev, "clkreq-gpio", 0, &priv->clkreq_gpio, | 1604 | ret = gpio_request_by_name(dev, "clkreq-gpio", 0, &priv->clkreq_gpio, |
1565 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); | 1605 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); |
1566 | if (ret) { | 1606 | if (ret) { |
1567 | dev_info(dev, "%d unable to get clkreq.\n", ret); | 1607 | dev_info(dev, "%d unable to get clkreq.\n", ret); |
1568 | } | 1608 | } |
1569 | 1609 | ||
1570 | /* enable */ | 1610 | /* enable */ |
1571 | ret = gpio_request_by_name(dev, "disable-gpio", 0, &priv->dis_gpio, | 1611 | ret = gpio_request_by_name(dev, "disable-gpio", 0, &priv->dis_gpio, |
1572 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); | 1612 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); |
1573 | if (ret) { | 1613 | if (ret) { |
1574 | dev_info(dev, "%d unable to get disable-gpio.\n", ret); | 1614 | dev_info(dev, "%d unable to get disable-gpio.\n", ret); |
1575 | } | 1615 | } |
1576 | 1616 | ||
1577 | /* Set to power on */ | 1617 | /* Set to power on */ |
1578 | ret = gpio_request_by_name(dev, "power-on-gpio", 0, &priv->power_on_gpio, | 1618 | ret = gpio_request_by_name(dev, "power-on-gpio", 0, &priv->power_on_gpio, |
1579 | (GPIOD_IS_OUT |GPIOD_IS_OUT_ACTIVE)); | 1619 | (GPIOD_IS_OUT |GPIOD_IS_OUT_ACTIVE)); |
1580 | if (ret) { | 1620 | if (ret) { |
1581 | dev_info(dev, "%d unable to get power-on-gpio.\n", ret); | 1621 | dev_info(dev, "%d unable to get power-on-gpio.\n", ret); |
1582 | } | 1622 | } |
1583 | 1623 | ||
1584 | /* Set to reset status */ | 1624 | /* Set to reset status */ |
1585 | ret = gpio_request_by_name(dev, "reset-gpio", 0, &priv->reset_gpio, | 1625 | ret = gpio_request_by_name(dev, "reset-gpio", 0, &priv->reset_gpio, |
1586 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); | 1626 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); |
1587 | if (ret) { | 1627 | if (ret) { |
1588 | dev_info(dev, "%d unable to get power-on-gpio.\n", ret); | 1628 | dev_info(dev, "%d unable to get power-on-gpio.\n", ret); |
1589 | } | 1629 | } |
1590 | 1630 | ||
1591 | #if CONFIG_IS_ENABLED(CLK) | 1631 | #if CONFIG_IS_ENABLED(CLK) |
1592 | ret = clk_get_by_name(dev, "pcie_phy", &priv->pcie_phy); | 1632 | ret = clk_get_by_name(dev, "pcie_phy", &priv->pcie_phy); |
1593 | if (ret) { | 1633 | if (ret) { |
1594 | printf("Failed to get pcie_phy clk\n"); | 1634 | printf("Failed to get pcie_phy clk\n"); |
1595 | return ret; | 1635 | return ret; |
1596 | } | 1636 | } |
1597 | 1637 | ||
1598 | ret = clk_get_by_name(dev, "pcie_bus", &priv->pcie_bus); | 1638 | ret = clk_get_by_name(dev, "pcie_bus", &priv->pcie_bus); |
1599 | if (ret) { | 1639 | if (ret) { |
1600 | printf("Failed to get pcie_bus clk\n"); | 1640 | printf("Failed to get pcie_bus clk\n"); |
1601 | return ret; | 1641 | return ret; |
1602 | } | 1642 | } |
1603 | 1643 | ||
1604 | ret = clk_get_by_name(dev, "pcie", &priv->pcie); | 1644 | ret = clk_get_by_name(dev, "pcie", &priv->pcie); |
1605 | if (ret) { | 1645 | if (ret) { |
1606 | printf("Failed to get pcie clk\n"); | 1646 | printf("Failed to get pcie clk\n"); |
1607 | return ret; | 1647 | return ret; |
1608 | } | 1648 | } |
1609 | #endif | 1649 | #endif |
1610 | 1650 | ||
1611 | if (priv->variant == IMX8QM || priv->variant == IMX8QXP) { | 1651 | if (priv->variant == IMX8QM || priv->variant == IMX8QXP) { |
1612 | #if CONFIG_IS_ENABLED(CLK) | 1652 | #if CONFIG_IS_ENABLED(CLK) |
1613 | ret = clk_get_by_name(dev, "pcie_per", &priv->pcie_per); | 1653 | ret = clk_get_by_name(dev, "pcie_per", &priv->pcie_per); |
1614 | if (ret) { | 1654 | if (ret) { |
1615 | printf("Failed to get pcie_per clk\n"); | 1655 | printf("Failed to get pcie_per clk\n"); |
1616 | return ret; | 1656 | return ret; |
1617 | } | 1657 | } |
1618 | 1658 | ||
1619 | ret = clk_get_by_name(dev, "pcie_inbound_axi", &priv->pcie_inbound_axi); | 1659 | ret = clk_get_by_name(dev, "pcie_inbound_axi", &priv->pcie_inbound_axi); |
1620 | if (ret) { | 1660 | if (ret) { |
1621 | printf("Failed to get pcie_inbound_axi clk\n"); | 1661 | printf("Failed to get pcie_inbound_axi clk\n"); |
1622 | return ret; | 1662 | return ret; |
1663 | } | ||
1664 | |||
1665 | ret = clk_get_by_name(dev, "phy_per", &priv->phy_per); | ||
1666 | if (ret) { | ||
1667 | printf("Failed to get phy_per clk\n"); | ||
1668 | return ret; | ||
1669 | } | ||
1670 | |||
1671 | ret = clk_get_by_name(dev, "misc_per", &priv->misc_per); | ||
1672 | if (ret) { | ||
1673 | printf("Failed to get misc_per clk\n"); | ||
1674 | return ret; | ||
1675 | } | ||
1676 | |||
1677 | if (priv->variant == IMX8QM && priv->ctrl_id == 1) { | ||
1678 | ret = clk_get_by_name(dev, "pcie_phy_pclk", &priv->pcie_phy_pclk); | ||
1679 | if (ret) { | ||
1680 | printf("Failed to get pcie_phy_pclk clk\n"); | ||
1681 | return ret; | ||
1682 | } | ||
1683 | |||
1684 | ret = clk_get_by_name(dev, "pciex2_per", &priv->pciex2_per); | ||
1685 | if (ret) { | ||
1686 | printf("Failed to get pciex2_per clk\n"); | ||
1687 | return ret; | ||
1688 | } | ||
1623 | } | 1689 | } |
1624 | #endif | 1690 | #endif |
1625 | priv->iomuxc_gpr = | 1691 | priv->iomuxc_gpr = |
1626 | syscon_regmap_lookup_by_phandle(dev, "hsio"); | 1692 | syscon_regmap_lookup_by_phandle(dev, "hsio"); |
1627 | if (IS_ERR(priv->iomuxc_gpr)) { | 1693 | if (IS_ERR(priv->iomuxc_gpr)) { |
1628 | dev_err(dev, "unable to find gpr registers\n"); | 1694 | dev_err(dev, "unable to find gpr registers\n"); |
1629 | return PTR_ERR(priv->iomuxc_gpr); | 1695 | return PTR_ERR(priv->iomuxc_gpr); |
1630 | } | 1696 | } |
1631 | } else { | 1697 | } else { |
1632 | #if CONFIG_IS_ENABLED(DM_REGULATOR) | 1698 | #if CONFIG_IS_ENABLED(DM_REGULATOR) |
1633 | if (priv->variant == IMX6QP) { | 1699 | if (priv->variant == IMX6QP) { |
1634 | ret = device_get_supply_regulator(dev, "pcie-bus", &priv->pcie_bus_regulator); | 1700 | ret = device_get_supply_regulator(dev, "pcie-bus", &priv->pcie_bus_regulator); |
1635 | if (ret) { | 1701 | if (ret) { |
1636 | dev_dbg(dev, "no pcie_bus_regulator\n"); | 1702 | dev_dbg(dev, "no pcie_bus_regulator\n"); |
1637 | priv->pcie_bus_regulator = NULL; | 1703 | priv->pcie_bus_regulator = NULL; |
1638 | } | 1704 | } |
1639 | } else if (priv->variant == IMX6SX) { | 1705 | } else if (priv->variant == IMX6SX) { |
1640 | ret = device_get_supply_regulator(dev, "pcie-phy", &priv->pcie_phy_regulator); | 1706 | ret = device_get_supply_regulator(dev, "pcie-phy", &priv->pcie_phy_regulator); |
1641 | if (ret) { | 1707 | if (ret) { |
1642 | dev_dbg(dev, "no pcie_phy_regulator\n"); | 1708 | dev_dbg(dev, "no pcie_phy_regulator\n"); |
1643 | priv->pcie_phy_regulator = NULL; | 1709 | priv->pcie_phy_regulator = NULL; |
1644 | } | 1710 | } |
1645 | } | 1711 | } |
1646 | #endif | 1712 | #endif |
1647 | 1713 | ||
1648 | priv->iomuxc_gpr = | 1714 | priv->iomuxc_gpr = |
1649 | syscon_regmap_lookup_by_phandle(dev, "gpr"); | 1715 | syscon_regmap_lookup_by_phandle(dev, "gpr"); |
1650 | if (IS_ERR(priv->iomuxc_gpr)) { | 1716 | if (IS_ERR(priv->iomuxc_gpr)) { |
1651 | dev_err(dev, "unable to find gpr registers\n"); | 1717 | dev_err(dev, "unable to find gpr registers\n"); |
1652 | return PTR_ERR(priv->iomuxc_gpr); | 1718 | return PTR_ERR(priv->iomuxc_gpr); |
1653 | } | 1719 | } |
1654 | } | 1720 | } |
1655 | 1721 | ||
1656 | pci_get_regions(dev, &priv->io, &priv->mem, &priv->pref); | 1722 | pci_get_regions(dev, &priv->io, &priv->mem, &priv->pref); |
1657 | 1723 | ||
1658 | if (priv->cpu_base) | 1724 | if (priv->cpu_base) |
1659 | priv->cpu_addr_offset = priv->cpu_base | 1725 | priv->cpu_addr_offset = priv->cpu_base |
1660 | - priv->mem->phys_start; | 1726 | - priv->mem->phys_start; |
1661 | else | 1727 | else |
1662 | priv->cpu_addr_offset = 0; | 1728 | priv->cpu_addr_offset = 0; |
1663 | 1729 | ||
1664 | return imx_pcie_link_up(priv); | 1730 | return imx_pcie_link_up(priv); |
1665 | } | 1731 | } |
1666 | 1732 | ||
1667 | static int imx_pcie_dm_remove(struct udevice *dev) | 1733 | static int imx_pcie_dm_remove(struct udevice *dev) |
1668 | { | 1734 | { |
1669 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1735 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1670 | 1736 | ||
1671 | imx_pcie_assert_core_reset(priv, true); | 1737 | imx_pcie_assert_core_reset(priv, true); |
1672 | 1738 | ||
1673 | return 0; | 1739 | return 0; |
1674 | } | 1740 | } |
1675 | 1741 | ||
1676 | static int imx_pcie_ofdata_to_platdata(struct udevice *dev) | 1742 | static int imx_pcie_ofdata_to_platdata(struct udevice *dev) |
1677 | { | 1743 | { |
1678 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1744 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1679 | int ret; | 1745 | int ret; |
1680 | struct resource cfg_res; | 1746 | struct resource cfg_res; |
1681 | 1747 | ||
1682 | priv->dbi_base = (void __iomem *)devfdt_get_addr_index(dev, 0); | 1748 | priv->dbi_base = (void __iomem *)devfdt_get_addr_index(dev, 0); |
1683 | if (!priv->dbi_base) | 1749 | if (!priv->dbi_base) |
1684 | return -EINVAL; | 1750 | return -EINVAL; |
1685 | 1751 | ||
1686 | ret = dev_read_resource_byname(dev, "config", &cfg_res); | 1752 | ret = dev_read_resource_byname(dev, "config", &cfg_res); |
1687 | if (ret) { | 1753 | if (ret) { |
1688 | printf("can't get config resource(ret = %d)\n", ret); | 1754 | printf("can't get config resource(ret = %d)\n", ret); |
1689 | return -ENOMEM; | 1755 | return -ENOMEM; |
1690 | } | 1756 | } |
1691 | 1757 | ||
1692 | priv->cfg_base = map_physmem(cfg_res.start, | 1758 | priv->cfg_base = map_physmem(cfg_res.start, |
1693 | resource_size(&cfg_res), | 1759 | resource_size(&cfg_res), |
1694 | MAP_NOCACHE); | 1760 | MAP_NOCACHE); |
1695 | priv->cfg1_base = priv->cfg_base + resource_size(&cfg_res) / 2; | 1761 | priv->cfg1_base = priv->cfg_base + resource_size(&cfg_res) / 2; |
1696 | priv->cfg_size = resource_size(&cfg_res); | 1762 | priv->cfg_size = resource_size(&cfg_res); |
1697 | 1763 | ||
1698 | priv->variant = (enum imx_pcie_variants)dev_get_driver_data(dev); | 1764 | priv->variant = (enum imx_pcie_variants)dev_get_driver_data(dev); |
1699 | 1765 | ||
1700 | if (dev_read_u32u(dev, "hsio-cfg", &priv->hsio_cfg)) | 1766 | if (dev_read_u32u(dev, "hsio-cfg", &priv->hsio_cfg)) |
1701 | priv->hsio_cfg = 0; | 1767 | priv->hsio_cfg = 0; |
1702 | 1768 | ||
1703 | if (dev_read_u32u(dev, "ctrl-id", &priv->ctrl_id)) | 1769 | if (dev_read_u32u(dev, "ctrl-id", &priv->ctrl_id)) |
1704 | priv->ctrl_id = 0; | 1770 | priv->ctrl_id = 0; |
1705 | 1771 | ||
1706 | if (dev_read_u32u(dev, "ext_osc", &priv->ext_osc)) | 1772 | if (dev_read_u32u(dev, "ext_osc", &priv->ext_osc)) |
1707 | priv->ext_osc = 0; | 1773 | priv->ext_osc = 0; |
1708 | 1774 | ||
1709 | if (dev_read_u32u(dev, "cpu-base-addr", &priv->cpu_base)) | 1775 | if (dev_read_u32u(dev, "cpu-base-addr", &priv->cpu_base)) |
1710 | priv->cpu_base = 0; | 1776 | priv->cpu_base = 0; |
1711 | 1777 | ||
1712 | if (dev_read_u32u(dev, "num-lanes", &priv->lanes)) | 1778 | if (dev_read_u32u(dev, "num-lanes", &priv->lanes)) |
1713 | priv->lanes = 1; | 1779 | priv->lanes = 1; |
1714 | 1780 | ||
1715 | debug("hsio-cfg %u, ctrl-id %u, ext_osc %u, cpu-base 0x%x\n", | 1781 | debug("hsio-cfg %u, ctrl-id %u, ext_osc %u, cpu-base 0x%x\n", |
1716 | priv->hsio_cfg, priv->ctrl_id, priv->ext_osc, priv->cpu_base); | 1782 | priv->hsio_cfg, priv->ctrl_id, priv->ext_osc, priv->cpu_base); |
1717 | 1783 | ||
1718 | return 0; | 1784 | return 0; |
1719 | } | 1785 | } |
1720 | 1786 | ||
/* DM PCI operations: dword-granular config-space accessors. */
static const struct dm_pci_ops imx_pcie_ops = {
	.read_config = imx_pcie_dm_read_config,
	.write_config = imx_pcie_dm_write_config,
};
1725 | 1791 | ||
/*
 * Compatible strings; .data carries the matching
 * enum imx_pcie_variants value retrieved via dev_get_driver_data().
 */
static const struct udevice_id imx_pcie_ids[] = {
	{ .compatible = "fsl,imx6q-pcie", .data = (ulong)IMX6Q, },
	{ .compatible = "fsl,imx6sx-pcie", .data = (ulong)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (ulong)IMX6QP, },
	{ .compatible = "fsl,imx8qm-pcie", .data = (ulong)IMX8QM, },
	{ .compatible = "fsl,imx8qxp-pcie", .data = (ulong)IMX8QXP, },
	{ }
};
1734 | 1800 | ||
1735 | U_BOOT_DRIVER(imx_pcie) = { | 1801 | U_BOOT_DRIVER(imx_pcie) = { |
1736 | .name = "imx_pcie", | 1802 | .name = "imx_pcie", |
1737 | .id = UCLASS_PCI, | 1803 | .id = UCLASS_PCI, |
1738 | .of_match = imx_pcie_ids, | 1804 | .of_match = imx_pcie_ids, |
1739 | .ops = &imx_pcie_ops, | 1805 | .ops = &imx_pcie_ops, |
1740 | .probe = imx_pcie_dm_probe, | 1806 | .probe = imx_pcie_dm_probe, |
1741 | .remove = imx_pcie_dm_remove, | 1807 | .remove = imx_pcie_dm_remove, |
1742 | .ofdata_to_platdata = imx_pcie_ofdata_to_platdata, | 1808 | .ofdata_to_platdata = imx_pcie_ofdata_to_platdata, |
1743 | .priv_auto_alloc_size = sizeof(struct imx_pcie_priv), | 1809 | .priv_auto_alloc_size = sizeof(struct imx_pcie_priv), |