Commit 0f630e17a914f09435f6c2368cb14c997271f14d
1 parent
01823d8988
Exists in
smarc_8mq-imx_v2020.04_5.4.24_2.1.0
and in
1 other branch
MLK-24042-2 pci: pcie_imx: Fix iMX6Q remove issue
The wrong variable is checked during PCIe driver removal (IMX6QP is checked, but it should be IMX6Q), so the i.MX6Q handling path is never entered and the kernel subsequently fails to detect the PCI link. Signed-off-by: Ye Li <ye.li@nxp.com> Reviewed-by: Peng Fan <peng.fan@nxp.com> (cherry picked from commit 0dd8cc03ecc45faaba0297c2832ff6ae307e8f30)
Showing 1 changed file with 1 additions and 1 deletions Inline Diff
drivers/pci/pcie_imx.c
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | 2 | /* |
3 | * Freescale i.MX6 PCI Express Root-Complex driver | 3 | * Freescale i.MX6 PCI Express Root-Complex driver |
4 | * | 4 | * |
5 | * Copyright (C) 2013 Marek Vasut <marex@denx.de> | 5 | * Copyright (C) 2013 Marek Vasut <marex@denx.de> |
6 | * | 6 | * |
7 | * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. | 7 | * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. |
8 | * Copyright 2019 NXP | 8 | * Copyright 2019 NXP |
9 | * | 9 | * |
10 | * Based on upstream Linux kernel driver: | 10 | * Based on upstream Linux kernel driver: |
11 | * pci-imx6.c: Sean Cross <xobs@kosagi.com> | 11 | * pci-imx6.c: Sean Cross <xobs@kosagi.com> |
12 | * pcie-designware.c: Jingoo Han <jg1.han@samsung.com> | 12 | * pcie-designware.c: Jingoo Han <jg1.han@samsung.com> |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <common.h> | 15 | #include <common.h> |
16 | #include <init.h> | 16 | #include <init.h> |
17 | #include <malloc.h> | 17 | #include <malloc.h> |
18 | #include <pci.h> | 18 | #include <pci.h> |
19 | #if CONFIG_IS_ENABLED(CLK) | 19 | #if CONFIG_IS_ENABLED(CLK) |
20 | #include <clk.h> | 20 | #include <clk.h> |
21 | #else | 21 | #else |
22 | #include <asm/arch/clock.h> | 22 | #include <asm/arch/clock.h> |
23 | #endif | 23 | #endif |
24 | #include <asm/arch/iomux.h> | 24 | #include <asm/arch/iomux.h> |
25 | #ifdef CONFIG_MX6 | 25 | #ifdef CONFIG_MX6 |
26 | #include <asm/arch/crm_regs.h> | 26 | #include <asm/arch/crm_regs.h> |
27 | #endif | 27 | #endif |
28 | #include <asm/gpio.h> | 28 | #include <asm/gpio.h> |
29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
30 | #include <dm.h> | 30 | #include <dm.h> |
31 | #include <linux/sizes.h> | 31 | #include <linux/sizes.h> |
32 | #include <linux/ioport.h> | 32 | #include <linux/ioport.h> |
33 | #include <errno.h> | 33 | #include <errno.h> |
34 | #include <asm/arch/sys_proto.h> | 34 | #include <asm/arch/sys_proto.h> |
35 | #include <syscon.h> | 35 | #include <syscon.h> |
36 | #include <regmap.h> | 36 | #include <regmap.h> |
37 | #include <asm-generic/gpio.h> | 37 | #include <asm-generic/gpio.h> |
38 | #include <dt-bindings/soc/imx8_hsio.h> | 38 | #include <dt-bindings/soc/imx8_hsio.h> |
39 | #include <power/regulator.h> | 39 | #include <power/regulator.h> |
40 | #include <dm/device_compat.h> | 40 | #include <dm/device_compat.h> |
41 | 41 | ||
/* SoC variants handled by this driver; selects variant-specific code paths. */
enum imx_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX8QM,
	IMX8QXP,
};
49 | 49 | ||
50 | #define PCI_ACCESS_READ 0 | 50 | #define PCI_ACCESS_READ 0 |
51 | #define PCI_ACCESS_WRITE 1 | 51 | #define PCI_ACCESS_WRITE 1 |
52 | 52 | ||
53 | #ifdef CONFIG_MX6SX | 53 | #ifdef CONFIG_MX6SX |
54 | #define MX6_DBI_ADDR 0x08ffc000 | 54 | #define MX6_DBI_ADDR 0x08ffc000 |
55 | #define MX6_IO_ADDR 0x08f80000 | 55 | #define MX6_IO_ADDR 0x08f80000 |
56 | #define MX6_MEM_ADDR 0x08000000 | 56 | #define MX6_MEM_ADDR 0x08000000 |
57 | #define MX6_ROOT_ADDR 0x08f00000 | 57 | #define MX6_ROOT_ADDR 0x08f00000 |
58 | #else | 58 | #else |
59 | #define MX6_DBI_ADDR 0x01ffc000 | 59 | #define MX6_DBI_ADDR 0x01ffc000 |
60 | #define MX6_IO_ADDR 0x01f80000 | 60 | #define MX6_IO_ADDR 0x01f80000 |
61 | #define MX6_MEM_ADDR 0x01000000 | 61 | #define MX6_MEM_ADDR 0x01000000 |
62 | #define MX6_ROOT_ADDR 0x01f00000 | 62 | #define MX6_ROOT_ADDR 0x01f00000 |
63 | #endif | 63 | #endif |
64 | #define MX6_DBI_SIZE 0x4000 | 64 | #define MX6_DBI_SIZE 0x4000 |
65 | #define MX6_IO_SIZE 0x10000 | 65 | #define MX6_IO_SIZE 0x10000 |
66 | #define MX6_MEM_SIZE 0xf00000 | 66 | #define MX6_MEM_SIZE 0xf00000 |
67 | #define MX6_ROOT_SIZE 0x80000 | 67 | #define MX6_ROOT_SIZE 0x80000 |
68 | 68 | ||
69 | /* PCIe Port Logic registers (memory-mapped) */ | 69 | /* PCIe Port Logic registers (memory-mapped) */ |
70 | #define PL_OFFSET 0x700 | 70 | #define PL_OFFSET 0x700 |
71 | #define PCIE_PL_PFLR (PL_OFFSET + 0x08) | 71 | #define PCIE_PL_PFLR (PL_OFFSET + 0x08) |
72 | #define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) | 72 | #define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) |
73 | #define PCIE_PL_PFLR_FORCE_LINK (1 << 15) | 73 | #define PCIE_PL_PFLR_FORCE_LINK (1 << 15) |
74 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) | 74 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) |
75 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) | 75 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) |
76 | #define PCIE_PHY_DEBUG_R1_LINK_UP (1 << 4) | 76 | #define PCIE_PHY_DEBUG_R1_LINK_UP (1 << 4) |
77 | #define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (1 << 29) | 77 | #define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (1 << 29) |
78 | 78 | ||
79 | #define PCIE_PORT_LINK_CONTROL 0x710 | 79 | #define PCIE_PORT_LINK_CONTROL 0x710 |
80 | #define PORT_LINK_MODE_MASK (0x3f << 16) | 80 | #define PORT_LINK_MODE_MASK (0x3f << 16) |
81 | #define PORT_LINK_MODE_1_LANES (0x1 << 16) | 81 | #define PORT_LINK_MODE_1_LANES (0x1 << 16) |
82 | #define PORT_LINK_MODE_2_LANES (0x3 << 16) | 82 | #define PORT_LINK_MODE_2_LANES (0x3 << 16) |
83 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) | 83 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) |
84 | #define PORT_LINK_MODE_8_LANES (0xf << 16) | 84 | #define PORT_LINK_MODE_8_LANES (0xf << 16) |
85 | 85 | ||
86 | 86 | ||
87 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | 87 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C |
88 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | 88 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) |
89 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) | 89 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) |
90 | #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) | 90 | #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) |
91 | #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) | 91 | #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) |
92 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) | 92 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) |
93 | #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) | 93 | #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) |
94 | 94 | ||
95 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) | 95 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) |
96 | #define PCIE_PHY_CTRL_DATA_LOC 0 | 96 | #define PCIE_PHY_CTRL_DATA_LOC 0 |
97 | #define PCIE_PHY_CTRL_CAP_ADR_LOC 16 | 97 | #define PCIE_PHY_CTRL_CAP_ADR_LOC 16 |
98 | #define PCIE_PHY_CTRL_CAP_DAT_LOC 17 | 98 | #define PCIE_PHY_CTRL_CAP_DAT_LOC 17 |
99 | #define PCIE_PHY_CTRL_WR_LOC 18 | 99 | #define PCIE_PHY_CTRL_WR_LOC 18 |
100 | #define PCIE_PHY_CTRL_RD_LOC 19 | 100 | #define PCIE_PHY_CTRL_RD_LOC 19 |
101 | 101 | ||
102 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) | 102 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) |
103 | #define PCIE_PHY_STAT_DATA_LOC 0 | 103 | #define PCIE_PHY_STAT_DATA_LOC 0 |
104 | #define PCIE_PHY_STAT_ACK_LOC 16 | 104 | #define PCIE_PHY_STAT_ACK_LOC 16 |
105 | 105 | ||
106 | /* PHY registers (not memory-mapped) */ | 106 | /* PHY registers (not memory-mapped) */ |
107 | #define PCIE_PHY_RX_ASIC_OUT 0x100D | 107 | #define PCIE_PHY_RX_ASIC_OUT 0x100D |
108 | 108 | ||
109 | #define PHY_RX_OVRD_IN_LO 0x1005 | 109 | #define PHY_RX_OVRD_IN_LO 0x1005 |
110 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) | 110 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) |
111 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) | 111 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) |
112 | 112 | ||
113 | #define PCIE_PHY_PUP_REQ (1 << 7) | 113 | #define PCIE_PHY_PUP_REQ (1 << 7) |
114 | 114 | ||
115 | /* iATU registers */ | 115 | /* iATU registers */ |
116 | #define PCIE_ATU_VIEWPORT 0x900 | 116 | #define PCIE_ATU_VIEWPORT 0x900 |
117 | #define PCIE_ATU_REGION_INBOUND (0x1 << 31) | 117 | #define PCIE_ATU_REGION_INBOUND (0x1 << 31) |
118 | #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) | 118 | #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) |
119 | #define PCIE_ATU_REGION_INDEX2 (0x2 << 0) | 119 | #define PCIE_ATU_REGION_INDEX2 (0x2 << 0) |
120 | #define PCIE_ATU_REGION_INDEX1 (0x1 << 0) | 120 | #define PCIE_ATU_REGION_INDEX1 (0x1 << 0) |
121 | #define PCIE_ATU_REGION_INDEX0 (0x0 << 0) | 121 | #define PCIE_ATU_REGION_INDEX0 (0x0 << 0) |
122 | #define PCIE_ATU_CR1 0x904 | 122 | #define PCIE_ATU_CR1 0x904 |
123 | #define PCIE_ATU_TYPE_MEM (0x0 << 0) | 123 | #define PCIE_ATU_TYPE_MEM (0x0 << 0) |
124 | #define PCIE_ATU_TYPE_IO (0x2 << 0) | 124 | #define PCIE_ATU_TYPE_IO (0x2 << 0) |
125 | #define PCIE_ATU_TYPE_CFG0 (0x4 << 0) | 125 | #define PCIE_ATU_TYPE_CFG0 (0x4 << 0) |
126 | #define PCIE_ATU_TYPE_CFG1 (0x5 << 0) | 126 | #define PCIE_ATU_TYPE_CFG1 (0x5 << 0) |
127 | #define PCIE_ATU_CR2 0x908 | 127 | #define PCIE_ATU_CR2 0x908 |
128 | #define PCIE_ATU_ENABLE (0x1 << 31) | 128 | #define PCIE_ATU_ENABLE (0x1 << 31) |
129 | #define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) | 129 | #define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) |
130 | #define PCIE_ATU_LOWER_BASE 0x90C | 130 | #define PCIE_ATU_LOWER_BASE 0x90C |
131 | #define PCIE_ATU_UPPER_BASE 0x910 | 131 | #define PCIE_ATU_UPPER_BASE 0x910 |
132 | #define PCIE_ATU_LIMIT 0x914 | 132 | #define PCIE_ATU_LIMIT 0x914 |
133 | #define PCIE_ATU_LOWER_TARGET 0x918 | 133 | #define PCIE_ATU_LOWER_TARGET 0x918 |
134 | #define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) | 134 | #define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) |
135 | #define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) | 135 | #define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) |
136 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) | 136 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) |
137 | #define PCIE_ATU_UPPER_TARGET 0x91C | 137 | #define PCIE_ATU_UPPER_TARGET 0x91C |
138 | 138 | ||
139 | #define PCIE_MISC_CTRL (PL_OFFSET + 0x1BC) | 139 | #define PCIE_MISC_CTRL (PL_OFFSET + 0x1BC) |
140 | #define PCIE_MISC_DBI_RO_WR_EN BIT(0) | 140 | #define PCIE_MISC_DBI_RO_WR_EN BIT(0) |
141 | 141 | ||
142 | /* iMX8 HSIO registers */ | 142 | /* iMX8 HSIO registers */ |
143 | #define IMX8QM_LPCG_PHYX2_OFFSET 0x00000 | 143 | #define IMX8QM_LPCG_PHYX2_OFFSET 0x00000 |
144 | #define IMX8QM_CSR_PHYX2_OFFSET 0x90000 | 144 | #define IMX8QM_CSR_PHYX2_OFFSET 0x90000 |
145 | #define IMX8QM_CSR_PHYX1_OFFSET 0xA0000 | 145 | #define IMX8QM_CSR_PHYX1_OFFSET 0xA0000 |
146 | #define IMX8QM_CSR_PHYX_STTS0_OFFSET 0x4 | 146 | #define IMX8QM_CSR_PHYX_STTS0_OFFSET 0x4 |
147 | #define IMX8QM_CSR_PCIEA_OFFSET 0xB0000 | 147 | #define IMX8QM_CSR_PCIEA_OFFSET 0xB0000 |
148 | #define IMX8QM_CSR_PCIEB_OFFSET 0xC0000 | 148 | #define IMX8QM_CSR_PCIEB_OFFSET 0xC0000 |
149 | #define IMX8QM_CSR_PCIE_CTRL1_OFFSET 0x4 | 149 | #define IMX8QM_CSR_PCIE_CTRL1_OFFSET 0x4 |
150 | #define IMX8QM_CSR_PCIE_CTRL2_OFFSET 0x8 | 150 | #define IMX8QM_CSR_PCIE_CTRL2_OFFSET 0x8 |
151 | #define IMX8QM_CSR_PCIE_STTS0_OFFSET 0xC | 151 | #define IMX8QM_CSR_PCIE_STTS0_OFFSET 0xC |
152 | #define IMX8QM_CSR_MISC_OFFSET 0xE0000 | 152 | #define IMX8QM_CSR_MISC_OFFSET 0xE0000 |
153 | 153 | ||
154 | #define IMX8QM_LPCG_PHY_PCG0 BIT(1) | 154 | #define IMX8QM_LPCG_PHY_PCG0 BIT(1) |
155 | #define IMX8QM_LPCG_PHY_PCG1 BIT(5) | 155 | #define IMX8QM_LPCG_PHY_PCG1 BIT(5) |
156 | 156 | ||
157 | #define IMX8QM_CTRL_LTSSM_ENABLE BIT(4) | 157 | #define IMX8QM_CTRL_LTSSM_ENABLE BIT(4) |
158 | #define IMX8QM_CTRL_READY_ENTR_L23 BIT(5) | 158 | #define IMX8QM_CTRL_READY_ENTR_L23 BIT(5) |
159 | #define IMX8QM_CTRL_PM_XMT_TURNOFF BIT(9) | 159 | #define IMX8QM_CTRL_PM_XMT_TURNOFF BIT(9) |
160 | #define IMX8QM_CTRL_BUTTON_RST_N BIT(21) | 160 | #define IMX8QM_CTRL_BUTTON_RST_N BIT(21) |
161 | #define IMX8QM_CTRL_PERST_N BIT(22) | 161 | #define IMX8QM_CTRL_PERST_N BIT(22) |
162 | #define IMX8QM_CTRL_POWER_UP_RST_N BIT(23) | 162 | #define IMX8QM_CTRL_POWER_UP_RST_N BIT(23) |
163 | 163 | ||
164 | #define IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2 BIT(13) | 164 | #define IMX8QM_CTRL_STTS0_PM_LINKST_IN_L2 BIT(13) |
165 | #define IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST BIT(19) | 165 | #define IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST BIT(19) |
166 | #define IMX8QM_STTS0_LANE0_TX_PLL_LOCK BIT(4) | 166 | #define IMX8QM_STTS0_LANE0_TX_PLL_LOCK BIT(4) |
167 | #define IMX8QM_STTS0_LANE1_TX_PLL_LOCK BIT(12) | 167 | #define IMX8QM_STTS0_LANE1_TX_PLL_LOCK BIT(12) |
168 | 168 | ||
169 | #define IMX8QM_PCIE_TYPE_MASK (0xF << 24) | 169 | #define IMX8QM_PCIE_TYPE_MASK (0xF << 24) |
170 | 170 | ||
171 | #define IMX8QM_PHYX2_CTRL0_APB_MASK 0x3 | 171 | #define IMX8QM_PHYX2_CTRL0_APB_MASK 0x3 |
172 | #define IMX8QM_PHY_APB_RSTN_0 BIT(0) | 172 | #define IMX8QM_PHY_APB_RSTN_0 BIT(0) |
173 | #define IMX8QM_PHY_APB_RSTN_1 BIT(1) | 173 | #define IMX8QM_PHY_APB_RSTN_1 BIT(1) |
174 | 174 | ||
175 | #define IMX8QM_MISC_IOB_RXENA BIT(0) | 175 | #define IMX8QM_MISC_IOB_RXENA BIT(0) |
176 | #define IMX8QM_MISC_IOB_TXENA BIT(1) | 176 | #define IMX8QM_MISC_IOB_TXENA BIT(1) |
177 | #define IMX8QM_CSR_MISC_IOB_A_0_TXOE BIT(2) | 177 | #define IMX8QM_CSR_MISC_IOB_A_0_TXOE BIT(2) |
178 | #define IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK (0x3 << 3) | 178 | #define IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK (0x3 << 3) |
179 | #define IMX8QM_CSR_MISC_IOB_A_0_M1M0_2 BIT(4) | 179 | #define IMX8QM_CSR_MISC_IOB_A_0_M1M0_2 BIT(4) |
180 | #define IMX8QM_MISC_PHYX1_EPCS_SEL BIT(12) | 180 | #define IMX8QM_MISC_PHYX1_EPCS_SEL BIT(12) |
181 | #define IMX8QM_MISC_PCIE_AB_SELECT BIT(13) | 181 | #define IMX8QM_MISC_PCIE_AB_SELECT BIT(13) |
182 | 182 | ||
183 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_MASK (0xF << 13) | 183 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_MASK (0xF << 13) |
184 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_0 BIT(13) | 184 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_0 BIT(13) |
185 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_1 BIT(14) | 185 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_1 BIT(14) |
186 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_2 BIT(15) | 186 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_2 BIT(15) |
187 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_3 BIT(16) | 187 | #define HW_PHYX2_CTRL0_PIPE_LN2LK_3 BIT(16) |
188 | 188 | ||
189 | #define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 | 189 | #define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 |
190 | 190 | ||
#ifdef DEBUG

#ifdef DEBUG_STRESS_WR /* warm-reset stress tests */
#define SNVS_LPGRP 0x020cc068
#endif

#define DBGF(x...) printf(x)

/*
 * Dump the IOMUXC GPR and CCM analog registers used for PCIe bring-up.
 * When @contain_pcie_reg is non-zero, two PCIe debug registers at DBI
 * offsets 0x728/0x72c are dumped as well.  The body compiles to nothing
 * unless CONFIG_MX6 is set.
 */
static void print_regs(int contain_pcie_reg)
{
#ifdef CONFIG_MX6
	u32 val;
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	struct mxc_ccm_reg *ccm_regs = (struct mxc_ccm_reg *)CCM_BASE_ADDR;

	val = readl(&iomuxc_regs->gpr[1]);
	DBGF("GPR01 a:0x%08x v:0x%08x\n", (u32)&iomuxc_regs->gpr[1], val);
	val = readl(&iomuxc_regs->gpr[5]);
	DBGF("GPR05 a:0x%08x v:0x%08x\n", (u32)&iomuxc_regs->gpr[5], val);
	val = readl(&iomuxc_regs->gpr[8]);
	DBGF("GPR08 a:0x%08x v:0x%08x\n", (u32)&iomuxc_regs->gpr[8], val);
	val = readl(&iomuxc_regs->gpr[12]);
	DBGF("GPR12 a:0x%08x v:0x%08x\n", (u32)&iomuxc_regs->gpr[12], val);
	val = readl(&ccm_regs->analog_pll_enet);
	DBGF("PLL06 a:0x%08x v:0x%08x\n", (u32)&ccm_regs->analog_pll_enet, val);
	val = readl(&ccm_regs->ana_misc1);
	DBGF("MISC1 a:0x%08x v:0x%08x\n", (u32)&ccm_regs->ana_misc1, val);
	if (contain_pcie_reg) {
		val = readl(MX6_DBI_ADDR + 0x728);
		DBGF("dbr0 offset 0x728 %08x\n", val);
		val = readl(MX6_DBI_ADDR + 0x72c);
		DBGF("dbr1 offset 0x72c %08x\n", val);
	}
#endif
}
#else
#define DBGF(x...)
static void print_regs(int contain_pcie_reg) {}
#endif
229 | 229 | ||
230 | struct imx_pcie_priv { | 230 | struct imx_pcie_priv { |
231 | void __iomem *dbi_base; | 231 | void __iomem *dbi_base; |
232 | void __iomem *cfg_base; | 232 | void __iomem *cfg_base; |
233 | void __iomem *cfg1_base; | 233 | void __iomem *cfg1_base; |
234 | enum imx_pcie_variants variant; | 234 | enum imx_pcie_variants variant; |
235 | struct regmap *iomuxc_gpr; | 235 | struct regmap *iomuxc_gpr; |
236 | u32 hsio_cfg; | 236 | u32 hsio_cfg; |
237 | u32 ctrl_id; | 237 | u32 ctrl_id; |
238 | u32 ext_osc; | 238 | u32 ext_osc; |
239 | u32 cpu_base; | 239 | u32 cpu_base; |
240 | u32 lanes; | 240 | u32 lanes; |
241 | u32 cfg_size; | 241 | u32 cfg_size; |
242 | int cpu_addr_offset; | 242 | int cpu_addr_offset; |
243 | struct gpio_desc clkreq_gpio; | 243 | struct gpio_desc clkreq_gpio; |
244 | struct gpio_desc dis_gpio; | 244 | struct gpio_desc dis_gpio; |
245 | struct gpio_desc reset_gpio; | 245 | struct gpio_desc reset_gpio; |
246 | struct gpio_desc power_on_gpio; | 246 | struct gpio_desc power_on_gpio; |
247 | 247 | ||
248 | struct pci_region *io; | 248 | struct pci_region *io; |
249 | struct pci_region *mem; | 249 | struct pci_region *mem; |
250 | struct pci_region *pref; | 250 | struct pci_region *pref; |
251 | 251 | ||
252 | #if CONFIG_IS_ENABLED(CLK) | 252 | #if CONFIG_IS_ENABLED(CLK) |
253 | struct clk pcie_bus; | 253 | struct clk pcie_bus; |
254 | struct clk pcie_phy; | 254 | struct clk pcie_phy; |
255 | struct clk pcie_inbound_axi; | 255 | struct clk pcie_inbound_axi; |
256 | struct clk pcie_per; | 256 | struct clk pcie_per; |
257 | struct clk phy_per; | 257 | struct clk phy_per; |
258 | struct clk misc_per; | 258 | struct clk misc_per; |
259 | struct clk pcie; | 259 | struct clk pcie; |
260 | struct clk pcie_ext_src; | 260 | struct clk pcie_ext_src; |
261 | #endif | 261 | #endif |
262 | 262 | ||
263 | #if CONFIG_IS_ENABLED(DM_REGULATOR) | 263 | #if CONFIG_IS_ENABLED(DM_REGULATOR) |
264 | struct udevice *epdev_on; | 264 | struct udevice *epdev_on; |
265 | struct udevice *pcie_bus_regulator; | 265 | struct udevice *pcie_bus_regulator; |
266 | struct udevice *pcie_phy_regulator; | 266 | struct udevice *pcie_phy_regulator; |
267 | #endif | 267 | #endif |
268 | }; | 268 | }; |
269 | 269 | ||
270 | /* | 270 | /* |
271 | * PHY access functions | 271 | * PHY access functions |
272 | */ | 272 | */ |
273 | static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val) | 273 | static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val) |
274 | { | 274 | { |
275 | u32 val; | 275 | u32 val; |
276 | u32 max_iterations = 10; | 276 | u32 max_iterations = 10; |
277 | u32 wait_counter = 0; | 277 | u32 wait_counter = 0; |
278 | 278 | ||
279 | do { | 279 | do { |
280 | val = readl(dbi_base + PCIE_PHY_STAT); | 280 | val = readl(dbi_base + PCIE_PHY_STAT); |
281 | val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; | 281 | val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; |
282 | wait_counter++; | 282 | wait_counter++; |
283 | 283 | ||
284 | if (val == exp_val) | 284 | if (val == exp_val) |
285 | return 0; | 285 | return 0; |
286 | 286 | ||
287 | udelay(1); | 287 | udelay(1); |
288 | } while (wait_counter < max_iterations); | 288 | } while (wait_counter < max_iterations); |
289 | 289 | ||
290 | return -ETIMEDOUT; | 290 | return -ETIMEDOUT; |
291 | } | 291 | } |
292 | 292 | ||
293 | static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) | 293 | static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) |
294 | { | 294 | { |
295 | u32 val; | 295 | u32 val; |
296 | int ret; | 296 | int ret; |
297 | 297 | ||
298 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | 298 | val = addr << PCIE_PHY_CTRL_DATA_LOC; |
299 | writel(val, dbi_base + PCIE_PHY_CTRL); | 299 | writel(val, dbi_base + PCIE_PHY_CTRL); |
300 | 300 | ||
301 | val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); | 301 | val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); |
302 | writel(val, dbi_base + PCIE_PHY_CTRL); | 302 | writel(val, dbi_base + PCIE_PHY_CTRL); |
303 | 303 | ||
304 | ret = pcie_phy_poll_ack(dbi_base, 1); | 304 | ret = pcie_phy_poll_ack(dbi_base, 1); |
305 | if (ret) | 305 | if (ret) |
306 | return ret; | 306 | return ret; |
307 | 307 | ||
308 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | 308 | val = addr << PCIE_PHY_CTRL_DATA_LOC; |
309 | writel(val, dbi_base + PCIE_PHY_CTRL); | 309 | writel(val, dbi_base + PCIE_PHY_CTRL); |
310 | 310 | ||
311 | ret = pcie_phy_poll_ack(dbi_base, 0); | 311 | ret = pcie_phy_poll_ack(dbi_base, 0); |
312 | if (ret) | 312 | if (ret) |
313 | return ret; | 313 | return ret; |
314 | 314 | ||
315 | return 0; | 315 | return 0; |
316 | } | 316 | } |
317 | 317 | ||
318 | /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ | 318 | /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ |
319 | static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data) | 319 | static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data) |
320 | { | 320 | { |
321 | u32 val, phy_ctl; | 321 | u32 val, phy_ctl; |
322 | int ret; | 322 | int ret; |
323 | 323 | ||
324 | ret = pcie_phy_wait_ack(dbi_base, addr); | 324 | ret = pcie_phy_wait_ack(dbi_base, addr); |
325 | if (ret) | 325 | if (ret) |
326 | return ret; | 326 | return ret; |
327 | 327 | ||
328 | /* assert Read signal */ | 328 | /* assert Read signal */ |
329 | phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; | 329 | phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; |
330 | writel(phy_ctl, dbi_base + PCIE_PHY_CTRL); | 330 | writel(phy_ctl, dbi_base + PCIE_PHY_CTRL); |
331 | 331 | ||
332 | ret = pcie_phy_poll_ack(dbi_base, 1); | 332 | ret = pcie_phy_poll_ack(dbi_base, 1); |
333 | if (ret) | 333 | if (ret) |
334 | return ret; | 334 | return ret; |
335 | 335 | ||
336 | val = readl(dbi_base + PCIE_PHY_STAT); | 336 | val = readl(dbi_base + PCIE_PHY_STAT); |
337 | *data = val & 0xffff; | 337 | *data = val & 0xffff; |
338 | 338 | ||
339 | /* deassert Read signal */ | 339 | /* deassert Read signal */ |
340 | writel(0x00, dbi_base + PCIE_PHY_CTRL); | 340 | writel(0x00, dbi_base + PCIE_PHY_CTRL); |
341 | 341 | ||
342 | ret = pcie_phy_poll_ack(dbi_base, 0); | 342 | ret = pcie_phy_poll_ack(dbi_base, 0); |
343 | if (ret) | 343 | if (ret) |
344 | return ret; | 344 | return ret; |
345 | 345 | ||
346 | return 0; | 346 | return 0; |
347 | } | 347 | } |
348 | 348 | ||
349 | static int pcie_phy_write(void __iomem *dbi_base, int addr, int data) | 349 | static int pcie_phy_write(void __iomem *dbi_base, int addr, int data) |
350 | { | 350 | { |
351 | u32 var; | 351 | u32 var; |
352 | int ret; | 352 | int ret; |
353 | 353 | ||
354 | /* write addr */ | 354 | /* write addr */ |
355 | /* cap addr */ | 355 | /* cap addr */ |
356 | ret = pcie_phy_wait_ack(dbi_base, addr); | 356 | ret = pcie_phy_wait_ack(dbi_base, addr); |
357 | if (ret) | 357 | if (ret) |
358 | return ret; | 358 | return ret; |
359 | 359 | ||
360 | var = data << PCIE_PHY_CTRL_DATA_LOC; | 360 | var = data << PCIE_PHY_CTRL_DATA_LOC; |
361 | writel(var, dbi_base + PCIE_PHY_CTRL); | 361 | writel(var, dbi_base + PCIE_PHY_CTRL); |
362 | 362 | ||
363 | /* capture data */ | 363 | /* capture data */ |
364 | var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); | 364 | var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); |
365 | writel(var, dbi_base + PCIE_PHY_CTRL); | 365 | writel(var, dbi_base + PCIE_PHY_CTRL); |
366 | 366 | ||
367 | ret = pcie_phy_poll_ack(dbi_base, 1); | 367 | ret = pcie_phy_poll_ack(dbi_base, 1); |
368 | if (ret) | 368 | if (ret) |
369 | return ret; | 369 | return ret; |
370 | 370 | ||
371 | /* deassert cap data */ | 371 | /* deassert cap data */ |
372 | var = data << PCIE_PHY_CTRL_DATA_LOC; | 372 | var = data << PCIE_PHY_CTRL_DATA_LOC; |
373 | writel(var, dbi_base + PCIE_PHY_CTRL); | 373 | writel(var, dbi_base + PCIE_PHY_CTRL); |
374 | 374 | ||
375 | /* wait for ack de-assertion */ | 375 | /* wait for ack de-assertion */ |
376 | ret = pcie_phy_poll_ack(dbi_base, 0); | 376 | ret = pcie_phy_poll_ack(dbi_base, 0); |
377 | if (ret) | 377 | if (ret) |
378 | return ret; | 378 | return ret; |
379 | 379 | ||
380 | /* assert wr signal */ | 380 | /* assert wr signal */ |
381 | var = 0x1 << PCIE_PHY_CTRL_WR_LOC; | 381 | var = 0x1 << PCIE_PHY_CTRL_WR_LOC; |
382 | writel(var, dbi_base + PCIE_PHY_CTRL); | 382 | writel(var, dbi_base + PCIE_PHY_CTRL); |
383 | 383 | ||
384 | /* wait for ack */ | 384 | /* wait for ack */ |
385 | ret = pcie_phy_poll_ack(dbi_base, 1); | 385 | ret = pcie_phy_poll_ack(dbi_base, 1); |
386 | if (ret) | 386 | if (ret) |
387 | return ret; | 387 | return ret; |
388 | 388 | ||
389 | /* deassert wr signal */ | 389 | /* deassert wr signal */ |
390 | var = data << PCIE_PHY_CTRL_DATA_LOC; | 390 | var = data << PCIE_PHY_CTRL_DATA_LOC; |
391 | writel(var, dbi_base + PCIE_PHY_CTRL); | 391 | writel(var, dbi_base + PCIE_PHY_CTRL); |
392 | 392 | ||
393 | /* wait for ack de-assertion */ | 393 | /* wait for ack de-assertion */ |
394 | ret = pcie_phy_poll_ack(dbi_base, 0); | 394 | ret = pcie_phy_poll_ack(dbi_base, 0); |
395 | if (ret) | 395 | if (ret) |
396 | return ret; | 396 | return ret; |
397 | 397 | ||
398 | writel(0x0, dbi_base + PCIE_PHY_CTRL); | 398 | writel(0x0, dbi_base + PCIE_PHY_CTRL); |
399 | 399 | ||
400 | return 0; | 400 | return 0; |
401 | } | 401 | } |
402 | 402 | ||
403 | #if !CONFIG_IS_ENABLED(DM_PCI) | 403 | #if !CONFIG_IS_ENABLED(DM_PCI) |
404 | void imx_pcie_gpr_read(struct imx_pcie_priv *priv, uint offset, uint *valp) | 404 | void imx_pcie_gpr_read(struct imx_pcie_priv *priv, uint offset, uint *valp) |
405 | { | 405 | { |
406 | struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR; | 406 | struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR; |
407 | *valp = readl(&iomuxc_regs->gpr[offset >> 2]); | 407 | *valp = readl(&iomuxc_regs->gpr[offset >> 2]); |
408 | } | 408 | } |
409 | 409 | ||
410 | void imx_pcie_gpr_update_bits(struct imx_pcie_priv *priv, uint offset, uint mask, uint val) | 410 | void imx_pcie_gpr_update_bits(struct imx_pcie_priv *priv, uint offset, uint mask, uint val) |
411 | { | 411 | { |
412 | struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR; | 412 | struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR; |
413 | clrsetbits_32(&iomuxc_regs->gpr[offset >> 2], mask, val); | 413 | clrsetbits_32(&iomuxc_regs->gpr[offset >> 2], mask, val); |
414 | } | 414 | } |
415 | 415 | ||
416 | #else | 416 | #else |
417 | void imx_pcie_gpr_read(struct imx_pcie_priv *priv, uint offset, uint *valp) | 417 | void imx_pcie_gpr_read(struct imx_pcie_priv *priv, uint offset, uint *valp) |
418 | { | 418 | { |
419 | regmap_read(priv->iomuxc_gpr, offset, valp); | 419 | regmap_read(priv->iomuxc_gpr, offset, valp); |
420 | } | 420 | } |
421 | 421 | ||
422 | void imx_pcie_gpr_update_bits(struct imx_pcie_priv *priv, uint offset, uint mask, uint val) | 422 | void imx_pcie_gpr_update_bits(struct imx_pcie_priv *priv, uint offset, uint mask, uint val) |
423 | { | 423 | { |
424 | regmap_update_bits(priv->iomuxc_gpr, offset, mask, val); | 424 | regmap_update_bits(priv->iomuxc_gpr, offset, mask, val); |
425 | } | 425 | } |
426 | 426 | ||
427 | #endif | 427 | #endif |
428 | 428 | ||
429 | static int imx6_pcie_link_up(struct imx_pcie_priv *priv) | 429 | static int imx6_pcie_link_up(struct imx_pcie_priv *priv) |
430 | { | 430 | { |
431 | u32 rc, ltssm; | 431 | u32 rc, ltssm; |
432 | int rx_valid, temp; | 432 | int rx_valid, temp; |
433 | 433 | ||
434 | /* link is debug bit 36, debug register 1 starts at bit 32 */ | 434 | /* link is debug bit 36, debug register 1 starts at bit 32 */ |
435 | rc = readl(priv->dbi_base + PCIE_PHY_DEBUG_R1); | 435 | rc = readl(priv->dbi_base + PCIE_PHY_DEBUG_R1); |
436 | if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) && | 436 | if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) && |
437 | !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)) | 437 | !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)) |
438 | return -EAGAIN; | 438 | return -EAGAIN; |
439 | 439 | ||
440 | /* | 440 | /* |
441 | * From L0, initiate MAC entry to gen2 if EP/RC supports gen2. | 441 | * From L0, initiate MAC entry to gen2 if EP/RC supports gen2. |
442 | * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2). | 442 | * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2). |
443 | * If (MAC/LTSSM.state == Recovery.RcvrLock) | 443 | * If (MAC/LTSSM.state == Recovery.RcvrLock) |
444 | * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition | 444 | * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition |
445 | * to gen2 is stuck | 445 | * to gen2 is stuck |
446 | */ | 446 | */ |
447 | pcie_phy_read(priv->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid); | 447 | pcie_phy_read(priv->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid); |
448 | ltssm = readl(priv->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F; | 448 | ltssm = readl(priv->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F; |
449 | 449 | ||
450 | if (rx_valid & 0x01) | 450 | if (rx_valid & 0x01) |
451 | return 0; | 451 | return 0; |
452 | 452 | ||
453 | if (ltssm != 0x0d) | 453 | if (ltssm != 0x0d) |
454 | return 0; | 454 | return 0; |
455 | 455 | ||
456 | printf("transition to gen2 is stuck, reset PHY!\n"); | 456 | printf("transition to gen2 is stuck, reset PHY!\n"); |
457 | 457 | ||
458 | pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp); | 458 | pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp); |
459 | temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | 459 | temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); |
460 | pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp); | 460 | pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp); |
461 | 461 | ||
462 | udelay(3000); | 462 | udelay(3000); |
463 | 463 | ||
464 | pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp); | 464 | pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp); |
465 | temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | 465 | temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); |
466 | pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp); | 466 | pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp); |
467 | 467 | ||
468 | return 0; | 468 | return 0; |
469 | } | 469 | } |
470 | 470 | ||
/*
 * Fix class value.
 *
 * The controller powers up advertising a device class; program the
 * PCI-to-PCI bridge class code so the PCI core treats the root port
 * as a bridge and enumerates devices behind it.
 */
static void imx_pcie_fix_class(struct imx_pcie_priv *priv)
{
	writew(PCI_CLASS_BRIDGE_PCI, priv->dbi_base + PCI_CLASS_DEVICE);
}
476 | 476 | ||
/*
 * Clear multi-function bit.
 *
 * Writing PCI_HEADER_TYPE_BRIDGE replaces the whole header-type byte,
 * which both sets the type-1 (bridge) header layout and clears the
 * multi-function flag (bit 7).
 */
static void imx_pcie_clear_multifunction(struct imx_pcie_priv *priv)
{
	writeb(PCI_HEADER_TYPE_BRIDGE, priv->dbi_base + PCI_HEADER_TYPE);
}
482 | 482 | ||
483 | static void imx_pcie_setup_ctrl(struct imx_pcie_priv *priv) | 483 | static void imx_pcie_setup_ctrl(struct imx_pcie_priv *priv) |
484 | { | 484 | { |
485 | u32 val; | 485 | u32 val; |
486 | 486 | ||
487 | writel(PCIE_MISC_DBI_RO_WR_EN, priv->dbi_base + PCIE_MISC_CTRL); | 487 | writel(PCIE_MISC_DBI_RO_WR_EN, priv->dbi_base + PCIE_MISC_CTRL); |
488 | 488 | ||
489 | /* Set the number of lanes */ | 489 | /* Set the number of lanes */ |
490 | val = readl(priv->dbi_base + PCIE_PORT_LINK_CONTROL); | 490 | val = readl(priv->dbi_base + PCIE_PORT_LINK_CONTROL); |
491 | val &= ~PORT_LINK_MODE_MASK; | 491 | val &= ~PORT_LINK_MODE_MASK; |
492 | switch (priv->lanes) { | 492 | switch (priv->lanes) { |
493 | case 1: | 493 | case 1: |
494 | val |= PORT_LINK_MODE_1_LANES; | 494 | val |= PORT_LINK_MODE_1_LANES; |
495 | break; | 495 | break; |
496 | case 2: | 496 | case 2: |
497 | val |= PORT_LINK_MODE_2_LANES; | 497 | val |= PORT_LINK_MODE_2_LANES; |
498 | break; | 498 | break; |
499 | case 4: | 499 | case 4: |
500 | val |= PORT_LINK_MODE_4_LANES; | 500 | val |= PORT_LINK_MODE_4_LANES; |
501 | break; | 501 | break; |
502 | case 8: | 502 | case 8: |
503 | val |= PORT_LINK_MODE_8_LANES; | 503 | val |= PORT_LINK_MODE_8_LANES; |
504 | break; | 504 | break; |
505 | default: | 505 | default: |
506 | printf("num-lanes %u: invalid value\n", priv->lanes); | 506 | printf("num-lanes %u: invalid value\n", priv->lanes); |
507 | return; | 507 | return; |
508 | } | 508 | } |
509 | writel(val, priv->dbi_base + PCIE_PORT_LINK_CONTROL); | 509 | writel(val, priv->dbi_base + PCIE_PORT_LINK_CONTROL); |
510 | 510 | ||
511 | /* Set link width speed control register */ | 511 | /* Set link width speed control register */ |
512 | val = readl(priv->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | 512 | val = readl(priv->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); |
513 | val &= ~PORT_LOGIC_LINK_WIDTH_MASK; | 513 | val &= ~PORT_LOGIC_LINK_WIDTH_MASK; |
514 | switch (priv->lanes) { | 514 | switch (priv->lanes) { |
515 | case 1: | 515 | case 1: |
516 | val |= PORT_LOGIC_LINK_WIDTH_1_LANES; | 516 | val |= PORT_LOGIC_LINK_WIDTH_1_LANES; |
517 | break; | 517 | break; |
518 | case 2: | 518 | case 2: |
519 | val |= PORT_LOGIC_LINK_WIDTH_2_LANES; | 519 | val |= PORT_LOGIC_LINK_WIDTH_2_LANES; |
520 | break; | 520 | break; |
521 | case 4: | 521 | case 4: |
522 | val |= PORT_LOGIC_LINK_WIDTH_4_LANES; | 522 | val |= PORT_LOGIC_LINK_WIDTH_4_LANES; |
523 | break; | 523 | break; |
524 | case 8: | 524 | case 8: |
525 | val |= PORT_LOGIC_LINK_WIDTH_8_LANES; | 525 | val |= PORT_LOGIC_LINK_WIDTH_8_LANES; |
526 | break; | 526 | break; |
527 | } | 527 | } |
528 | writel(val, priv->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | 528 | writel(val, priv->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); |
529 | 529 | ||
530 | /* setup RC BARs */ | 530 | /* setup RC BARs */ |
531 | writel(0, priv->dbi_base + PCI_BASE_ADDRESS_0); | 531 | writel(0, priv->dbi_base + PCI_BASE_ADDRESS_0); |
532 | writel(0, priv->dbi_base + PCI_BASE_ADDRESS_1); | 532 | writel(0, priv->dbi_base + PCI_BASE_ADDRESS_1); |
533 | 533 | ||
534 | /* setup bus numbers */ | 534 | /* setup bus numbers */ |
535 | val = readl(priv->dbi_base + PCI_PRIMARY_BUS); | 535 | val = readl(priv->dbi_base + PCI_PRIMARY_BUS); |
536 | val &= 0xff000000; | 536 | val &= 0xff000000; |
537 | val |= 0x00ff0100; | 537 | val |= 0x00ff0100; |
538 | writel(val, priv->dbi_base + PCI_PRIMARY_BUS); | 538 | writel(val, priv->dbi_base + PCI_PRIMARY_BUS); |
539 | 539 | ||
540 | /* setup command register */ | 540 | /* setup command register */ |
541 | val = readl(priv->dbi_base + PCI_COMMAND); | 541 | val = readl(priv->dbi_base + PCI_COMMAND); |
542 | val &= 0xffff0000; | 542 | val &= 0xffff0000; |
543 | val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | | 543 | val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | |
544 | PCI_COMMAND_MASTER; | 544 | PCI_COMMAND_MASTER; |
545 | writel(val, priv->dbi_base + PCI_COMMAND); | 545 | writel(val, priv->dbi_base + PCI_COMMAND); |
546 | 546 | ||
547 | imx_pcie_fix_class(priv); | 547 | imx_pcie_fix_class(priv); |
548 | imx_pcie_clear_multifunction(priv); | 548 | imx_pcie_clear_multifunction(priv); |
549 | 549 | ||
550 | writel(0, priv->dbi_base + PCIE_MISC_CTRL); | 550 | writel(0, priv->dbi_base + PCIE_MISC_CTRL); |
551 | } | 551 | } |
552 | 552 | ||
/*
 * Program one outbound iATU region: map the CPU window of @size bytes
 * starting at @phys (shifted by the SoC-specific cpu_addr_offset) to
 * @bus_addr on the PCI bus, generating TLPs of the given @type.
 */
static void imx_pcie_atu_outbound_set(struct imx_pcie_priv *priv, int idx, int type,
				      u64 phys, u64 bus_addr, u32 size)
{
	/* Select the region first: the registers below are banked per index. */
	writel(PCIE_ATU_REGION_OUTBOUND | idx, priv->dbi_base + PCIE_ATU_VIEWPORT);
	writel((u32)(phys + priv->cpu_addr_offset), priv->dbi_base + PCIE_ATU_LOWER_BASE);
	writel((phys + priv->cpu_addr_offset) >> 32, priv->dbi_base + PCIE_ATU_UPPER_BASE);
	/* Limit is inclusive: last byte of the window in the low 32 bits. */
	writel((u32)(phys + priv->cpu_addr_offset) + size - 1, priv->dbi_base + PCIE_ATU_LIMIT);
	writel((u32)bus_addr, priv->dbi_base + PCIE_ATU_LOWER_TARGET);
	writel(bus_addr >> 32, priv->dbi_base + PCIE_ATU_UPPER_TARGET);
	writel(type, priv->dbi_base + PCIE_ATU_CR1);
	/* Enable last, once the whole translation is programmed. */
	writel(PCIE_ATU_ENABLE, priv->dbi_base + PCIE_ATU_CR2);
}
565 | 565 | ||
566 | /* | 566 | /* |
567 | * iATU region setup | 567 | * iATU region setup |
568 | */ | 568 | */ |
569 | static int imx_pcie_regions_setup(struct imx_pcie_priv *priv) | 569 | static int imx_pcie_regions_setup(struct imx_pcie_priv *priv) |
570 | { | 570 | { |
571 | if (priv->io) | 571 | if (priv->io) |
572 | /* ATU : OUTBOUND : IO */ | 572 | /* ATU : OUTBOUND : IO */ |
573 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX2, | 573 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX2, |
574 | PCIE_ATU_TYPE_IO, | 574 | PCIE_ATU_TYPE_IO, |
575 | priv->io->phys_start, | 575 | priv->io->phys_start, |
576 | priv->io->bus_start, | 576 | priv->io->bus_start, |
577 | priv->io->size); | 577 | priv->io->size); |
578 | 578 | ||
579 | if (priv->mem) | 579 | if (priv->mem) |
580 | /* ATU : OUTBOUND : MEM */ | 580 | /* ATU : OUTBOUND : MEM */ |
581 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX0, | 581 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX0, |
582 | PCIE_ATU_TYPE_MEM, | 582 | PCIE_ATU_TYPE_MEM, |
583 | priv->mem->phys_start, | 583 | priv->mem->phys_start, |
584 | priv->mem->bus_start, | 584 | priv->mem->bus_start, |
585 | priv->mem->size); | 585 | priv->mem->size); |
586 | 586 | ||
587 | 587 | ||
588 | return 0; | 588 | return 0; |
589 | } | 589 | } |
590 | 590 | ||
591 | /* | 591 | /* |
592 | * PCI Express accessors | 592 | * PCI Express accessors |
593 | */ | 593 | */ |
594 | static void __iomem *get_bus_address(struct imx_pcie_priv *priv, | 594 | static void __iomem *get_bus_address(struct imx_pcie_priv *priv, |
595 | pci_dev_t d, int where) | 595 | pci_dev_t d, int where) |
596 | { | 596 | { |
597 | void __iomem *va_address; | 597 | void __iomem *va_address; |
598 | 598 | ||
599 | if (PCI_BUS(d) == 0) { | 599 | if (PCI_BUS(d) == 0) { |
600 | /* Outbound TLP matched primary interface of the bridge */ | 600 | /* Outbound TLP matched primary interface of the bridge */ |
601 | va_address = priv->dbi_base; | 601 | va_address = priv->dbi_base; |
602 | } else { | 602 | } else { |
603 | if (PCI_BUS(d) < 2) { | 603 | if (PCI_BUS(d) < 2) { |
604 | /* Outbound TLP matched secondary interface of the bridge changes to CFG0 */ | 604 | /* Outbound TLP matched secondary interface of the bridge changes to CFG0 */ |
605 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX1, | 605 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX1, |
606 | PCIE_ATU_TYPE_CFG0, | 606 | PCIE_ATU_TYPE_CFG0, |
607 | (ulong)priv->cfg_base, | 607 | (ulong)priv->cfg_base, |
608 | d << 8, | 608 | d << 8, |
609 | priv->cfg_size >> 1); | 609 | priv->cfg_size >> 1); |
610 | va_address = priv->cfg_base; | 610 | va_address = priv->cfg_base; |
611 | } else { | 611 | } else { |
612 | /* Outbound TLP matched the bus behind the bridge uses type CFG1 */ | 612 | /* Outbound TLP matched the bus behind the bridge uses type CFG1 */ |
613 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX1, | 613 | imx_pcie_atu_outbound_set(priv, PCIE_ATU_REGION_INDEX1, |
614 | PCIE_ATU_TYPE_CFG1, | 614 | PCIE_ATU_TYPE_CFG1, |
615 | (ulong)priv->cfg1_base, | 615 | (ulong)priv->cfg1_base, |
616 | d << 8, | 616 | d << 8, |
617 | priv->cfg_size >> 1); | 617 | priv->cfg_size >> 1); |
618 | va_address = priv->cfg1_base; | 618 | va_address = priv->cfg1_base; |
619 | } | 619 | } |
620 | } | 620 | } |
621 | 621 | ||
622 | va_address += (where & ~0x3); | 622 | va_address += (where & ~0x3); |
623 | 623 | ||
624 | return va_address; | 624 | return va_address; |
625 | 625 | ||
626 | } | 626 | } |
627 | 627 | ||
628 | static int imx_pcie_addr_valid(pci_dev_t d) | 628 | static int imx_pcie_addr_valid(pci_dev_t d) |
629 | { | 629 | { |
630 | if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 0)) | 630 | if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 0)) |
631 | return -EINVAL; | 631 | return -EINVAL; |
632 | /* ARI forward is not enabled, so non-zero device at downstream must be blocked */ | 632 | /* ARI forward is not enabled, so non-zero device at downstream must be blocked */ |
633 | if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0)) | 633 | if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0)) |
634 | return -EINVAL; | 634 | return -EINVAL; |
635 | return 0; | 635 | return 0; |
636 | } | 636 | } |
637 | 637 | ||
/*
 * Replace the original ARM DABT handler with a simple jump-back one.
 *
 * The problem here is that if we have a PCIe bridge attached to this PCIe
 * controller, but no PCIe device is connected to the bridges' downstream
 * port, the attempt to read/write from/to the config space will produce
 * a DABT. This is a behavior of the controller and can not be disabled,
 * unfortunately.
 *
 * To work around the problem, we backup the current DABT handler address
 * and replace it with our own DABT handler, which only bounces right back
 * into the code.
 */
static void imx_pcie_fix_dabt_handler(bool set)
{
#ifdef CONFIG_MX6
	/* _data_abort is the data-abort slot in the exception vector table. */
	extern uint32_t *_data_abort;
	uint32_t *data_abort_addr = (uint32_t *)&_data_abort;

	/* ARM opcode for "subs pc, lr, #4": return past the aborting insn. */
	static const uint32_t data_abort_bounce_handler = 0xe25ef004;
	uint32_t data_abort_bounce_addr = (uint32_t)&data_abort_bounce_handler;

	static uint32_t data_abort_backup;

	if (set) {
		/* Save the real handler, install the bounce handler. */
		data_abort_backup = *data_abort_addr;
		*data_abort_addr = data_abort_bounce_addr;
	} else {
		/* Restore the original handler. */
		*data_abort_addr = data_abort_backup;
	}
#endif
}
670 | 670 | ||
/*
 * Read a 32-bit value from the config space of device @d at offset
 * @where into @val.  Accesses to impossible devices return all-ones
 * (as an empty slot would) and still report success.
 */
static int imx_pcie_read_cfg(struct imx_pcie_priv *priv, pci_dev_t d,
			     int where, u32 *val)
{
	void __iomem *va_address;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret) {
		/* Mimic a master abort: all-ones data, successful return. */
		*val = 0xffffffff;
		return 0;
	}

	va_address = get_bus_address(priv, d, where);

	/*
	 * Read the PCIe config space. We must replace the DABT handler
	 * here in case we got data abort from the PCIe controller, see
	 * imx_pcie_fix_dabt_handler() description. Note that writing the
	 * "val" with valid value is also imperative here as in case we
	 * did got DABT, the val would contain random value.
	 * (The writel() accessor is used deliberately so the pre-fill of
	 * *val cannot be reordered past the readl() below.)
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(0xffffffff, val);
	*val = readl(va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}
699 | 699 | ||
/*
 * Write the 32-bit value @val to the config space of device @d at
 * offset @where.  Returns -EINVAL for impossible device addresses,
 * otherwise 0.
 */
static int imx_pcie_write_cfg(struct imx_pcie_priv *priv, pci_dev_t d,
			      int where, u32 val)
{
	void __iomem *va_address = NULL;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret)
		return ret;

	va_address = get_bus_address(priv, d, where);

	/*
	 * Write the PCIe config space. We must replace the DABT handler
	 * here in case we got data abort from the PCIe controller, see
	 * imx_pcie_fix_dabt_handler() description.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(val, va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}
723 | 723 | ||
724 | static int imx8_pcie_assert_core_reset(struct imx_pcie_priv *priv, | 724 | static int imx8_pcie_assert_core_reset(struct imx_pcie_priv *priv, |
725 | bool prepare_for_boot) | 725 | bool prepare_for_boot) |
726 | { | 726 | { |
727 | u32 val; | 727 | u32 val; |
728 | 728 | ||
729 | switch (priv->variant) { | 729 | switch (priv->variant) { |
730 | case IMX8QXP: | 730 | case IMX8QXP: |
731 | val = IMX8QM_CSR_PCIEB_OFFSET; | 731 | val = IMX8QM_CSR_PCIEB_OFFSET; |
732 | imx_pcie_gpr_update_bits(priv, | 732 | imx_pcie_gpr_update_bits(priv, |
733 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 733 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
734 | IMX8QM_CTRL_BUTTON_RST_N, | 734 | IMX8QM_CTRL_BUTTON_RST_N, |
735 | IMX8QM_CTRL_BUTTON_RST_N); | 735 | IMX8QM_CTRL_BUTTON_RST_N); |
736 | imx_pcie_gpr_update_bits(priv, | 736 | imx_pcie_gpr_update_bits(priv, |
737 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 737 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
738 | IMX8QM_CTRL_PERST_N, | 738 | IMX8QM_CTRL_PERST_N, |
739 | IMX8QM_CTRL_PERST_N); | 739 | IMX8QM_CTRL_PERST_N); |
740 | imx_pcie_gpr_update_bits(priv, | 740 | imx_pcie_gpr_update_bits(priv, |
741 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 741 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
742 | IMX8QM_CTRL_POWER_UP_RST_N, | 742 | IMX8QM_CTRL_POWER_UP_RST_N, |
743 | IMX8QM_CTRL_POWER_UP_RST_N); | 743 | IMX8QM_CTRL_POWER_UP_RST_N); |
744 | break; | 744 | break; |
745 | case IMX8QM: | 745 | case IMX8QM: |
746 | val = IMX8QM_CSR_PCIEA_OFFSET + priv->ctrl_id * SZ_64K; | 746 | val = IMX8QM_CSR_PCIEA_OFFSET + priv->ctrl_id * SZ_64K; |
747 | imx_pcie_gpr_update_bits(priv, | 747 | imx_pcie_gpr_update_bits(priv, |
748 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 748 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
749 | IMX8QM_CTRL_BUTTON_RST_N, | 749 | IMX8QM_CTRL_BUTTON_RST_N, |
750 | IMX8QM_CTRL_BUTTON_RST_N); | 750 | IMX8QM_CTRL_BUTTON_RST_N); |
751 | imx_pcie_gpr_update_bits(priv, | 751 | imx_pcie_gpr_update_bits(priv, |
752 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 752 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
753 | IMX8QM_CTRL_PERST_N, | 753 | IMX8QM_CTRL_PERST_N, |
754 | IMX8QM_CTRL_PERST_N); | 754 | IMX8QM_CTRL_PERST_N); |
755 | imx_pcie_gpr_update_bits(priv, | 755 | imx_pcie_gpr_update_bits(priv, |
756 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 756 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
757 | IMX8QM_CTRL_POWER_UP_RST_N, | 757 | IMX8QM_CTRL_POWER_UP_RST_N, |
758 | IMX8QM_CTRL_POWER_UP_RST_N); | 758 | IMX8QM_CTRL_POWER_UP_RST_N); |
759 | break; | 759 | break; |
760 | default: | 760 | default: |
761 | break; | 761 | break; |
762 | } | 762 | } |
763 | 763 | ||
764 | return 0; | 764 | return 0; |
765 | } | 765 | } |
766 | 766 | ||
/*
 * Configure the HSIO PHY lane routing and reference-clock source for
 * the i.MX8QM/QXP variants.  A no-op on other SoCs.  Always returns 0.
 */
static int imx8_pcie_init_phy(struct imx_pcie_priv *priv)
{
	u32 tmp, val;

	if (priv->variant == IMX8QM
	    || priv->variant == IMX8QXP) {
		/* Route the shared PHY lanes according to hsio_cfg. */
		switch (priv->hsio_cfg) {
		case PCIEAX2SATA:
			/*
			 * bit 0 rx ena 1.
			 * bit12 PHY_X1_EPCS_SEL 1.
			 * bit13 phy_ab_select 0.
			 */
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_PHYX2_OFFSET,
						 IMX8QM_PHYX2_CTRL0_APB_MASK,
						 IMX8QM_PHY_APB_RSTN_0
						 | IMX8QM_PHY_APB_RSTN_1);

			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_PHYX1_EPCS_SEL,
						 IMX8QM_MISC_PHYX1_EPCS_SEL);
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_PCIE_AB_SELECT,
						 0);
			break;

		case PCIEAX1PCIEBX1SATA:
			/* Release the APB resets of both PHYX2 lanes. */
			tmp = IMX8QM_PHY_APB_RSTN_1;
			tmp |= IMX8QM_PHY_APB_RSTN_0;
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_PHYX2_OFFSET,
						 IMX8QM_PHYX2_CTRL0_APB_MASK, tmp);

			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_PHYX1_EPCS_SEL,
						 IMX8QM_MISC_PHYX1_EPCS_SEL);
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_PCIE_AB_SELECT,
						 IMX8QM_MISC_PCIE_AB_SELECT);

			/* Lane-to-link mapping for the split x1/x1 case. */
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_PHYX2_OFFSET,
						 HW_PHYX2_CTRL0_PIPE_LN2LK_MASK,
						 HW_PHYX2_CTRL0_PIPE_LN2LK_3 | HW_PHYX2_CTRL0_PIPE_LN2LK_0);

			break;

		case PCIEAX2PCIEBX1:
			/*
			 * bit 0 rx ena 1.
			 * bit12 PHY_X1_EPCS_SEL 0.
			 * bit13 phy_ab_select 1.
			 */
			if (priv->ctrl_id)
				/* Controller B uses the single-lane PHYX1. */
				imx_pcie_gpr_update_bits(priv,
							 IMX8QM_CSR_PHYX1_OFFSET,
							 IMX8QM_PHY_APB_RSTN_0,
							 IMX8QM_PHY_APB_RSTN_0);
			else
				/* Controller A uses both PHYX2 lanes. */
				imx_pcie_gpr_update_bits(priv,
							 IMX8QM_CSR_PHYX2_OFFSET,
							 IMX8QM_PHYX2_CTRL0_APB_MASK,
							 IMX8QM_PHY_APB_RSTN_0
							 | IMX8QM_PHY_APB_RSTN_1);

			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_PHYX1_EPCS_SEL,
						 0);
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_PCIE_AB_SELECT,
						 IMX8QM_MISC_PCIE_AB_SELECT);
			break;
		}

		/* Select the reference clock: external oscillator or internal PLL. */
		if (priv->ext_osc) {
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_IOB_RXENA,
						 IMX8QM_MISC_IOB_RXENA);
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_IOB_TXENA,
						 0);
		} else {
			/* Try to use the internal pll as ref clk */
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_IOB_RXENA,
						 0);
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_MISC_IOB_TXENA,
						 IMX8QM_MISC_IOB_TXENA);
			imx_pcie_gpr_update_bits(priv,
						 IMX8QM_CSR_MISC_OFFSET,
						 IMX8QM_CSR_MISC_IOB_A_0_TXOE
						 | IMX8QM_CSR_MISC_IOB_A_0_M1M0_MASK,
						 IMX8QM_CSR_MISC_IOB_A_0_TXOE
						 | IMX8QM_CSR_MISC_IOB_A_0_M1M0_2);
		}

		/*
		 * Program the device-type field to 0x4 (presumably RC mode -
		 * TODO confirm against the i.MX8QM reference manual).
		 */
		val = IMX8QM_CSR_PCIEA_OFFSET
			+ priv->ctrl_id * SZ_64K;
		imx_pcie_gpr_update_bits(priv,
					 val, IMX8QM_PCIE_TYPE_MASK,
					 0x4 << 24);

		/* Let the PHY settings settle before any link training. */
		mdelay(10);
	}

	return 0;
}
886 | 886 | ||
/*
 * Poll the HSIO status registers until the PHY TX PLL(s) used by this
 * controller report lock, then ungate the PHY clocks.  Returns 0 on
 * lock and -ENODEV on timeout.  On non-i.MX8QM/QXP variants the loop
 * is skipped and success is reported (retries stays 0).
 */
static int imx8_pcie_wait_for_phy_pll_lock(struct imx_pcie_priv *priv)
{
	u32 val, tmp, orig;
	unsigned int retries = 0;

	if (priv->variant == IMX8QXP
	    || priv->variant == IMX8QM) {
		for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES;
		     retries++) {
			if (priv->hsio_cfg == PCIEAX1PCIEBX1SATA) {
				/* Shared PHYX2: each controller owns one lane. */
				imx_pcie_gpr_read(priv,
						  IMX8QM_CSR_PHYX2_OFFSET + 0x4,
						  &tmp);
				if (priv->ctrl_id == 0) /* pciea 1 lanes */
					orig = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
				else /* pcieb 1 lanes */
					orig = IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
				tmp &= orig;
				if (tmp == orig) {
					/* Locked: ungate both PHY lane clocks. */
					imx_pcie_gpr_update_bits(priv,
								 IMX8QM_LPCG_PHYX2_OFFSET,
								 IMX8QM_LPCG_PHY_PCG0
								 | IMX8QM_LPCG_PHY_PCG1,
								 IMX8QM_LPCG_PHY_PCG0
								 | IMX8QM_LPCG_PHY_PCG1);
					break;
				}
			}

			if (priv->hsio_cfg == PCIEAX2PCIEBX1) {
				/* Controller A waits for both lanes, B for one. */
				val = IMX8QM_CSR_PHYX2_OFFSET
					+ priv->ctrl_id * SZ_64K;
				imx_pcie_gpr_read(priv,
						  val + IMX8QM_CSR_PHYX_STTS0_OFFSET,
						  &tmp);
				orig = IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
				if (priv->ctrl_id == 0) /* pciea 2 lanes */
					orig |= IMX8QM_STTS0_LANE1_TX_PLL_LOCK;
				tmp &= orig;
				if (tmp == orig) {
					val = IMX8QM_CSR_PHYX2_OFFSET
						+ priv->ctrl_id * SZ_64K;
					imx_pcie_gpr_update_bits(priv,
								 val, IMX8QM_LPCG_PHY_PCG0,
								 IMX8QM_LPCG_PHY_PCG0);
					break;
				}
			}
			udelay(10);
		}
	}

	if (retries >= PHY_PLL_LOCK_WAIT_MAX_RETRIES) {
		printf("pcie phy pll can't be locked.\n");
		return -ENODEV;
	} else {
		debug("pcie phy pll is locked.\n");
		return 0;
	}
}
947 | 947 | ||
948 | static int imx8_pcie_deassert_core_reset(struct imx_pcie_priv *priv) | 948 | static int imx8_pcie_deassert_core_reset(struct imx_pcie_priv *priv) |
949 | { | 949 | { |
950 | int ret, i; | 950 | int ret, i; |
951 | u32 val, tmp; | 951 | u32 val, tmp; |
952 | 952 | ||
953 | #if CONFIG_IS_ENABLED(CLK) | 953 | #if CONFIG_IS_ENABLED(CLK) |
954 | ret = clk_enable(&priv->pcie); | 954 | ret = clk_enable(&priv->pcie); |
955 | if (ret) { | 955 | if (ret) { |
956 | printf("unable to enable pcie clock\n"); | 956 | printf("unable to enable pcie clock\n"); |
957 | return ret; | 957 | return ret; |
958 | } | 958 | } |
959 | 959 | ||
960 | ret = clk_enable(&priv->pcie_phy); | 960 | ret = clk_enable(&priv->pcie_phy); |
961 | if (ret) { | 961 | if (ret) { |
962 | printf("unable to enable pcie_phy clock\n"); | 962 | printf("unable to enable pcie_phy clock\n"); |
963 | goto err_pcie; | 963 | goto err_pcie; |
964 | } | 964 | } |
965 | #endif | 965 | #endif |
966 | 966 | ||
967 | if (priv->variant == IMX8QM | 967 | if (priv->variant == IMX8QM |
968 | || priv->variant == IMX8QXP) { | 968 | || priv->variant == IMX8QXP) { |
969 | 969 | ||
970 | #if CONFIG_IS_ENABLED(CLK) | 970 | #if CONFIG_IS_ENABLED(CLK) |
971 | ret = clk_enable(&priv->pcie_inbound_axi); | 971 | ret = clk_enable(&priv->pcie_inbound_axi); |
972 | if (ret) { | 972 | if (ret) { |
973 | printf("unable to enable pcie_axi clock\n"); | 973 | printf("unable to enable pcie_axi clock\n"); |
974 | goto err_pcie_phy; | 974 | goto err_pcie_phy; |
975 | } | 975 | } |
976 | ret = clk_enable(&priv->pcie_per); | 976 | ret = clk_enable(&priv->pcie_per); |
977 | if (ret) { | 977 | if (ret) { |
978 | printf("unable to enable pcie_per clock\n"); | 978 | printf("unable to enable pcie_per clock\n"); |
979 | clk_disable(&priv->pcie_inbound_axi); | 979 | clk_disable(&priv->pcie_inbound_axi); |
980 | goto err_pcie_phy; | 980 | goto err_pcie_phy; |
981 | } | 981 | } |
982 | #endif | 982 | #endif |
983 | /* allow the clocks to stabilize */ | 983 | /* allow the clocks to stabilize */ |
984 | udelay(200); | 984 | udelay(200); |
985 | 985 | ||
986 | /* bit19 PM_REQ_CORE_RST of pciex#_stts0 should be cleared. */ | 986 | /* bit19 PM_REQ_CORE_RST of pciex#_stts0 should be cleared. */ |
987 | for (i = 0; i < 100; i++) { | 987 | for (i = 0; i < 100; i++) { |
988 | val = IMX8QM_CSR_PCIEA_OFFSET | 988 | val = IMX8QM_CSR_PCIEA_OFFSET |
989 | + priv->ctrl_id * SZ_64K; | 989 | + priv->ctrl_id * SZ_64K; |
990 | imx_pcie_gpr_read(priv, | 990 | imx_pcie_gpr_read(priv, |
991 | val + IMX8QM_CSR_PCIE_STTS0_OFFSET, | 991 | val + IMX8QM_CSR_PCIE_STTS0_OFFSET, |
992 | &tmp); | 992 | &tmp); |
993 | if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) == 0) | 993 | if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) == 0) |
994 | break; | 994 | break; |
995 | udelay(10); | 995 | udelay(10); |
996 | } | 996 | } |
997 | 997 | ||
998 | if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) != 0) | 998 | if ((tmp & IMX8QM_CTRL_STTS0_PM_REQ_CORE_RST) != 0) |
999 | printf("ERROR PM_REQ_CORE_RST is still set.\n"); | 999 | printf("ERROR PM_REQ_CORE_RST is still set.\n"); |
1000 | 1000 | ||
1001 | /* wait for phy pll lock firstly. */ | 1001 | /* wait for phy pll lock firstly. */ |
1002 | if (imx8_pcie_wait_for_phy_pll_lock(priv)) { | 1002 | if (imx8_pcie_wait_for_phy_pll_lock(priv)) { |
1003 | ret = -ENODEV; | 1003 | ret = -ENODEV; |
1004 | goto err_ref_clk;; | 1004 | goto err_ref_clk;; |
1005 | } | 1005 | } |
1006 | 1006 | ||
1007 | if (dm_gpio_is_valid(&priv->reset_gpio)) { | 1007 | if (dm_gpio_is_valid(&priv->reset_gpio)) { |
1008 | dm_gpio_set_value(&priv->reset_gpio, 1); | 1008 | dm_gpio_set_value(&priv->reset_gpio, 1); |
1009 | mdelay(20); | 1009 | mdelay(20); |
1010 | dm_gpio_set_value(&priv->reset_gpio, 0); | 1010 | dm_gpio_set_value(&priv->reset_gpio, 0); |
1011 | mdelay(20); | 1011 | mdelay(20); |
1012 | } | 1012 | } |
1013 | 1013 | ||
1014 | return 0; | 1014 | return 0; |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | err_ref_clk: | 1017 | err_ref_clk: |
1018 | #if CONFIG_IS_ENABLED(CLK) | 1018 | #if CONFIG_IS_ENABLED(CLK) |
1019 | clk_disable(&priv->pcie_per); | 1019 | clk_disable(&priv->pcie_per); |
1020 | clk_disable(&priv->pcie_inbound_axi); | 1020 | clk_disable(&priv->pcie_inbound_axi); |
1021 | err_pcie_phy: | 1021 | err_pcie_phy: |
1022 | clk_disable(&priv->pcie_phy); | 1022 | clk_disable(&priv->pcie_phy); |
1023 | err_pcie: | 1023 | err_pcie: |
1024 | clk_disable(&priv->pcie); | 1024 | clk_disable(&priv->pcie); |
1025 | #endif | 1025 | #endif |
1026 | 1026 | ||
1027 | return ret; | 1027 | return ret; |
1028 | } | 1028 | } |
1029 | 1029 | ||
1030 | #ifdef CONFIG_MX6 | 1030 | #ifdef CONFIG_MX6 |
1031 | /* | 1031 | /* |
1032 | * Initial bus setup | 1032 | * Initial bus setup |
1033 | */ | 1033 | */ |
/*
 * Put the i.MX6 PCIe core into reset.
 *
 * @prepare_for_boot: true when called from imx_pcie_remove() just before
 * booting the OS; on i.MX6Q this additionally forces LTSSM back into
 * "detect" and disables it so the OS driver can retrain the link.
 * Returns 0.
 */
static int imx6_pcie_assert_core_reset(struct imx_pcie_priv *priv,
				       bool prepare_for_boot)
{
	/* Only the 6QP has a dedicated PCIe SW reset bit in GPR1. */
	if (priv->variant == IMX6QP)
		imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_PCIE_SW_RST, IOMUXC_GPR1_PCIE_SW_RST);

#if defined(CONFIG_MX6SX)
	if (priv->variant == IMX6SX) {
		struct gpc *gpc_regs = (struct gpc *)GPC_BASE_ADDR;

		/* SSP_EN is not used on MX6SX anymore */
		imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_TEST_POWERDOWN, IOMUXC_GPR12_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		imx_pcie_gpr_update_bits(priv, 20, IOMUXC_GPR5_PCIE_BTNRST, IOMUXC_GPR5_PCIE_BTNRST);
		/* Power up PCIe PHY */
		setbits_le32(&gpc_regs->cntr, PCIE_PHY_PUP_REQ);
		pcie_power_up();

		return 0;
	}
#endif
	/*
	 * If the bootloader already enabled the link we need some special
	 * handling to get the core back into a state where it is safe to
	 * touch it for configuration. As there is no dedicated reset signal
	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
	 * state before completely disabling LTSSM, which is a prerequisite
	 * for core configuration.
	 *
	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
	 * indication that the bootloader activated the link.
	 */
	if (priv->variant == IMX6Q && prepare_for_boot) {
		u32 val, gpr1, gpr12;

		imx_pcie_gpr_read(priv, 4, &gpr1);
		imx_pcie_gpr_read(priv, 48, &gpr12);
		if ((gpr1 & IOMUXC_GPR1_PCIE_REF_CLK_EN) &&
		    (gpr12 & IOMUXC_GPR12_PCIE_CTL_2)) {
			/* Force the link back to "detect" via the port logic. */
			val = readl(priv->dbi_base + PCIE_PL_PFLR);
			val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
			val |= PCIE_PL_PFLR_FORCE_LINK;

			/* The PFLR write may fault; mask data aborts around it. */
			imx_pcie_fix_dabt_handler(true);
			writel(val, priv->dbi_base + PCIE_PL_PFLR);
			imx_pcie_fix_dabt_handler(false);

			imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_PCIE_CTL_2, 0);
		}
	}

	if (priv->variant == IMX6QP || priv->variant == IMX6Q) {
		imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_TEST_POWERDOWN,
					 IOMUXC_GPR1_TEST_POWERDOWN);
		imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_REF_SSP_EN, 0);
	}

	return 0;
}
1093 | 1093 | ||
/*
 * Program the i.MX6 IOMUXC GPR bits that configure the PCIe PHY:
 * RC device type, LOS level and TX de-emphasis/swing settings.
 * Returns 0.
 */
static int imx6_pcie_init_phy(struct imx_pcie_priv *priv)
{
#ifndef DEBUG
	/* Keep LTSSM disabled while the PHY is being configured. */
	imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, 0);
#endif

	/* Operate as a Root Complex. */
	imx_pcie_gpr_update_bits(priv, 48,
			IOMUXC_GPR12_DEVICE_TYPE_MASK,
			IOMUXC_GPR12_DEVICE_TYPE_RC);
	imx_pcie_gpr_update_bits(priv, 48,
			IOMUXC_GPR12_LOS_LEVEL_MASK,
			IOMUXC_GPR12_LOS_LEVEL_9);

	if (priv->variant == IMX6SX) {
		/* 6SX additionally needs an RX equalisation setting. */
		imx_pcie_gpr_update_bits(priv, 48,
				IOMUXC_GPR12_RX_EQ_MASK,
				IOMUXC_GPR12_RX_EQ_2);
	}

	/* TX de-emphasis and swing levels, written to GPR8 in one go. */
	imx_pcie_gpr_update_bits(priv, 32, 0xffffffff,
			(0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) |
			(0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) |
			(20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) |
			(127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) |
			(127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET));

	return 0;
}
1122 | 1122 | ||
/*
 * Power-cycle the PCIe slot via a plain GPIO when the board defines
 * CONFIG_PCIE_IMX_POWER_GPIO (drive low, wait 20 ms, drive high,
 * wait 20 ms).  Boards with more complex power switching override
 * this weak function.  Returns 0.
 */
__weak int imx6_pcie_toggle_power(void)
{
#ifdef CONFIG_PCIE_IMX_POWER_GPIO
	gpio_request(CONFIG_PCIE_IMX_POWER_GPIO, "pcie_power");
	gpio_direction_output(CONFIG_PCIE_IMX_POWER_GPIO, 0);
	mdelay(20);
	gpio_set_value(CONFIG_PCIE_IMX_POWER_GPIO, 1);
	mdelay(20);
	gpio_free(CONFIG_PCIE_IMX_POWER_GPIO);
#endif
	return 0;
}
1135 | 1135 | ||
/*
 * Toggle the PCIe #PERST line towards the endpoint.  Weak so boards
 * with CPLD-driven or otherwise non-GPIO reset wiring can override it.
 * Returns 0.
 */
__weak int imx6_pcie_toggle_reset(void)
{
	/*
	 * See 'PCI EXPRESS BASE SPECIFICATION, REV 3.0, SECTION 6.6.1'
	 * for detailed understanding of the PCIe CR reset logic.
	 *
	 * The PCIe #PERST reset line _MUST_ be connected, otherwise your
	 * design does not conform to the specification. You must wait at
	 * least 20 ms after de-asserting the #PERST so the EP device can
	 * do self-initialisation.
	 *
	 * In case your #PERST pin is connected to a plain GPIO pin of the
	 * CPU, you can define CONFIG_PCIE_IMX_PERST_GPIO in your board's
	 * configuration file and the condition below will handle the rest
	 * of the reset toggling.
	 *
	 * In case your #PERST toggling logic is more complex, for example
	 * connected via CPLD or somesuch, you can override this function
	 * in your board file and implement reset logic as needed. You must
	 * not forget to wait at least 20 ms after de-asserting #PERST in
	 * this case either though.
	 *
	 * In case your #PERST line of the PCIe EP device is not connected
	 * at all, your design is broken and you should fix your design,
	 * otherwise you will observe problems like for example the link
	 * not coming up after rebooting the system back from running Linux
	 * that uses the PCIe as well OR the PCIe link might not come up in
	 * Linux at all in the first place since it's in some non-reset
	 * state due to being previously used in U-Boot.
	 */
#ifdef CONFIG_PCIE_IMX_PERST_GPIO
	/* Assert #PERST, hold it, de-assert, then give the EP 20 ms. */
	gpio_request(CONFIG_PCIE_IMX_PERST_GPIO, "pcie_reset");
	gpio_direction_output(CONFIG_PCIE_IMX_PERST_GPIO, 0);
	mdelay(20);
	gpio_set_value(CONFIG_PCIE_IMX_PERST_GPIO, 1);
	mdelay(20);
	gpio_free(CONFIG_PCIE_IMX_PERST_GPIO);
#else
	puts("WARNING: Make sure the PCIe #PERST line is connected!\n");
#endif

	return 0;
}
1179 | 1179 | ||
/*
 * Release the i.MX6 PCIe core from reset: toggle slot power (non-DM
 * builds), enable the PCIe clock, clear the 6QP SW reset, let the
 * clocks settle and toggle #PERST towards the endpoint.  Returns 0.
 */
static int imx6_pcie_deassert_core_reset(struct imx_pcie_priv *priv)
{
#if !CONFIG_IS_ENABLED(DM_PCI)
	imx6_pcie_toggle_power();
#endif

	enable_pcie_clock();

	/* Counterpart to the GPR1 SW reset asserted for the 6QP above. */
	if (priv->variant == IMX6QP)
		imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_PCIE_SW_RST, 0);

	/*
	 * Wait for the clock to settle a bit, when the clock are sourced
	 * from the CPU, we need about 30 ms to settle.
	 */
	mdelay(50);

	if (priv->variant == IMX6SX) {
		/* SSP_EN is not used on MX6SX anymore */
		imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_TEST_POWERDOWN, 0);
		/* Clear PCIe PHY reset bit */
		imx_pcie_gpr_update_bits(priv, 20, IOMUXC_GPR5_PCIE_BTNRST, 0);
	} else {
		/* Enable PCIe */
		imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_TEST_POWERDOWN, 0);
		imx_pcie_gpr_update_bits(priv, 4, IOMUXC_GPR1_REF_SSP_EN, IOMUXC_GPR1_REF_SSP_EN);
	}

#if !CONFIG_IS_ENABLED(DM_PCI)
	imx6_pcie_toggle_reset();
#else
	/* Assert #PERST, wait, release it, then give the EP 20 ms. */
	if (dm_gpio_is_valid(&priv->reset_gpio)) {
		dm_gpio_set_value(&priv->reset_gpio, 1);
		mdelay(20);
		dm_gpio_set_value(&priv->reset_gpio, 0);
		mdelay(20);
	}
#endif

	return 0;
}
1221 | #endif | 1221 | #endif |
1222 | 1222 | ||
1223 | static int imx_pcie_assert_core_reset(struct imx_pcie_priv *priv, | 1223 | static int imx_pcie_assert_core_reset(struct imx_pcie_priv *priv, |
1224 | bool prepare_for_boot) | 1224 | bool prepare_for_boot) |
1225 | { | 1225 | { |
1226 | switch (priv->variant) { | 1226 | switch (priv->variant) { |
1227 | #ifdef CONFIG_MX6 | 1227 | #ifdef CONFIG_MX6 |
1228 | case IMX6Q: | 1228 | case IMX6Q: |
1229 | case IMX6QP: | 1229 | case IMX6QP: |
1230 | case IMX6SX: | 1230 | case IMX6SX: |
1231 | return imx6_pcie_assert_core_reset(priv, prepare_for_boot); | 1231 | return imx6_pcie_assert_core_reset(priv, prepare_for_boot); |
1232 | #endif | 1232 | #endif |
1233 | case IMX8QM: | 1233 | case IMX8QM: |
1234 | case IMX8QXP: | 1234 | case IMX8QXP: |
1235 | return imx8_pcie_assert_core_reset(priv, prepare_for_boot); | 1235 | return imx8_pcie_assert_core_reset(priv, prepare_for_boot); |
1236 | default: | 1236 | default: |
1237 | return -EPERM; | 1237 | return -EPERM; |
1238 | } | 1238 | } |
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | static int imx_pcie_init_phy(struct imx_pcie_priv *priv) | 1241 | static int imx_pcie_init_phy(struct imx_pcie_priv *priv) |
1242 | { | 1242 | { |
1243 | switch (priv->variant) { | 1243 | switch (priv->variant) { |
1244 | #ifdef CONFIG_MX6 | 1244 | #ifdef CONFIG_MX6 |
1245 | case IMX6Q: | 1245 | case IMX6Q: |
1246 | case IMX6QP: | 1246 | case IMX6QP: |
1247 | case IMX6SX: | 1247 | case IMX6SX: |
1248 | return imx6_pcie_init_phy(priv); | 1248 | return imx6_pcie_init_phy(priv); |
1249 | #endif | 1249 | #endif |
1250 | case IMX8QM: | 1250 | case IMX8QM: |
1251 | case IMX8QXP: | 1251 | case IMX8QXP: |
1252 | return imx8_pcie_init_phy(priv); | 1252 | return imx8_pcie_init_phy(priv); |
1253 | default: | 1253 | default: |
1254 | return -EPERM; | 1254 | return -EPERM; |
1255 | } | 1255 | } |
1256 | } | 1256 | } |
1257 | 1257 | ||
1258 | static int imx_pcie_deassert_core_reset(struct imx_pcie_priv *priv) | 1258 | static int imx_pcie_deassert_core_reset(struct imx_pcie_priv *priv) |
1259 | { | 1259 | { |
1260 | switch (priv->variant) { | 1260 | switch (priv->variant) { |
1261 | #ifdef CONFIG_MX6 | 1261 | #ifdef CONFIG_MX6 |
1262 | case IMX6Q: | 1262 | case IMX6Q: |
1263 | case IMX6QP: | 1263 | case IMX6QP: |
1264 | case IMX6SX: | 1264 | case IMX6SX: |
1265 | return imx6_pcie_deassert_core_reset(priv); | 1265 | return imx6_pcie_deassert_core_reset(priv); |
1266 | #endif | 1266 | #endif |
1267 | case IMX8QM: | 1267 | case IMX8QM: |
1268 | case IMX8QXP: | 1268 | case IMX8QXP: |
1269 | return imx8_pcie_deassert_core_reset(priv); | 1269 | return imx8_pcie_deassert_core_reset(priv); |
1270 | default: | 1270 | default: |
1271 | return -EPERM; | 1271 | return -EPERM; |
1272 | } | 1272 | } |
1273 | } | 1273 | } |
1274 | 1274 | ||
1275 | static void imx_pcie_ltssm_enable(struct imx_pcie_priv *priv, bool enable) | 1275 | static void imx_pcie_ltssm_enable(struct imx_pcie_priv *priv, bool enable) |
1276 | { | 1276 | { |
1277 | u32 val; | 1277 | u32 val; |
1278 | 1278 | ||
1279 | switch (priv->variant) { | 1279 | switch (priv->variant) { |
1280 | #ifdef CONFIG_MX6 | 1280 | #ifdef CONFIG_MX6 |
1281 | case IMX6Q: | 1281 | case IMX6Q: |
1282 | case IMX6SX: | 1282 | case IMX6SX: |
1283 | case IMX6QP: | 1283 | case IMX6QP: |
1284 | if (enable) | 1284 | if (enable) |
1285 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, | 1285 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, |
1286 | IOMUXC_GPR12_APPS_LTSSM_ENABLE); /* LTSSM enable, starting link. */ | 1286 | IOMUXC_GPR12_APPS_LTSSM_ENABLE); /* LTSSM enable, starting link. */ |
1287 | else | 1287 | else |
1288 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, 0); | 1288 | imx_pcie_gpr_update_bits(priv, 48, IOMUXC_GPR12_APPS_LTSSM_ENABLE, 0); |
1289 | 1289 | ||
1290 | break; | 1290 | break; |
1291 | #endif | 1291 | #endif |
1292 | case IMX8QXP: | 1292 | case IMX8QXP: |
1293 | case IMX8QM: | 1293 | case IMX8QM: |
1294 | /* Bit4 of the CTRL2 */ | 1294 | /* Bit4 of the CTRL2 */ |
1295 | val = IMX8QM_CSR_PCIEA_OFFSET | 1295 | val = IMX8QM_CSR_PCIEA_OFFSET |
1296 | + priv->ctrl_id * SZ_64K; | 1296 | + priv->ctrl_id * SZ_64K; |
1297 | if (enable) { | 1297 | if (enable) { |
1298 | imx_pcie_gpr_update_bits(priv, | 1298 | imx_pcie_gpr_update_bits(priv, |
1299 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 1299 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
1300 | IMX8QM_CTRL_LTSSM_ENABLE, | 1300 | IMX8QM_CTRL_LTSSM_ENABLE, |
1301 | IMX8QM_CTRL_LTSSM_ENABLE); | 1301 | IMX8QM_CTRL_LTSSM_ENABLE); |
1302 | } else { | 1302 | } else { |
1303 | imx_pcie_gpr_update_bits(priv, | 1303 | imx_pcie_gpr_update_bits(priv, |
1304 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, | 1304 | val + IMX8QM_CSR_PCIE_CTRL2_OFFSET, |
1305 | IMX8QM_CTRL_LTSSM_ENABLE, | 1305 | IMX8QM_CTRL_LTSSM_ENABLE, |
1306 | 0); | 1306 | 0); |
1307 | } | 1307 | } |
1308 | break; | 1308 | break; |
1309 | default: | 1309 | default: |
1310 | break; | 1310 | break; |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | } | 1313 | } |
1314 | 1314 | ||
1315 | 1315 | ||
/*
 * Bring up the PCIe link: reset and configure the core and PHY, set up
 * the controller and address regions, force Gen1 operation, enable
 * LTSSM and poll for link-up, resetting the endpoint once at ~10 ms.
 * Returns 0 on link-up, -EINVAL if the link never comes up.
 */
static int imx_pcie_link_up(struct imx_pcie_priv *priv)
{
	uint32_t tmp;
	int count = 0;

	imx_pcie_assert_core_reset(priv, false);
	imx_pcie_init_phy(priv);
	imx_pcie_deassert_core_reset(priv);

	imx_pcie_setup_ctrl(priv);
	imx_pcie_regions_setup(priv);

	/*
	 * By default, the subordinate is set equally to the secondary
	 * bus (0x01) when the RC boots.
	 * This means that theoretically, only bus 1 is reachable from the RC.
	 * Force the PCIe RC subordinate to 0xff, otherwise no downstream
	 * devices will be detected if the enumeration is applied strictly.
	 */
	tmp = readl(priv->dbi_base + 0x18);
	tmp |= (0xff << 16);
	writel(tmp, priv->dbi_base + 0x18);

	/*
	 * FIXME: Force the PCIe RC to Gen1 operation
	 * The RC must be forced into Gen1 mode before bringing the link
	 * up, otherwise no downstream devices are detected. After the
	 * link is up, a managed Gen1->Gen2 transition can be initiated.
	 */
	tmp = readl(priv->dbi_base + 0x7c);
	tmp &= ~0xf;
	tmp |= 0x1;
	writel(tmp, priv->dbi_base + 0x7c);

	/* LTSSM enable, starting link. */
	imx_pcie_ltssm_enable(priv, true);

	/* Poll in 10 us steps; give up entirely at count == 4000 (~40 ms). */
	while (!imx6_pcie_link_up(priv)) {
		udelay(10);
		count++;
		if (count == 1000) {
			print_regs(1);
			/* link down, try reset ep, and re-try link here */
			DBGF("pcie link is down, reset ep, then retry!\n");

#if CONFIG_IS_ENABLED(DM_PCI)
			if (dm_gpio_is_valid(&priv->reset_gpio)) {
				dm_gpio_set_value(&priv->reset_gpio, 1);
				mdelay(20);
				dm_gpio_set_value(&priv->reset_gpio, 0);
				mdelay(20);
			}
#elif defined(CONFIG_MX6)
			imx6_pcie_toggle_reset();
#endif
			continue;
		}
#ifdef DEBUG
		else if (count >= 2000) {
			print_regs(1);
			/* link is down, stop here */
			env_set("bootcmd", "sleep 2;");
			DBGF("pcie link is down, stop here!\n");
			imx_pcie_ltssm_enable(priv, false);
			return -EINVAL;
		}
#endif
		if (count >= 4000) {
#ifdef CONFIG_PCI_SCAN_SHOW
			puts("PCI: pcie phy link never came up\n");
#endif
			debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
			      readl(priv->dbi_base + PCIE_PHY_DEBUG_R0),
			      readl(priv->dbi_base + PCIE_PHY_DEBUG_R1));
			imx_pcie_ltssm_enable(priv, false);
			return -EINVAL;
		}
	}

	return 0;
}
1397 | 1397 | ||
1398 | #if !CONFIG_IS_ENABLED(DM_PCI) | 1398 | #if !CONFIG_IS_ENABLED(DM_PCI) |
/*
 * Legacy (non-DM) driver state: a single statically-allocated controller
 * at the fixed MX6 addresses.  The DBI window holds the controller
 * registers; cfg/cfg1 split the MX6_ROOT window in half for config
 * accesses (presumably type-0 vs type-1 cycles — confirm against the
 * imx_pcie_read_cfg/write_cfg implementation).
 */
static struct imx_pcie_priv imx_pcie_priv = {
	.dbi_base = (void __iomem *)MX6_DBI_ADDR,
	.cfg_base = (void __iomem *)MX6_ROOT_ADDR,
	.cfg1_base = (void __iomem *)(MX6_ROOT_ADDR + MX6_ROOT_SIZE / 2),
	.cfg_size = MX6_ROOT_SIZE,
	.lanes = 1,
};

/* Shared by imx_pcie_init()/imx_pcie_remove() in the non-DM path. */
static struct imx_pcie_priv *priv = &imx_pcie_priv;
1408 | 1408 | ||
1409 | 1409 | ||
1410 | static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d, | 1410 | static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d, |
1411 | int where, u32 *val) | 1411 | int where, u32 *val) |
1412 | { | 1412 | { |
1413 | struct imx_pcie_priv *priv = hose->priv_data; | 1413 | struct imx_pcie_priv *priv = hose->priv_data; |
1414 | 1414 | ||
1415 | return imx_pcie_read_cfg(priv, d, where, val); | 1415 | return imx_pcie_read_cfg(priv, d, where, val); |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d, | 1418 | static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d, |
1419 | int where, u32 val) | 1419 | int where, u32 val) |
1420 | { | 1420 | { |
1421 | struct imx_pcie_priv *priv = hose->priv_data; | 1421 | struct imx_pcie_priv *priv = hose->priv_data; |
1422 | 1422 | ||
1423 | return imx_pcie_write_cfg(priv, d, where, val); | 1423 | return imx_pcie_write_cfg(priv, d, where, val); |
1424 | } | 1424 | } |
1425 | 1425 | ||
/*
 * Legacy (non-DM) probe: detect the running SoC variant, describe the
 * PCI I/O, memory and system-memory regions, install the config-space
 * accessors, bring the link up and, on success, register and scan the
 * hose.
 */
void imx_pcie_init(void)
{
	/* Static instance of the controller. */
	static struct pci_controller pcc;
	struct pci_controller *hose = &pcc;
	int ret;
#ifdef DEBUG_STRESS_WR
	/* Success/attempt counters persisted across resets in SNVS_LPGRP. */
	u32 dbg_reg_addr = SNVS_LPGRP;
	u32 dbg_reg = readl(dbg_reg_addr) + 1;
#endif

	memset(&pcc, 0, sizeof(pcc));

	/* Pick the variant from the running SoC, not at compile time. */
	if (is_mx6sx())
		priv->variant = IMX6SX;
	else if (is_mx6dqp())
		priv->variant = IMX6QP;
	else
		priv->variant = IMX6Q;

	hose->priv_data = priv;

	/* PCI I/O space */
	pci_set_region(&hose->regions[0],
		       0, MX6_IO_ADDR,
		       MX6_IO_SIZE, PCI_REGION_IO);

	/* PCI memory space */
	pci_set_region(&hose->regions[1],
		       MX6_MEM_ADDR, MX6_MEM_ADDR,
		       MX6_MEM_SIZE, PCI_REGION_MEM);

	/* System memory space */
	pci_set_region(&hose->regions[2],
		       MMDC0_ARB_BASE_ADDR, MMDC0_ARB_BASE_ADDR,
		       0xefffffff, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	priv->io = &hose->regions[0];
	priv->mem = &hose->regions[1];

	hose->region_count = 3;

	/* Byte/word config accesses are emulated on the dword accessors. */
	pci_set_ops(hose,
		    pci_hose_read_config_byte_via_dword,
		    pci_hose_read_config_word_via_dword,
		    imx_pcie_read_config,
		    pci_hose_write_config_byte_via_dword,
		    pci_hose_write_config_word_via_dword,
		    imx_pcie_write_config);

	/* Start the controller. */
	ret = imx_pcie_link_up(priv);

	if (!ret) {
		pci_register_hose(hose);
		hose->last_busno = pci_hose_scan(hose);
#ifdef DEBUG_STRESS_WR
		/* Successes live in the upper halfword, attempts in the lower. */
		dbg_reg += 1<<16;
#endif
	}
#ifdef DEBUG_STRESS_WR
	writel(dbg_reg, dbg_reg_addr);
	DBGF("PCIe Successes/Attempts: %d/%d\n",
	     dbg_reg >> 16, dbg_reg & 0xffff);
#endif
}
1492 | 1492 | ||
/*
 * imx_pcie_remove() - legacy (non-DM) shutdown hook.
 *
 * Puts the PCIe core back into reset before handing over to the OS, so
 * the kernel re-trains the link from a clean state.
 *
 * NOTE(review): this legacy path calls imx6_pcie_assert_core_reset()
 * while the DM remove path below calls imx_pcie_assert_core_reset() —
 * confirm both helpers exist in this file; the naming difference looks
 * like a possible inconsistency.
 */
void imx_pcie_remove(void)
{
	imx6_pcie_assert_core_reset(priv, true);
}
1497 | 1497 | ||
1498 | /* Probe function. */ | 1498 | /* Probe function. */ |
/*
 * pci_init_board() - board PCI probe entry point (non-DM legacy path).
 *
 * Called by the generic PCI startup code; simply brings up the i.MX
 * PCIe root complex via imx_pcie_init().
 */
void pci_init_board(void)
{
	imx_pcie_init();
}
1503 | 1503 | ||
/*
 * pci_skip_dev() - bus-scan filter hook.
 *
 * Always returns 0: no device is skipped during enumeration.
 */
int pci_skip_dev(struct pci_controller *hose, pci_dev_t dev)
{
	return 0;
}
1508 | 1508 | ||
1509 | #else | 1509 | #else |
1510 | static int imx_pcie_dm_read_config(const struct udevice *dev, pci_dev_t bdf, | 1510 | static int imx_pcie_dm_read_config(const struct udevice *dev, pci_dev_t bdf, |
1511 | uint offset, ulong *value, | 1511 | uint offset, ulong *value, |
1512 | enum pci_size_t size) | 1512 | enum pci_size_t size) |
1513 | { | 1513 | { |
1514 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1514 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1515 | u32 tmpval; | 1515 | u32 tmpval; |
1516 | int ret; | 1516 | int ret; |
1517 | 1517 | ||
1518 | ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); | 1518 | ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); |
1519 | if (ret) | 1519 | if (ret) |
1520 | return ret; | 1520 | return ret; |
1521 | 1521 | ||
1522 | *value = pci_conv_32_to_size(tmpval, offset, size); | 1522 | *value = pci_conv_32_to_size(tmpval, offset, size); |
1523 | return 0; | 1523 | return 0; |
1524 | } | 1524 | } |
1525 | 1525 | ||
1526 | static int imx_pcie_dm_write_config(struct udevice *dev, pci_dev_t bdf, | 1526 | static int imx_pcie_dm_write_config(struct udevice *dev, pci_dev_t bdf, |
1527 | uint offset, ulong value, | 1527 | uint offset, ulong value, |
1528 | enum pci_size_t size) | 1528 | enum pci_size_t size) |
1529 | { | 1529 | { |
1530 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1530 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1531 | u32 tmpval, newval; | 1531 | u32 tmpval, newval; |
1532 | int ret; | 1532 | int ret; |
1533 | 1533 | ||
1534 | ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); | 1534 | ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval); |
1535 | if (ret) | 1535 | if (ret) |
1536 | return ret; | 1536 | return ret; |
1537 | 1537 | ||
1538 | newval = pci_conv_size_to_32(tmpval, value, offset, size); | 1538 | newval = pci_conv_size_to_32(tmpval, value, offset, size); |
1539 | return imx_pcie_write_cfg(priv, bdf, offset, newval); | 1539 | return imx_pcie_write_cfg(priv, bdf, offset, newval); |
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | static int imx_pcie_dm_probe(struct udevice *dev) | 1542 | static int imx_pcie_dm_probe(struct udevice *dev) |
1543 | { | 1543 | { |
1544 | int ret = 0; | 1544 | int ret = 0; |
1545 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1545 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1546 | 1546 | ||
1547 | #if CONFIG_IS_ENABLED(DM_REGULATOR) | 1547 | #if CONFIG_IS_ENABLED(DM_REGULATOR) |
1548 | ret = device_get_supply_regulator(dev, "epdev_on", &priv->epdev_on); | 1548 | ret = device_get_supply_regulator(dev, "epdev_on", &priv->epdev_on); |
1549 | if (ret) { | 1549 | if (ret) { |
1550 | priv->epdev_on = NULL; | 1550 | priv->epdev_on = NULL; |
1551 | dev_dbg(dev, "no epdev_on\n"); | 1551 | dev_dbg(dev, "no epdev_on\n"); |
1552 | } else { | 1552 | } else { |
1553 | ret = regulator_set_enable(priv->epdev_on, true); | 1553 | ret = regulator_set_enable(priv->epdev_on, true); |
1554 | if (ret) { | 1554 | if (ret) { |
1555 | dev_err(dev, "fail to enable epdev_on\n"); | 1555 | dev_err(dev, "fail to enable epdev_on\n"); |
1556 | return ret; | 1556 | return ret; |
1557 | } | 1557 | } |
1558 | } | 1558 | } |
1559 | 1559 | ||
1560 | mdelay(100); | 1560 | mdelay(100); |
1561 | #endif | 1561 | #endif |
1562 | 1562 | ||
1563 | /* Enable the osc clk */ | 1563 | /* Enable the osc clk */ |
1564 | ret = gpio_request_by_name(dev, "clkreq-gpio", 0, &priv->clkreq_gpio, | 1564 | ret = gpio_request_by_name(dev, "clkreq-gpio", 0, &priv->clkreq_gpio, |
1565 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); | 1565 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); |
1566 | if (ret) { | 1566 | if (ret) { |
1567 | dev_info(dev, "%d unable to get clkreq.\n", ret); | 1567 | dev_info(dev, "%d unable to get clkreq.\n", ret); |
1568 | } | 1568 | } |
1569 | 1569 | ||
1570 | /* enable */ | 1570 | /* enable */ |
1571 | ret = gpio_request_by_name(dev, "disable-gpio", 0, &priv->dis_gpio, | 1571 | ret = gpio_request_by_name(dev, "disable-gpio", 0, &priv->dis_gpio, |
1572 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); | 1572 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); |
1573 | if (ret) { | 1573 | if (ret) { |
1574 | dev_info(dev, "%d unable to get disable-gpio.\n", ret); | 1574 | dev_info(dev, "%d unable to get disable-gpio.\n", ret); |
1575 | } | 1575 | } |
1576 | 1576 | ||
1577 | /* Set to power on */ | 1577 | /* Set to power on */ |
1578 | ret = gpio_request_by_name(dev, "power-on-gpio", 0, &priv->power_on_gpio, | 1578 | ret = gpio_request_by_name(dev, "power-on-gpio", 0, &priv->power_on_gpio, |
1579 | (GPIOD_IS_OUT |GPIOD_IS_OUT_ACTIVE)); | 1579 | (GPIOD_IS_OUT |GPIOD_IS_OUT_ACTIVE)); |
1580 | if (ret) { | 1580 | if (ret) { |
1581 | dev_info(dev, "%d unable to get power-on-gpio.\n", ret); | 1581 | dev_info(dev, "%d unable to get power-on-gpio.\n", ret); |
1582 | } | 1582 | } |
1583 | 1583 | ||
1584 | /* Set to reset status */ | 1584 | /* Set to reset status */ |
1585 | ret = gpio_request_by_name(dev, "reset-gpio", 0, &priv->reset_gpio, | 1585 | ret = gpio_request_by_name(dev, "reset-gpio", 0, &priv->reset_gpio, |
1586 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); | 1586 | (GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE)); |
1587 | if (ret) { | 1587 | if (ret) { |
1588 | dev_info(dev, "%d unable to get power-on-gpio.\n", ret); | 1588 | dev_info(dev, "%d unable to get power-on-gpio.\n", ret); |
1589 | } | 1589 | } |
1590 | 1590 | ||
1591 | #if CONFIG_IS_ENABLED(CLK) | 1591 | #if CONFIG_IS_ENABLED(CLK) |
1592 | ret = clk_get_by_name(dev, "pcie_phy", &priv->pcie_phy); | 1592 | ret = clk_get_by_name(dev, "pcie_phy", &priv->pcie_phy); |
1593 | if (ret) { | 1593 | if (ret) { |
1594 | printf("Failed to get pcie_phy clk\n"); | 1594 | printf("Failed to get pcie_phy clk\n"); |
1595 | return ret; | 1595 | return ret; |
1596 | } | 1596 | } |
1597 | 1597 | ||
1598 | ret = clk_get_by_name(dev, "pcie_bus", &priv->pcie_bus); | 1598 | ret = clk_get_by_name(dev, "pcie_bus", &priv->pcie_bus); |
1599 | if (ret) { | 1599 | if (ret) { |
1600 | printf("Failed to get pcie_bus clk\n"); | 1600 | printf("Failed to get pcie_bus clk\n"); |
1601 | return ret; | 1601 | return ret; |
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | ret = clk_get_by_name(dev, "pcie", &priv->pcie); | 1604 | ret = clk_get_by_name(dev, "pcie", &priv->pcie); |
1605 | if (ret) { | 1605 | if (ret) { |
1606 | printf("Failed to get pcie clk\n"); | 1606 | printf("Failed to get pcie clk\n"); |
1607 | return ret; | 1607 | return ret; |
1608 | } | 1608 | } |
1609 | #endif | 1609 | #endif |
1610 | 1610 | ||
1611 | if (priv->variant == IMX8QM || priv->variant == IMX8QXP) { | 1611 | if (priv->variant == IMX8QM || priv->variant == IMX8QXP) { |
1612 | #if CONFIG_IS_ENABLED(CLK) | 1612 | #if CONFIG_IS_ENABLED(CLK) |
1613 | ret = clk_get_by_name(dev, "pcie_per", &priv->pcie_per); | 1613 | ret = clk_get_by_name(dev, "pcie_per", &priv->pcie_per); |
1614 | if (ret) { | 1614 | if (ret) { |
1615 | printf("Failed to get pcie_per clk\n"); | 1615 | printf("Failed to get pcie_per clk\n"); |
1616 | return ret; | 1616 | return ret; |
1617 | } | 1617 | } |
1618 | 1618 | ||
1619 | ret = clk_get_by_name(dev, "pcie_inbound_axi", &priv->pcie_inbound_axi); | 1619 | ret = clk_get_by_name(dev, "pcie_inbound_axi", &priv->pcie_inbound_axi); |
1620 | if (ret) { | 1620 | if (ret) { |
1621 | printf("Failed to get pcie_inbound_axi clk\n"); | 1621 | printf("Failed to get pcie_inbound_axi clk\n"); |
1622 | return ret; | 1622 | return ret; |
1623 | } | 1623 | } |
1624 | #endif | 1624 | #endif |
1625 | priv->iomuxc_gpr = | 1625 | priv->iomuxc_gpr = |
1626 | syscon_regmap_lookup_by_phandle(dev, "hsio"); | 1626 | syscon_regmap_lookup_by_phandle(dev, "hsio"); |
1627 | if (IS_ERR(priv->iomuxc_gpr)) { | 1627 | if (IS_ERR(priv->iomuxc_gpr)) { |
1628 | dev_err(dev, "unable to find gpr registers\n"); | 1628 | dev_err(dev, "unable to find gpr registers\n"); |
1629 | return PTR_ERR(priv->iomuxc_gpr); | 1629 | return PTR_ERR(priv->iomuxc_gpr); |
1630 | } | 1630 | } |
1631 | } else { | 1631 | } else { |
1632 | #if CONFIG_IS_ENABLED(DM_REGULATOR) | 1632 | #if CONFIG_IS_ENABLED(DM_REGULATOR) |
1633 | if (priv->variant == IMX6QP) { | 1633 | if (priv->variant == IMX6QP) { |
1634 | ret = device_get_supply_regulator(dev, "pcie-bus", &priv->pcie_bus_regulator); | 1634 | ret = device_get_supply_regulator(dev, "pcie-bus", &priv->pcie_bus_regulator); |
1635 | if (ret) { | 1635 | if (ret) { |
1636 | dev_dbg(dev, "no pcie_bus_regulator\n"); | 1636 | dev_dbg(dev, "no pcie_bus_regulator\n"); |
1637 | priv->pcie_bus_regulator = NULL; | 1637 | priv->pcie_bus_regulator = NULL; |
1638 | } | 1638 | } |
1639 | } else if (priv->variant == IMX6SX) { | 1639 | } else if (priv->variant == IMX6SX) { |
1640 | ret = device_get_supply_regulator(dev, "pcie-phy", &priv->pcie_phy_regulator); | 1640 | ret = device_get_supply_regulator(dev, "pcie-phy", &priv->pcie_phy_regulator); |
1641 | if (ret) { | 1641 | if (ret) { |
1642 | dev_dbg(dev, "no pcie_phy_regulator\n"); | 1642 | dev_dbg(dev, "no pcie_phy_regulator\n"); |
1643 | priv->pcie_phy_regulator = NULL; | 1643 | priv->pcie_phy_regulator = NULL; |
1644 | } | 1644 | } |
1645 | } | 1645 | } |
1646 | #endif | 1646 | #endif |
1647 | 1647 | ||
1648 | priv->iomuxc_gpr = | 1648 | priv->iomuxc_gpr = |
1649 | syscon_regmap_lookup_by_phandle(dev, "gpr"); | 1649 | syscon_regmap_lookup_by_phandle(dev, "gpr"); |
1650 | if (IS_ERR(priv->iomuxc_gpr)) { | 1650 | if (IS_ERR(priv->iomuxc_gpr)) { |
1651 | dev_err(dev, "unable to find gpr registers\n"); | 1651 | dev_err(dev, "unable to find gpr registers\n"); |
1652 | return PTR_ERR(priv->iomuxc_gpr); | 1652 | return PTR_ERR(priv->iomuxc_gpr); |
1653 | } | 1653 | } |
1654 | } | 1654 | } |
1655 | 1655 | ||
1656 | pci_get_regions(dev, &priv->io, &priv->mem, &priv->pref); | 1656 | pci_get_regions(dev, &priv->io, &priv->mem, &priv->pref); |
1657 | 1657 | ||
1658 | if (priv->cpu_base) | 1658 | if (priv->cpu_base) |
1659 | priv->cpu_addr_offset = priv->cpu_base | 1659 | priv->cpu_addr_offset = priv->cpu_base |
1660 | - priv->mem->phys_start; | 1660 | - priv->mem->phys_start; |
1661 | else | 1661 | else |
1662 | priv->cpu_addr_offset = 0; | 1662 | priv->cpu_addr_offset = 0; |
1663 | 1663 | ||
1664 | return imx_pcie_link_up(priv); | 1664 | return imx_pcie_link_up(priv); |
1665 | } | 1665 | } |
1666 | 1666 | ||
/*
 * imx_pcie_dm_remove() - DM remove hook.
 *
 * Runs just before the OS is started (the driver sets
 * DM_FLAG_OS_PREPARE) and asserts the PCIe core reset so the kernel
 * finds the controller in a clean, link-down state.
 */
static int imx_pcie_dm_remove(struct udevice *dev)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);

	imx_pcie_assert_core_reset(priv, true);

	return 0;
}
1675 | 1675 | ||
1676 | static int imx_pcie_ofdata_to_platdata(struct udevice *dev) | 1676 | static int imx_pcie_ofdata_to_platdata(struct udevice *dev) |
1677 | { | 1677 | { |
1678 | struct imx_pcie_priv *priv = dev_get_priv(dev); | 1678 | struct imx_pcie_priv *priv = dev_get_priv(dev); |
1679 | int ret; | 1679 | int ret; |
1680 | struct resource cfg_res; | 1680 | struct resource cfg_res; |
1681 | 1681 | ||
1682 | priv->dbi_base = (void __iomem *)devfdt_get_addr_index(dev, 0); | 1682 | priv->dbi_base = (void __iomem *)devfdt_get_addr_index(dev, 0); |
1683 | if (!priv->dbi_base) | 1683 | if (!priv->dbi_base) |
1684 | return -EINVAL; | 1684 | return -EINVAL; |
1685 | 1685 | ||
1686 | ret = dev_read_resource_byname(dev, "config", &cfg_res); | 1686 | ret = dev_read_resource_byname(dev, "config", &cfg_res); |
1687 | if (ret) { | 1687 | if (ret) { |
1688 | printf("can't get config resource(ret = %d)\n", ret); | 1688 | printf("can't get config resource(ret = %d)\n", ret); |
1689 | return -ENOMEM; | 1689 | return -ENOMEM; |
1690 | } | 1690 | } |
1691 | 1691 | ||
1692 | priv->cfg_base = map_physmem(cfg_res.start, | 1692 | priv->cfg_base = map_physmem(cfg_res.start, |
1693 | resource_size(&cfg_res), | 1693 | resource_size(&cfg_res), |
1694 | MAP_NOCACHE); | 1694 | MAP_NOCACHE); |
1695 | priv->cfg1_base = priv->cfg_base + resource_size(&cfg_res) / 2; | 1695 | priv->cfg1_base = priv->cfg_base + resource_size(&cfg_res) / 2; |
1696 | priv->cfg_size = resource_size(&cfg_res); | 1696 | priv->cfg_size = resource_size(&cfg_res); |
1697 | 1697 | ||
1698 | priv->variant = (enum imx_pcie_variants)dev_get_driver_data(dev); | 1698 | priv->variant = (enum imx_pcie_variants)dev_get_driver_data(dev); |
1699 | 1699 | ||
1700 | if (dev_read_u32u(dev, "hsio-cfg", &priv->hsio_cfg)) | 1700 | if (dev_read_u32u(dev, "hsio-cfg", &priv->hsio_cfg)) |
1701 | priv->hsio_cfg = 0; | 1701 | priv->hsio_cfg = 0; |
1702 | 1702 | ||
1703 | if (dev_read_u32u(dev, "ctrl-id", &priv->ctrl_id)) | 1703 | if (dev_read_u32u(dev, "ctrl-id", &priv->ctrl_id)) |
1704 | priv->ctrl_id = 0; | 1704 | priv->ctrl_id = 0; |
1705 | 1705 | ||
1706 | if (dev_read_u32u(dev, "ext_osc", &priv->ext_osc)) | 1706 | if (dev_read_u32u(dev, "ext_osc", &priv->ext_osc)) |
1707 | priv->ext_osc = 0; | 1707 | priv->ext_osc = 0; |
1708 | 1708 | ||
1709 | if (dev_read_u32u(dev, "cpu-base-addr", &priv->cpu_base)) | 1709 | if (dev_read_u32u(dev, "cpu-base-addr", &priv->cpu_base)) |
1710 | priv->cpu_base = 0; | 1710 | priv->cpu_base = 0; |
1711 | 1711 | ||
1712 | if (dev_read_u32u(dev, "num-lanes", &priv->lanes)) | 1712 | if (dev_read_u32u(dev, "num-lanes", &priv->lanes)) |
1713 | priv->lanes = 1; | 1713 | priv->lanes = 1; |
1714 | 1714 | ||
1715 | debug("hsio-cfg %u, ctrl-id %u, ext_osc %u, cpu-base 0x%x\n", | 1715 | debug("hsio-cfg %u, ctrl-id %u, ext_osc %u, cpu-base 0x%x\n", |
1716 | priv->hsio_cfg, priv->ctrl_id, priv->ext_osc, priv->cpu_base); | 1716 | priv->hsio_cfg, priv->ctrl_id, priv->ext_osc, priv->cpu_base); |
1717 | 1717 | ||
1718 | return 0; | 1718 | return 0; |
1719 | } | 1719 | } |
1720 | 1720 | ||
/* DM PCI controller operations: config-space accessors only. */
static const struct dm_pci_ops imx_pcie_ops = {
	.read_config = imx_pcie_dm_read_config,
	.write_config = imx_pcie_dm_write_config,
};
1725 | 1725 | ||
/* DT match table; .data carries the enum imx_pcie_variants value. */
static const struct udevice_id imx_pcie_ids[] = {
	{ .compatible = "fsl,imx6q-pcie", .data = (ulong)IMX6Q, },
	{ .compatible = "fsl,imx6sx-pcie", .data = (ulong)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (ulong)IMX6QP, },
	{ .compatible = "fsl,imx8qm-pcie", .data = (ulong)IMX8QM, },
	{ .compatible = "fsl,imx8qxp-pcie", .data = (ulong)IMX8QXP, },
	{ }
};
1734 | 1734 | ||
/*
 * Driver declaration. DM_FLAG_OS_PREPARE makes the DM core invoke
 * .remove (imx_pcie_dm_remove) right before booting the OS, so the
 * controller is handed over with its core held in reset.
 */
U_BOOT_DRIVER(imx_pcie) = {
	.name			= "imx_pcie",
	.id			= UCLASS_PCI,
	.of_match		= imx_pcie_ids,
	.ops			= &imx_pcie_ops,
	.probe			= imx_pcie_dm_probe,
	.remove			= imx_pcie_dm_remove,
	.ofdata_to_platdata	= imx_pcie_ofdata_to_platdata,
	.priv_auto_alloc_size	= sizeof(struct imx_pcie_priv),
	.flags			= DM_FLAG_OS_PREPARE,
};
1746 | #endif | 1746 | #endif |
1747 | 1747 |