Commit 8d9e53b93de7383d5bb4b3507f146bfcd83c6e5d
1 parent: eafa5c8a10
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
tile PCI RC: use proper accessor function
Using the low-level hv_dev_pread() API makes assumptions about the layout of data structures in the Tilera hypervisor API; it's better to use the gxio_XXX accessor and the pcie_trio_ports_property struct.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
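The substance of the change, distilled from the hunk below (a before/after sketch of the two call patterns, not code added by the commit; context and trio_index are the variables already in scope in tile_pcie_open()):

    /* Before: raw hypervisor read, which hard-codes the size and layout
     * of the per-port property array in the hypervisor ABI. */
    ret = hv_dev_pread(context->fd, 0,
                       (HV_VirtAddr)&pcie_ports[trio_index][0],
                       sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES,
                       GXIO_TRIO_OP_GET_PORT_PROPERTY);

    /* After: the gxio accessor fills in a single pcie_trio_ports_property
     * struct per TRIO, so the kernel no longer depends on that layout. */
    ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);

Accordingly, pcie_ports becomes an array of pcie_trio_ports_property indexed by TRIO, and callers reference individual ports as pcie_ports[trio_index].ports[mac] rather than pcie_ports[trio_index][mac].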
Showing 1 changed file with 11 additions and 13 deletions
arch/tile/kernel/pci_gx.c
1 | /* | 1 | /* |
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | 2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public License | 5 | * modify it under the terms of the GNU General Public License |
6 | * as published by the Free Software Foundation, version 2. | 6 | * as published by the Free Software Foundation, version 2. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, but | 8 | * This program is distributed in the hope that it will be useful, but |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | 10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
11 | * NON INFRINGEMENT. See the GNU General Public License for | 11 | * NON INFRINGEMENT. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/mmzone.h> | 16 | #include <linux/mmzone.h> |
17 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/capability.h> | 21 | #include <linux/capability.h> |
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/msi.h> | 25 | #include <linux/msi.h> |
26 | #include <linux/io.h> | 26 | #include <linux/io.h> |
27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
28 | #include <linux/ctype.h> | 28 | #include <linux/ctype.h> |
29 | 29 | ||
30 | #include <asm/processor.h> | 30 | #include <asm/processor.h> |
31 | #include <asm/sections.h> | 31 | #include <asm/sections.h> |
32 | #include <asm/byteorder.h> | 32 | #include <asm/byteorder.h> |
33 | 33 | ||
34 | #include <gxio/iorpc_globals.h> | 34 | #include <gxio/iorpc_globals.h> |
35 | #include <gxio/kiorpc.h> | 35 | #include <gxio/kiorpc.h> |
36 | #include <gxio/trio.h> | 36 | #include <gxio/trio.h> |
37 | #include <gxio/iorpc_trio.h> | 37 | #include <gxio/iorpc_trio.h> |
38 | #include <hv/drv_trio_intf.h> | 38 | #include <hv/drv_trio_intf.h> |
39 | 39 | ||
40 | #include <arch/sim.h> | 40 | #include <arch/sim.h> |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * This file contains the routines to search for PCI buses, | 43 | * This file contains the routines to search for PCI buses, |
44 | * enumerate the buses, and configure any attached devices. | 44 | * enumerate the buses, and configure any attached devices. |
45 | */ | 45 | */ |
46 | 46 | ||
47 | #define DEBUG_PCI_CFG 0 | 47 | #define DEBUG_PCI_CFG 0 |
48 | 48 | ||
49 | #if DEBUG_PCI_CFG | 49 | #if DEBUG_PCI_CFG |
50 | #define TRACE_CFG_WR(size, val, bus, dev, func, offset) \ | 50 | #define TRACE_CFG_WR(size, val, bus, dev, func, offset) \ |
51 | pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \ | 51 | pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \ |
52 | size, val, bus, dev, func, offset & 0xFFF); | 52 | size, val, bus, dev, func, offset & 0xFFF); |
53 | #define TRACE_CFG_RD(size, val, bus, dev, func, offset) \ | 53 | #define TRACE_CFG_RD(size, val, bus, dev, func, offset) \ |
54 | pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \ | 54 | pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \ |
55 | size, val, bus, dev, func, offset & 0xFFF); | 55 | size, val, bus, dev, func, offset & 0xFFF); |
56 | #else | 56 | #else |
57 | #define TRACE_CFG_WR(...) | 57 | #define TRACE_CFG_WR(...) |
58 | #define TRACE_CFG_RD(...) | 58 | #define TRACE_CFG_RD(...) |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | static int pci_probe = 1; | 61 | static int pci_probe = 1; |
62 | 62 | ||
63 | /* Information on the PCIe RC ports configuration. */ | 63 | /* Information on the PCIe RC ports configuration. */ |
64 | static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; | 64 | static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * On some platforms with one or more Gx endpoint ports, we need to | 67 | * On some platforms with one or more Gx endpoint ports, we need to |
68 | * delay the PCIe RC port probe for a few seconds to work around | 68 | * delay the PCIe RC port probe for a few seconds to work around |
69 | * a HW PCIe link-training bug. The exact delay is specified with | 69 | * a HW PCIe link-training bug. The exact delay is specified with |
70 | * a kernel boot argument in the form of "pcie_rc_delay=T,P,S", | 70 | * a kernel boot argument in the form of "pcie_rc_delay=T,P,S", |
71 | * where T is the TRIO instance number, P is the port number and S is | 71 | * where T is the TRIO instance number, P is the port number and S is |
72 | * the delay in seconds. If the argument is specified, but the delay is | 72 | * the delay in seconds. If the argument is specified, but the delay is |
73 | * not provided, the value will be DEFAULT_RC_DELAY. | 73 | * not provided, the value will be DEFAULT_RC_DELAY. |
74 | */ | 74 | */ |
75 | static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; | 75 | static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; |
76 | 76 | ||
77 | /* Default number of seconds that the PCIe RC port probe can be delayed. */ | 77 | /* Default number of seconds that the PCIe RC port probe can be delayed. */ |
78 | #define DEFAULT_RC_DELAY 10 | 78 | #define DEFAULT_RC_DELAY 10 |
79 | 79 | ||
80 | /* The PCI I/O space size in each PCI domain. */ | 80 | /* The PCI I/O space size in each PCI domain. */ |
81 | #define IO_SPACE_SIZE 0x10000 | 81 | #define IO_SPACE_SIZE 0x10000 |
82 | 82 | ||
83 | /* Provide shorter versions of some very long constant names. */ | 83 | /* Provide shorter versions of some very long constant names. */ |
84 | #define AUTO_CONFIG_RC \ | 84 | #define AUTO_CONFIG_RC \ |
85 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC | 85 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC |
86 | #define AUTO_CONFIG_RC_G1 \ | 86 | #define AUTO_CONFIG_RC_G1 \ |
87 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1 | 87 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1 |
88 | #define AUTO_CONFIG_EP \ | 88 | #define AUTO_CONFIG_EP \ |
89 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT | 89 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT |
90 | #define AUTO_CONFIG_EP_G1 \ | 90 | #define AUTO_CONFIG_EP_G1 \ |
91 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1 | 91 | TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1 |
92 | 92 | ||
93 | /* Array of the PCIe ports configuration info obtained from the BIB. */ | 93 | /* Array of the PCIe ports configuration info obtained from the BIB. */ |
94 | struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; | 94 | struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO]; |
95 | 95 | ||
96 | /* Number of configured TRIO instances. */ | 96 | /* Number of configured TRIO instances. */ |
97 | int num_trio_shims; | 97 | int num_trio_shims; |
98 | 98 | ||
99 | /* All drivers share the TRIO contexts defined here. */ | 99 | /* All drivers share the TRIO contexts defined here. */ |
100 | gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; | 100 | gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; |
101 | 101 | ||
102 | /* Pointer to an array of PCIe RC controllers. */ | 102 | /* Pointer to an array of PCIe RC controllers. */ |
103 | struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; | 103 | struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; |
104 | int num_rc_controllers; | 104 | int num_rc_controllers; |
105 | 105 | ||
106 | static struct pci_ops tile_cfg_ops; | 106 | static struct pci_ops tile_cfg_ops; |
107 | 107 | ||
108 | /* Mask of CPUs that should receive PCIe interrupts. */ | 108 | /* Mask of CPUs that should receive PCIe interrupts. */ |
109 | static struct cpumask intr_cpus_map; | 109 | static struct cpumask intr_cpus_map; |
110 | 110 | ||
111 | /* We don't need to worry about the alignment of resources. */ | 111 | /* We don't need to worry about the alignment of resources. */ |
112 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, | 112 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, |
113 | resource_size_t size, | 113 | resource_size_t size, |
114 | resource_size_t align) | 114 | resource_size_t align) |
115 | { | 115 | { |
116 | return res->start; | 116 | return res->start; |
117 | } | 117 | } |
118 | EXPORT_SYMBOL(pcibios_align_resource); | 118 | EXPORT_SYMBOL(pcibios_align_resource); |
119 | 119 | ||
120 | /* | 120 | /* |
121 | * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #. | 121 | * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #. |
122 | * For now, we simply send interrupts to non-dataplane CPUs. | 122 | * For now, we simply send interrupts to non-dataplane CPUs. |
123 | * We may implement methods to allow the user to specify the target CPUs, | 123 | * We may implement methods to allow the user to specify the target CPUs, |
124 | * e.g. via boot arguments. | 124 | * e.g. via boot arguments. |
125 | */ | 125 | */ |
126 | static int tile_irq_cpu(int irq) | 126 | static int tile_irq_cpu(int irq) |
127 | { | 127 | { |
128 | unsigned int count; | 128 | unsigned int count; |
129 | int i = 0; | 129 | int i = 0; |
130 | int cpu; | 130 | int cpu; |
131 | 131 | ||
132 | count = cpumask_weight(&intr_cpus_map); | 132 | count = cpumask_weight(&intr_cpus_map); |
133 | if (unlikely(count == 0)) { | 133 | if (unlikely(count == 0)) { |
134 | pr_warning("intr_cpus_map empty, interrupts will be" | 134 | pr_warning("intr_cpus_map empty, interrupts will be" |
135 | " delievered to dataplane tiles\n"); | 135 | " delievered to dataplane tiles\n"); |
136 | return irq % (smp_height * smp_width); | 136 | return irq % (smp_height * smp_width); |
137 | } | 137 | } |
138 | 138 | ||
139 | count = irq % count; | 139 | count = irq % count; |
140 | for_each_cpu(cpu, &intr_cpus_map) { | 140 | for_each_cpu(cpu, &intr_cpus_map) { |
141 | if (i++ == count) | 141 | if (i++ == count) |
142 | break; | 142 | break; |
143 | } | 143 | } |
144 | return cpu; | 144 | return cpu; |
145 | } | 145 | } |
146 | 146 | ||
147 | /* Open a file descriptor to the TRIO shim. */ | 147 | /* Open a file descriptor to the TRIO shim. */ |
148 | static int tile_pcie_open(int trio_index) | 148 | static int tile_pcie_open(int trio_index) |
149 | { | 149 | { |
150 | gxio_trio_context_t *context = &trio_contexts[trio_index]; | 150 | gxio_trio_context_t *context = &trio_contexts[trio_index]; |
151 | int ret; | 151 | int ret; |
152 | int mac; | 152 | int mac; |
153 | 153 | ||
154 | /* This opens a file descriptor to the TRIO shim. */ | 154 | /* This opens a file descriptor to the TRIO shim. */ |
155 | ret = gxio_trio_init(context, trio_index); | 155 | ret = gxio_trio_init(context, trio_index); |
156 | if (ret < 0) | 156 | if (ret < 0) |
157 | goto gxio_trio_init_failure; | 157 | goto gxio_trio_init_failure; |
158 | 158 | ||
159 | /* Allocate an ASID for the kernel. */ | 159 | /* Allocate an ASID for the kernel. */ |
160 | ret = gxio_trio_alloc_asids(context, 1, 0, 0); | 160 | ret = gxio_trio_alloc_asids(context, 1, 0, 0); |
161 | if (ret < 0) { | 161 | if (ret < 0) { |
162 | pr_err("PCI: ASID alloc failure on TRIO %d, give up\n", | 162 | pr_err("PCI: ASID alloc failure on TRIO %d, give up\n", |
163 | trio_index); | 163 | trio_index); |
164 | goto asid_alloc_failure; | 164 | goto asid_alloc_failure; |
165 | } | 165 | } |
166 | 166 | ||
167 | context->asid = ret; | 167 | context->asid = ret; |
168 | 168 | ||
169 | #ifdef USE_SHARED_PCIE_CONFIG_REGION | 169 | #ifdef USE_SHARED_PCIE_CONFIG_REGION |
170 | /* | 170 | /* |
171 | * Alloc a PIO region for config access, shared by all MACs per TRIO. | 171 | * Alloc a PIO region for config access, shared by all MACs per TRIO. |
172 | * This shouldn't fail since the kernel is supposed to be the first | 172 | * This shouldn't fail since the kernel is supposed to be the first |
173 | * client of the TRIO's PIO regions. | 173 | * client of the TRIO's PIO regions. |
174 | */ | 174 | */ |
175 | ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0); | 175 | ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0); |
176 | if (ret < 0) { | 176 | if (ret < 0) { |
177 | pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n", | 177 | pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n", |
178 | trio_index); | 178 | trio_index); |
179 | goto pio_alloc_failure; | 179 | goto pio_alloc_failure; |
180 | } | 180 | } |
181 | 181 | ||
182 | context->pio_cfg_index = ret; | 182 | context->pio_cfg_index = ret; |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter | 185 | * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter |
186 | * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR. | 186 | * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR. |
187 | */ | 187 | */ |
188 | ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index, | 188 | ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index, |
189 | 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); | 189 | 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); |
190 | if (ret < 0) { | 190 | if (ret < 0) { |
191 | pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n", | 191 | pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n", |
192 | trio_index); | 192 | trio_index); |
193 | goto pio_alloc_failure; | 193 | goto pio_alloc_failure; |
194 | } | 194 | } |
195 | #endif | 195 | #endif |
196 | 196 | ||
197 | /* Get the properties of the PCIe ports on this TRIO instance. */ | 197 | /* Get the properties of the PCIe ports on this TRIO instance. */ |
198 | ret = hv_dev_pread(context->fd, 0, | 198 | ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]); |
199 | (HV_VirtAddr)&pcie_ports[trio_index][0], | ||
200 | sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES, | ||
201 | GXIO_TRIO_OP_GET_PORT_PROPERTY); | ||
202 | if (ret < 0) { | 199 | if (ret < 0) { |
203 | pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d," | 200 | pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d," |
204 | " on TRIO %d\n", ret, trio_index); | 201 | " on TRIO %d\n", ret, trio_index); |
205 | goto get_port_property_failure; | 202 | goto get_port_property_failure; |
206 | } | 203 | } |
207 | 204 | ||
208 | context->mmio_base_mac = | 205 | context->mmio_base_mac = |
209 | iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE); | 206 | iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE); |
210 | if (context->mmio_base_mac == NULL) { | 207 | if (context->mmio_base_mac == NULL) { |
211 | pr_err("PCI: TRIO config space mapping failure, error %d," | 208 | pr_err("PCI: TRIO config space mapping failure, error %d," |
212 | " on TRIO %d\n", ret, trio_index); | 209 | " on TRIO %d\n", ret, trio_index); |
213 | ret = -ENOMEM; | 210 | ret = -ENOMEM; |
214 | 211 | ||
215 | goto trio_mmio_mapping_failure; | 212 | goto trio_mmio_mapping_failure; |
216 | } | 213 | } |
217 | 214 | ||
218 | /* Check the port strap state which will override the BIB setting. */ | 215 | /* Check the port strap state which will override the BIB setting. */ |
219 | for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) { | 216 | for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) { |
220 | TRIO_PCIE_INTFC_PORT_CONFIG_t port_config; | 217 | TRIO_PCIE_INTFC_PORT_CONFIG_t port_config; |
221 | unsigned int reg_offset; | 218 | unsigned int reg_offset; |
222 | 219 | ||
223 | /* Ignore ports that are not specified in the BIB. */ | 220 | /* Ignore ports that are not specified in the BIB. */ |
224 | if (!pcie_ports[trio_index][mac].allow_rc && | 221 | if (!pcie_ports[trio_index].ports[mac].allow_rc && |
225 | !pcie_ports[trio_index][mac].allow_ep) | 222 | !pcie_ports[trio_index].ports[mac].allow_ep) |
226 | continue; | 223 | continue; |
227 | 224 | ||
228 | reg_offset = | 225 | reg_offset = |
229 | (TRIO_PCIE_INTFC_PORT_CONFIG << | 226 | (TRIO_PCIE_INTFC_PORT_CONFIG << |
230 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 227 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
231 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | 228 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << |
232 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT) | | 229 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT) | |
233 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | 230 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); |
234 | 231 | ||
235 | port_config.word = | 232 | port_config.word = |
236 | __gxio_mmio_read(context->mmio_base_mac + reg_offset); | 233 | __gxio_mmio_read(context->mmio_base_mac + reg_offset); |
237 | 234 | ||
238 | if (port_config.strap_state != AUTO_CONFIG_RC && | 235 | if (port_config.strap_state != AUTO_CONFIG_RC && |
239 | port_config.strap_state != AUTO_CONFIG_RC_G1) { | 236 | port_config.strap_state != AUTO_CONFIG_RC_G1) { |
240 | /* | 237 | /* |
241 | * If this is really intended to be an EP port, record | 238 | * If this is really intended to be an EP port, record |
242 | * it so that the endpoint driver will know about it. | 239 | * it so that the endpoint driver will know about it. |
243 | */ | 240 | */ |
244 | if (port_config.strap_state == AUTO_CONFIG_EP || | 241 | if (port_config.strap_state == AUTO_CONFIG_EP || |
245 | port_config.strap_state == AUTO_CONFIG_EP_G1) | 242 | port_config.strap_state == AUTO_CONFIG_EP_G1) |
246 | pcie_ports[trio_index][mac].allow_ep = 1; | 243 | pcie_ports[trio_index].ports[mac].allow_ep = 1; |
247 | } | 244 | } |
248 | } | 245 | } |
249 | 246 | ||
250 | return ret; | 247 | return ret; |
251 | 248 | ||
252 | trio_mmio_mapping_failure: | 249 | trio_mmio_mapping_failure: |
253 | get_port_property_failure: | 250 | get_port_property_failure: |
254 | asid_alloc_failure: | 251 | asid_alloc_failure: |
255 | #ifdef USE_SHARED_PCIE_CONFIG_REGION | 252 | #ifdef USE_SHARED_PCIE_CONFIG_REGION |
256 | pio_alloc_failure: | 253 | pio_alloc_failure: |
257 | #endif | 254 | #endif |
258 | hv_dev_close(context->fd); | 255 | hv_dev_close(context->fd); |
259 | gxio_trio_init_failure: | 256 | gxio_trio_init_failure: |
260 | context->fd = -1; | 257 | context->fd = -1; |
261 | 258 | ||
262 | return ret; | 259 | return ret; |
263 | } | 260 | } |
264 | 261 | ||
265 | static int __init tile_trio_init(void) | 262 | static int __init tile_trio_init(void) |
266 | { | 263 | { |
267 | int i; | 264 | int i; |
268 | 265 | ||
269 | /* We loop over all the TRIO shims. */ | 266 | /* We loop over all the TRIO shims. */ |
270 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { | 267 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { |
271 | if (tile_pcie_open(i) < 0) | 268 | if (tile_pcie_open(i) < 0) |
272 | continue; | 269 | continue; |
273 | num_trio_shims++; | 270 | num_trio_shims++; |
274 | } | 271 | } |
275 | 272 | ||
276 | return 0; | 273 | return 0; |
277 | } | 274 | } |
278 | postcore_initcall(tile_trio_init); | 275 | postcore_initcall(tile_trio_init); |
279 | 276 | ||
280 | static void tilegx_legacy_irq_ack(struct irq_data *d) | 277 | static void tilegx_legacy_irq_ack(struct irq_data *d) |
281 | { | 278 | { |
282 | __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); | 279 | __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); |
283 | } | 280 | } |
284 | 281 | ||
285 | static void tilegx_legacy_irq_mask(struct irq_data *d) | 282 | static void tilegx_legacy_irq_mask(struct irq_data *d) |
286 | { | 283 | { |
287 | __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); | 284 | __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); |
288 | } | 285 | } |
289 | 286 | ||
290 | static void tilegx_legacy_irq_unmask(struct irq_data *d) | 287 | static void tilegx_legacy_irq_unmask(struct irq_data *d) |
291 | { | 288 | { |
292 | __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); | 289 | __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); |
293 | } | 290 | } |
294 | 291 | ||
295 | static struct irq_chip tilegx_legacy_irq_chip = { | 292 | static struct irq_chip tilegx_legacy_irq_chip = { |
296 | .name = "tilegx_legacy_irq", | 293 | .name = "tilegx_legacy_irq", |
297 | .irq_ack = tilegx_legacy_irq_ack, | 294 | .irq_ack = tilegx_legacy_irq_ack, |
298 | .irq_mask = tilegx_legacy_irq_mask, | 295 | .irq_mask = tilegx_legacy_irq_mask, |
299 | .irq_unmask = tilegx_legacy_irq_unmask, | 296 | .irq_unmask = tilegx_legacy_irq_unmask, |
300 | 297 | ||
301 | /* TBD: support set_affinity. */ | 298 | /* TBD: support set_affinity. */ |
302 | }; | 299 | }; |
303 | 300 | ||
304 | /* | 301 | /* |
305 | * This is a wrapper function of the kernel level-trigger interrupt | 302 | * This is a wrapper function of the kernel level-trigger interrupt |
306 | * handler handle_level_irq() for PCI legacy interrupts. The TRIO | 303 | * handler handle_level_irq() for PCI legacy interrupts. The TRIO |
307 | * is configured such that only INTx Assert interrupts are proxied | 304 | * is configured such that only INTx Assert interrupts are proxied |
308 | * to Linux which just calls handle_level_irq() after clearing the | 305 | * to Linux which just calls handle_level_irq() after clearing the |
309 | * MAC INTx Assert status bit associated with this interrupt. | 306 | * MAC INTx Assert status bit associated with this interrupt. |
310 | */ | 307 | */ |
311 | static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc) | 308 | static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc) |
312 | { | 309 | { |
313 | struct pci_controller *controller = irq_desc_get_handler_data(desc); | 310 | struct pci_controller *controller = irq_desc_get_handler_data(desc); |
314 | gxio_trio_context_t *trio_context = controller->trio; | 311 | gxio_trio_context_t *trio_context = controller->trio; |
315 | uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); | 312 | uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); |
316 | int mac = controller->mac; | 313 | int mac = controller->mac; |
317 | unsigned int reg_offset; | 314 | unsigned int reg_offset; |
318 | uint64_t level_mask; | 315 | uint64_t level_mask; |
319 | 316 | ||
320 | handle_level_irq(irq, desc); | 317 | handle_level_irq(irq, desc); |
321 | 318 | ||
322 | /* | 319 | /* |
323 | * Clear the INTx Level status, otherwise future interrupts are | 320 | * Clear the INTx Level status, otherwise future interrupts are |
324 | * not sent. | 321 | * not sent. |
325 | */ | 322 | */ |
326 | reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS << | 323 | reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS << |
327 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 324 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
328 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | 325 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << |
329 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | 326 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | |
330 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | 327 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); |
331 | 328 | ||
332 | level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx; | 329 | level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx; |
333 | 330 | ||
334 | __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask); | 331 | __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask); |
335 | } | 332 | } |
336 | 333 | ||
337 | /* | 334 | /* |
338 | * Create kernel irqs and set up the handlers for the legacy interrupts. | 335 | * Create kernel irqs and set up the handlers for the legacy interrupts. |
339 | * Also some minimum initialization for the MSI support. | 336 | * Also some minimum initialization for the MSI support. |
340 | */ | 337 | */ |
341 | static int tile_init_irqs(struct pci_controller *controller) | 338 | static int tile_init_irqs(struct pci_controller *controller) |
342 | { | 339 | { |
343 | int i; | 340 | int i; |
344 | int j; | 341 | int j; |
345 | int irq; | 342 | int irq; |
346 | int result; | 343 | int result; |
347 | 344 | ||
348 | cpumask_copy(&intr_cpus_map, cpu_online_mask); | 345 | cpumask_copy(&intr_cpus_map, cpu_online_mask); |
349 | 346 | ||
350 | 347 | ||
351 | for (i = 0; i < 4; i++) { | 348 | for (i = 0; i < 4; i++) { |
352 | gxio_trio_context_t *context = controller->trio; | 349 | gxio_trio_context_t *context = controller->trio; |
353 | int cpu; | 350 | int cpu; |
354 | 351 | ||
355 | /* Ask the kernel to allocate an IRQ. */ | 352 | /* Ask the kernel to allocate an IRQ. */ |
356 | irq = create_irq(); | 353 | irq = create_irq(); |
357 | if (irq < 0) { | 354 | if (irq < 0) { |
358 | pr_err("PCI: no free irq vectors, failed for %d\n", i); | 355 | pr_err("PCI: no free irq vectors, failed for %d\n", i); |
359 | 356 | ||
360 | goto free_irqs; | 357 | goto free_irqs; |
361 | } | 358 | } |
362 | controller->irq_intx_table[i] = irq; | 359 | controller->irq_intx_table[i] = irq; |
363 | 360 | ||
364 | /* Distribute the 4 IRQs to different tiles. */ | 361 | /* Distribute the 4 IRQs to different tiles. */ |
365 | cpu = tile_irq_cpu(irq); | 362 | cpu = tile_irq_cpu(irq); |
366 | 363 | ||
367 | /* Configure the TRIO intr binding for this IRQ. */ | 364 | /* Configure the TRIO intr binding for this IRQ. */ |
368 | result = gxio_trio_config_legacy_intr(context, cpu_x(cpu), | 365 | result = gxio_trio_config_legacy_intr(context, cpu_x(cpu), |
369 | cpu_y(cpu), KERNEL_PL, | 366 | cpu_y(cpu), KERNEL_PL, |
370 | irq, controller->mac, i); | 367 | irq, controller->mac, i); |
371 | if (result < 0) { | 368 | if (result < 0) { |
372 | pr_err("PCI: MAC intx config failed for %d\n", i); | 369 | pr_err("PCI: MAC intx config failed for %d\n", i); |
373 | 370 | ||
374 | goto free_irqs; | 371 | goto free_irqs; |
375 | } | 372 | } |
376 | 373 | ||
377 | /* Register the IRQ handler with the kernel. */ | 374 | /* Register the IRQ handler with the kernel. */ |
378 | irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip, | 375 | irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip, |
379 | trio_handle_level_irq); | 376 | trio_handle_level_irq); |
380 | irq_set_chip_data(irq, (void *)(uint64_t)i); | 377 | irq_set_chip_data(irq, (void *)(uint64_t)i); |
381 | irq_set_handler_data(irq, controller); | 378 | irq_set_handler_data(irq, controller); |
382 | } | 379 | } |
383 | 380 | ||
384 | return 0; | 381 | return 0; |
385 | 382 | ||
386 | free_irqs: | 383 | free_irqs: |
387 | for (j = 0; j < i; j++) | 384 | for (j = 0; j < i; j++) |
388 | destroy_irq(controller->irq_intx_table[j]); | 385 | destroy_irq(controller->irq_intx_table[j]); |
389 | 386 | ||
390 | return -1; | 387 | return -1; |
391 | } | 388 | } |
392 | 389 | ||
393 | /* | 390 | /* |
394 | * Return 1 if the port is strapped to operate in RC mode. | 391 | * Return 1 if the port is strapped to operate in RC mode. |
395 | */ | 392 | */ |
396 | static int | 393 | static int |
397 | strapped_for_rc(gxio_trio_context_t *trio_context, int mac) | 394 | strapped_for_rc(gxio_trio_context_t *trio_context, int mac) |
398 | { | 395 | { |
399 | TRIO_PCIE_INTFC_PORT_CONFIG_t port_config; | 396 | TRIO_PCIE_INTFC_PORT_CONFIG_t port_config; |
400 | unsigned int reg_offset; | 397 | unsigned int reg_offset; |
401 | 398 | ||
402 | /* Check the port configuration. */ | 399 | /* Check the port configuration. */ |
403 | reg_offset = | 400 | reg_offset = |
404 | (TRIO_PCIE_INTFC_PORT_CONFIG << | 401 | (TRIO_PCIE_INTFC_PORT_CONFIG << |
405 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 402 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
406 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | 403 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << |
407 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT) | | 404 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT) | |
408 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | 405 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); |
409 | port_config.word = | 406 | port_config.word = |
410 | __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset); | 407 | __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset); |
411 | 408 | ||
412 | if (port_config.strap_state == AUTO_CONFIG_RC || | 409 | if (port_config.strap_state == AUTO_CONFIG_RC || |
413 | port_config.strap_state == AUTO_CONFIG_RC_G1) | 410 | port_config.strap_state == AUTO_CONFIG_RC_G1) |
414 | return 1; | 411 | return 1; |
415 | else | 412 | else |
416 | return 0; | 413 | return 0; |
417 | } | 414 | } |
418 | 415 | ||
419 | /* | 416 | /* |
420 | * Find valid controllers and fill in pci_controller structs for each | 417 | * Find valid controllers and fill in pci_controller structs for each |
421 | * of them. | 418 | * of them. |
422 | * | 419 | * |
423 | * Return the number of controllers discovered. | 420 | * Return the number of controllers discovered. |
424 | */ | 421 | */ |
425 | int __init tile_pci_init(void) | 422 | int __init tile_pci_init(void) |
426 | { | 423 | { |
427 | int ctl_index = 0; | 424 | int ctl_index = 0; |
428 | int i, j; | 425 | int i, j; |
429 | 426 | ||
430 | if (!pci_probe) { | 427 | if (!pci_probe) { |
431 | pr_info("PCI: disabled by boot argument\n"); | 428 | pr_info("PCI: disabled by boot argument\n"); |
432 | return 0; | 429 | return 0; |
433 | } | 430 | } |
434 | 431 | ||
435 | pr_info("PCI: Searching for controllers...\n"); | 432 | pr_info("PCI: Searching for controllers...\n"); |
436 | 433 | ||
437 | if (num_trio_shims == 0 || sim_is_simulator()) | 434 | if (num_trio_shims == 0 || sim_is_simulator()) |
438 | return 0; | 435 | return 0; |
439 | 436 | ||
440 | /* | 437 | /* |
441 | * Now determine which PCIe ports are configured to operate in RC mode. | 438 | * Now determine which PCIe ports are configured to operate in RC |
442 | * We look at the Board Information Block first and then see if there | 439 | * mode. To use a port, it must be allowed to be in RC mode by the |
443 | * are any overriding configuration by the HW strapping pin. | 440 | * Board Information Block, and the hardware strapping pins must be |
441 | * set to RC mode. | ||
444 | */ | 442 | */ |
445 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { | 443 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { |
446 | gxio_trio_context_t *context = &trio_contexts[i]; | 444 | gxio_trio_context_t *context = &trio_contexts[i]; |
447 | 445 | ||
448 | if (context->fd < 0) | 446 | if (context->fd < 0) |
449 | continue; | 447 | continue; |
450 | 448 | ||
451 | for (j = 0; j < TILEGX_TRIO_PCIES; j++) { | 449 | for (j = 0; j < TILEGX_TRIO_PCIES; j++) { |
452 | if (pcie_ports[i][j].allow_rc && | 450 | if (pcie_ports[i].ports[j].allow_rc && |
453 | strapped_for_rc(context, j)) { | 451 | strapped_for_rc(context, j)) { |
454 | pcie_rc[i][j] = 1; | 452 | pcie_rc[i][j] = 1; |
455 | num_rc_controllers++; | 453 | num_rc_controllers++; |
456 | } | 454 | } |
457 | } | 455 | } |
458 | } | 456 | } |
459 | 457 | ||
460 | /* Return if no PCIe ports are configured to operate in RC mode. */ | 458 | /* Return if no PCIe ports are configured to operate in RC mode. */ |
461 | if (num_rc_controllers == 0) | 459 | if (num_rc_controllers == 0) |
462 | return 0; | 460 | return 0; |
463 | 461 | ||
464 | /* Set the TRIO pointer and MAC index for each PCIe RC port. */ | 462 | /* Set the TRIO pointer and MAC index for each PCIe RC port. */ |
465 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { | 463 | for (i = 0; i < TILEGX_NUM_TRIO; i++) { |
466 | for (j = 0; j < TILEGX_TRIO_PCIES; j++) { | 464 | for (j = 0; j < TILEGX_TRIO_PCIES; j++) { |
467 | if (pcie_rc[i][j]) { | 465 | if (pcie_rc[i][j]) { |
468 | pci_controllers[ctl_index].trio = | 466 | pci_controllers[ctl_index].trio = |
469 | &trio_contexts[i]; | 467 | &trio_contexts[i]; |
470 | pci_controllers[ctl_index].mac = j; | 468 | pci_controllers[ctl_index].mac = j; |
471 | pci_controllers[ctl_index].trio_index = i; | 469 | pci_controllers[ctl_index].trio_index = i; |
472 | ctl_index++; | 470 | ctl_index++; |
473 | if (ctl_index == num_rc_controllers) | 471 | if (ctl_index == num_rc_controllers) |
474 | goto out; | 472 | goto out; |
475 | } | 473 | } |
476 | } | 474 | } |
477 | } | 475 | } |
478 | 476 | ||
479 | out: | 477 | out: |
480 | /* Configure each PCIe RC port. */ | 478 | /* Configure each PCIe RC port. */ |
481 | for (i = 0; i < num_rc_controllers; i++) { | 479 | for (i = 0; i < num_rc_controllers; i++) { |
482 | 480 | ||
483 | /* Configure the PCIe MAC to run in RC mode. */ | 481 | /* Configure the PCIe MAC to run in RC mode. */ |
484 | struct pci_controller *controller = &pci_controllers[i]; | 482 | struct pci_controller *controller = &pci_controllers[i]; |
485 | 483 | ||
486 | controller->index = i; | 484 | controller->index = i; |
487 | controller->ops = &tile_cfg_ops; | 485 | controller->ops = &tile_cfg_ops; |
488 | 486 | ||
489 | controller->io_space.start = PCIBIOS_MIN_IO + | 487 | controller->io_space.start = PCIBIOS_MIN_IO + |
490 | (i * IO_SPACE_SIZE); | 488 | (i * IO_SPACE_SIZE); |
491 | controller->io_space.end = controller->io_space.start + | 489 | controller->io_space.end = controller->io_space.start + |
492 | IO_SPACE_SIZE - 1; | 490 | IO_SPACE_SIZE - 1; |
493 | BUG_ON(controller->io_space.end > IO_SPACE_LIMIT); | 491 | BUG_ON(controller->io_space.end > IO_SPACE_LIMIT); |
494 | controller->io_space.flags = IORESOURCE_IO; | 492 | controller->io_space.flags = IORESOURCE_IO; |
495 | snprintf(controller->io_space_name, | 493 | snprintf(controller->io_space_name, |
496 | sizeof(controller->io_space_name), | 494 | sizeof(controller->io_space_name), |
497 | "PCI I/O domain %d", i); | 495 | "PCI I/O domain %d", i); |
498 | controller->io_space.name = controller->io_space_name; | 496 | controller->io_space.name = controller->io_space_name; |
499 | 497 | ||
500 | /* | 498 | /* |
501 | * The PCI memory resource is located above the PA space. | 499 | * The PCI memory resource is located above the PA space. |
502 | * For every host bridge, the BAR window or the MMIO aperture | 500 | * For every host bridge, the BAR window or the MMIO aperture |
503 | * is in range [3GB, 4GB - 1] of a 4GB space beyond the | 501 | * is in range [3GB, 4GB - 1] of a 4GB space beyond the |
504 | * PA space. | 502 | * PA space. |
505 | */ | 503 | */ |
506 | controller->mem_offset = TILE_PCI_MEM_START + | 504 | controller->mem_offset = TILE_PCI_MEM_START + |
507 | (i * TILE_PCI_BAR_WINDOW_TOP); | 505 | (i * TILE_PCI_BAR_WINDOW_TOP); |
508 | controller->mem_space.start = controller->mem_offset + | 506 | controller->mem_space.start = controller->mem_offset + |
509 | TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE; | 507 | TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE; |
510 | controller->mem_space.end = controller->mem_offset + | 508 | controller->mem_space.end = controller->mem_offset + |
511 | TILE_PCI_BAR_WINDOW_TOP - 1; | 509 | TILE_PCI_BAR_WINDOW_TOP - 1; |
512 | controller->mem_space.flags = IORESOURCE_MEM; | 510 | controller->mem_space.flags = IORESOURCE_MEM; |
513 | snprintf(controller->mem_space_name, | 511 | snprintf(controller->mem_space_name, |
514 | sizeof(controller->mem_space_name), | 512 | sizeof(controller->mem_space_name), |
515 | "PCI mem domain %d", i); | 513 | "PCI mem domain %d", i); |
516 | controller->mem_space.name = controller->mem_space_name; | 514 | controller->mem_space.name = controller->mem_space_name; |
517 | } | 515 | } |
518 | 516 | ||
519 | return num_rc_controllers; | 517 | return num_rc_controllers; |
520 | } | 518 | } |
521 | 519 | ||
522 | /* | 520 | /* |
523 | * (pin - 1) converts from the PCI standard's [1:4] convention to | 521 | * (pin - 1) converts from the PCI standard's [1:4] convention to |
524 | * a normal [0:3] range. | 522 | * a normal [0:3] range. |
525 | */ | 523 | */ |
526 | static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin) | 524 | static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin) |
527 | { | 525 | { |
528 | struct pci_controller *controller = | 526 | struct pci_controller *controller = |
529 | (struct pci_controller *)dev->sysdata; | 527 | (struct pci_controller *)dev->sysdata; |
530 | return controller->irq_intx_table[pin - 1]; | 528 | return controller->irq_intx_table[pin - 1]; |
531 | } | 529 | } |
532 | 530 | ||
533 | static void fixup_read_and_payload_sizes(struct pci_controller *controller) | 531 | static void fixup_read_and_payload_sizes(struct pci_controller *controller) |
534 | { | 532 | { |
535 | gxio_trio_context_t *trio_context = controller->trio; | 533 | gxio_trio_context_t *trio_context = controller->trio; |
536 | struct pci_bus *root_bus = controller->root_bus; | 534 | struct pci_bus *root_bus = controller->root_bus; |
537 | TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control; | 535 | TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control; |
538 | TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap; | 536 | TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap; |
539 | unsigned int reg_offset; | 537 | unsigned int reg_offset; |
540 | struct pci_bus *child; | 538 | struct pci_bus *child; |
541 | int mac; | 539 | int mac; |
542 | int err; | 540 | int err; |
543 | 541 | ||
544 | mac = controller->mac; | 542 | mac = controller->mac; |
545 | 543 | ||
546 | /* Set our max read request size to be 4KB. */ | 544 | /* Set our max read request size to be 4KB. */ |
547 | reg_offset = | 545 | reg_offset = |
548 | (TRIO_PCIE_RC_DEVICE_CONTROL << | 546 | (TRIO_PCIE_RC_DEVICE_CONTROL << |
549 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 547 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
550 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << | 548 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << |
551 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | 549 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | |
552 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | 550 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); |
553 | 551 | ||
554 | dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + | 552 | dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + |
555 | reg_offset); | 553 | reg_offset); |
556 | dev_control.max_read_req_sz = 5; | 554 | dev_control.max_read_req_sz = 5; |
557 | __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, | 555 | __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, |
558 | dev_control.word); | 556 | dev_control.word); |
559 | 557 | ||
560 | /* | 558 | /* |
561 | * Set the max payload size supported by this Gx PCIe MAC. | 559 | * Set the max payload size supported by this Gx PCIe MAC. |
562 | * Though Gx PCIe supports Max Payload Size of up to 1024 bytes, | 560 | * Though Gx PCIe supports Max Payload Size of up to 1024 bytes, |
563 | * experiments have shown that setting MPS to 256 yields the | 561 | * experiments have shown that setting MPS to 256 yields the |
564 | * best performance. | 562 | * best performance. |
565 | */ | 563 | */ |
566 | reg_offset = | 564 | reg_offset = |
567 | (TRIO_PCIE_RC_DEVICE_CAP << | 565 | (TRIO_PCIE_RC_DEVICE_CAP << |
568 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 566 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
569 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << | 567 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << |
570 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | 568 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | |
571 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | 569 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); |
572 | 570 | ||
573 | rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac + | 571 | rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac + |
574 | reg_offset); | 572 | reg_offset); |
575 | rc_dev_cap.mps_sup = 1; | 573 | rc_dev_cap.mps_sup = 1; |
576 | __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, | 574 | __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, |
577 | rc_dev_cap.word); | 575 | rc_dev_cap.word); |
578 | 576 | ||
579 | /* Configure PCI Express MPS setting. */ | 577 | /* Configure PCI Express MPS setting. */ |
580 | list_for_each_entry(child, &root_bus->children, node) { | 578 | list_for_each_entry(child, &root_bus->children, node) { |
581 | struct pci_dev *self = child->self; | 579 | struct pci_dev *self = child->self; |
582 | if (!self) | 580 | if (!self) |
583 | continue; | 581 | continue; |
584 | 582 | ||
585 | pcie_bus_configure_settings(child, self->pcie_mpss); | 583 | pcie_bus_configure_settings(child, self->pcie_mpss); |
586 | } | 584 | } |
587 | 585 | ||
588 | /* | 586 | /* |
589 | * Set the mac_config register in trio based on the MPS/MRS of the link. | 587 | * Set the mac_config register in trio based on the MPS/MRS of the link. |
590 | */ | 588 | */ |
591 | reg_offset = | 589 | reg_offset = |
592 | (TRIO_PCIE_RC_DEVICE_CONTROL << | 590 | (TRIO_PCIE_RC_DEVICE_CONTROL << |
593 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 591 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
594 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << | 592 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD << |
595 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | 593 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | |
596 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | 594 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); |
597 | 595 | ||
598 | dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + | 596 | dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + |
599 | reg_offset); | 597 | reg_offset); |
600 | 598 | ||
601 | err = gxio_trio_set_mps_mrs(trio_context, | 599 | err = gxio_trio_set_mps_mrs(trio_context, |
602 | dev_control.max_payload_size, | 600 | dev_control.max_payload_size, |
603 | dev_control.max_read_req_sz, | 601 | dev_control.max_read_req_sz, |
604 | mac); | 602 | mac); |
605 | if (err < 0) { | 603 | if (err < 0) { |
606 | pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, " | 604 | pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, " |
607 | "MAC %d on TRIO %d\n", | 605 | "MAC %d on TRIO %d\n", |
608 | mac, controller->trio_index); | 606 | mac, controller->trio_index); |
609 | } | 607 | } |
610 | } | 608 | } |
611 | 609 | ||
612 | static int setup_pcie_rc_delay(char *str) | 610 | static int setup_pcie_rc_delay(char *str) |
613 | { | 611 | { |
614 | unsigned long delay = 0; | 612 | unsigned long delay = 0; |
615 | unsigned long trio_index; | 613 | unsigned long trio_index; |
616 | unsigned long mac; | 614 | unsigned long mac; |
617 | 615 | ||
618 | if (str == NULL || !isdigit(*str)) | 616 | if (str == NULL || !isdigit(*str)) |
619 | return -EINVAL; | 617 | return -EINVAL; |
620 | trio_index = simple_strtoul(str, (char **)&str, 10); | 618 | trio_index = simple_strtoul(str, (char **)&str, 10); |
621 | if (trio_index >= TILEGX_NUM_TRIO) | 619 | if (trio_index >= TILEGX_NUM_TRIO) |
622 | return -EINVAL; | 620 | return -EINVAL; |
623 | 621 | ||
624 | if (*str != ',') | 622 | if (*str != ',') |
625 | return -EINVAL; | 623 | return -EINVAL; |
626 | 624 | ||
627 | str++; | 625 | str++; |
628 | if (!isdigit(*str)) | 626 | if (!isdigit(*str)) |
629 | return -EINVAL; | 627 | return -EINVAL; |
630 | mac = simple_strtoul(str, (char **)&str, 10); | 628 | mac = simple_strtoul(str, (char **)&str, 10); |
631 | if (mac >= TILEGX_TRIO_PCIES) | 629 | if (mac >= TILEGX_TRIO_PCIES) |
632 | return -EINVAL; | 630 | return -EINVAL; |
633 | 631 | ||
634 | if (*str != '\0') { | 632 | if (*str != '\0') { |
635 | if (*str != ',') | 633 | if (*str != ',') |
636 | return -EINVAL; | 634 | return -EINVAL; |
637 | 635 | ||
638 | str++; | 636 | str++; |
639 | if (!isdigit(*str)) | 637 | if (!isdigit(*str)) |
640 | return -EINVAL; | 638 | return -EINVAL; |
641 | delay = simple_strtoul(str, (char **)&str, 10); | 639 | delay = simple_strtoul(str, (char **)&str, 10); |
642 | } | 640 | } |
643 | 641 | ||
644 | rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY; | 642 | rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY; |
645 | return 0; | 643 | return 0; |
646 | } | 644 | } |
647 | early_param("pcie_rc_delay", setup_pcie_rc_delay); | 645 | early_param("pcie_rc_delay", setup_pcie_rc_delay); |
648 | 646 | ||
649 | /* PCI initialization entry point, called by subsys_initcall. */ | 647 | /* PCI initialization entry point, called by subsys_initcall. */ |
650 | int __init pcibios_init(void) | 648 | int __init pcibios_init(void) |
651 | { | 649 | { |
652 | resource_size_t offset; | 650 | resource_size_t offset; |
653 | LIST_HEAD(resources); | 651 | LIST_HEAD(resources); |
654 | int next_busno; | 652 | int next_busno; |
655 | int i; | 653 | int i; |
656 | 654 | ||
657 | tile_pci_init(); | 655 | tile_pci_init(); |
658 | 656 | ||
659 | if (num_rc_controllers == 0) | 657 | if (num_rc_controllers == 0) |
660 | return 0; | 658 | return 0; |
661 | 659 | ||
662 | /* | 660 | /* |
663 | * Delay a bit in case devices aren't ready. Some devices are | 661 | * Delay a bit in case devices aren't ready. Some devices are |
664 | * known to require at least 20ms here, but we use a more | 662 | * known to require at least 20ms here, but we use a more |
665 | * conservative value. | 663 | * conservative value. |
666 | */ | 664 | */ |
667 | msleep(250); | 665 | msleep(250); |
668 | 666 | ||
669 | /* Scan all of the recorded PCI controllers. */ | 667 | /* Scan all of the recorded PCI controllers. */ |
670 | for (next_busno = 0, i = 0; i < num_rc_controllers; i++) { | 668 | for (next_busno = 0, i = 0; i < num_rc_controllers; i++) { |
671 | struct pci_controller *controller = &pci_controllers[i]; | 669 | struct pci_controller *controller = &pci_controllers[i]; |
672 | gxio_trio_context_t *trio_context = controller->trio; | 670 | gxio_trio_context_t *trio_context = controller->trio; |
673 | TRIO_PCIE_INTFC_PORT_STATUS_t port_status; | 671 | TRIO_PCIE_INTFC_PORT_STATUS_t port_status; |
674 | TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl; | 672 | TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl; |
675 | struct pci_bus *bus; | 673 | struct pci_bus *bus; |
676 | unsigned int reg_offset; | 674 | unsigned int reg_offset; |
677 | unsigned int class_code_revision; | 675 | unsigned int class_code_revision; |
678 | int trio_index; | 676 | int trio_index; |
679 | int mac; | 677 | int mac; |
680 | int ret; | 678 | int ret; |
681 | 679 | ||
682 | if (trio_context->fd < 0) | 680 | if (trio_context->fd < 0) |
683 | continue; | 681 | continue; |
684 | 682 | ||
685 | trio_index = controller->trio_index; | 683 | trio_index = controller->trio_index; |
686 | mac = controller->mac; | 684 | mac = controller->mac; |
687 | 685 | ||
688 | /* | 686 | /* |
689 | * Check for PCIe link-up status to decide if we need | 687 | * Check for PCIe link-up status to decide if we need |
690 | * to force the link to come up. | 688 | * to force the link to come up. |
691 | */ | 689 | */ |
692 | reg_offset = | 690 | reg_offset = |
693 | (TRIO_PCIE_INTFC_PORT_STATUS << | 691 | (TRIO_PCIE_INTFC_PORT_STATUS << |
694 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 692 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
695 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | 693 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << |
696 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT) | | 694 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT) | |
697 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | 695 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); |
698 | 696 | ||
699 | port_status.word = | 697 | port_status.word = |
700 | __gxio_mmio_read(trio_context->mmio_base_mac + | 698 | __gxio_mmio_read(trio_context->mmio_base_mac + |
701 | reg_offset); | 699 | reg_offset); |
702 | if (!port_status.dl_up) { | 700 | if (!port_status.dl_up) { |
703 | if (rc_delay[trio_index][mac]) { | 701 | if (rc_delay[trio_index][mac]) { |
704 | pr_info("Delaying PCIe RC TRIO init %d sec" | 702 | pr_info("Delaying PCIe RC TRIO init %d sec" |
705 | " on MAC %d on TRIO %d\n", | 703 | " on MAC %d on TRIO %d\n", |
706 | rc_delay[trio_index][mac], mac, | 704 | rc_delay[trio_index][mac], mac, |
707 | trio_index); | 705 | trio_index); |
708 | msleep(rc_delay[trio_index][mac] * 1000); | 706 | msleep(rc_delay[trio_index][mac] * 1000); |
709 | } | 707 | } |
710 | ret = gxio_trio_force_rc_link_up(trio_context, mac); | 708 | ret = gxio_trio_force_rc_link_up(trio_context, mac); |
711 | if (ret < 0) | 709 | if (ret < 0) |
712 | pr_err("PCI: PCIE_FORCE_LINK_UP failure, " | 710 | pr_err("PCI: PCIE_FORCE_LINK_UP failure, " |
713 | "MAC %d on TRIO %d\n", mac, trio_index); | 711 | "MAC %d on TRIO %d\n", mac, trio_index); |
714 | } | 712 | } |
715 | 713 | ||
716 | pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i, | 714 | pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i, |
717 | trio_index, controller->mac); | 715 | trio_index, controller->mac); |
718 | 716 | ||
719 | /* Delay the bus probe if needed. */ | 717 | /* Delay the bus probe if needed. */ |
720 | if (rc_delay[trio_index][mac]) { | 718 | if (rc_delay[trio_index][mac]) { |
721 | pr_info("Delaying PCIe RC bus enumerating %d sec" | 719 | pr_info("Delaying PCIe RC bus enumerating %d sec" |
722 | " on MAC %d on TRIO %d\n", | 720 | " on MAC %d on TRIO %d\n", |
723 | rc_delay[trio_index][mac], mac, | 721 | rc_delay[trio_index][mac], mac, |
724 | trio_index); | 722 | trio_index); |
725 | msleep(rc_delay[trio_index][mac] * 1000); | 723 | msleep(rc_delay[trio_index][mac] * 1000); |
726 | } else { | 724 | } else { |
727 | /* | 725 | /* |
728 | * Wait a bit here because some EP devices | 726 | * Wait a bit here because some EP devices |
729 | * take longer to come up. | 727 | * take longer to come up. |
730 | */ | 728 | */ |
731 | msleep(1000); | 729 | msleep(1000); |
732 | } | 730 | } |
733 | 731 | ||
734 | /* Check for PCIe link-up status again. */ | 732 | /* Check for PCIe link-up status again. */ |
735 | port_status.word = | 733 | port_status.word = |
736 | __gxio_mmio_read(trio_context->mmio_base_mac + | 734 | __gxio_mmio_read(trio_context->mmio_base_mac + |
737 | reg_offset); | 735 | reg_offset); |
738 | if (!port_status.dl_up) { | 736 | if (!port_status.dl_up) { |
739 | if (pcie_ports[trio_index][mac].removable) { | 737 | if (pcie_ports[trio_index].ports[mac].removable) { |
740 | pr_info("PCI: link is down, MAC %d on TRIO %d\n", | 738 | pr_info("PCI: link is down, MAC %d on TRIO %d\n", |
741 | mac, trio_index); | 739 | mac, trio_index); |
742 | pr_info("This is expected if no PCIe card" | 740 | pr_info("This is expected if no PCIe card" |
743 | " is connected to this link\n"); | 741 | " is connected to this link\n"); |
744 | } else | 742 | } else |
745 | pr_err("PCI: link is down, MAC %d on TRIO %d\n", | 743 | pr_err("PCI: link is down, MAC %d on TRIO %d\n", |
746 | mac, trio_index); | 744 | mac, trio_index); |
747 | continue; | 745 | continue; |
748 | } | 746 | } |
749 | 747 | ||
750 | /* | 748 | /* |
751 | * Ensure that the link can come out of L1 power down state. | 749 | * Ensure that the link can come out of L1 power down state. |
752 | * Strictly speaking, this is needed only in the case of | 750 | * Strictly speaking, this is needed only in the case of |
753 | * heavy RC-initiated DMAs. | 751 | * heavy RC-initiated DMAs. |
754 | */ | 752 | */ |
755 | reg_offset = | 753 | reg_offset = |
756 | (TRIO_PCIE_INTFC_TX_FIFO_CTL << | 754 | (TRIO_PCIE_INTFC_TX_FIFO_CTL << |
757 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 755 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
758 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << | 756 | (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << |
759 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | | 757 | TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | |
760 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); | 758 | (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); |
761 | tx_fifo_ctl.word = | 759 | tx_fifo_ctl.word = |
762 | __gxio_mmio_read(trio_context->mmio_base_mac + | 760 | __gxio_mmio_read(trio_context->mmio_base_mac + |
763 | reg_offset); | 761 | reg_offset); |
764 | tx_fifo_ctl.min_p_credits = 0; | 762 | tx_fifo_ctl.min_p_credits = 0; |
765 | __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, | 763 | __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, |
766 | tx_fifo_ctl.word); | 764 | tx_fifo_ctl.word); |
767 | 765 | ||
768 | /* | 766 | /* |
769 | * Change the device ID so that Linux bus crawl doesn't confuse | 767 | * Change the device ID so that Linux bus crawl doesn't confuse |
770 | * the internal bridge with any Tilera endpoints. | 768 | * the internal bridge with any Tilera endpoints. |
771 | */ | 769 | */ |
772 | reg_offset = | 770 | reg_offset = |
773 | (TRIO_PCIE_RC_DEVICE_ID_VEN_ID << | 771 | (TRIO_PCIE_RC_DEVICE_ID_VEN_ID << |
774 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | | 772 | TRIO_CFG_REGION_ADDR__REG_SHIFT) | |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			(TILERA_GX36_RC_DEV_ID <<
				TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
			TILERA_VENDOR_ID);

		/* Set the internal P2P bridge class code. */
		reg_offset =
			(TRIO_PCIE_RC_REVISION_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		class_code_revision =
			__gxio_mmio_read32(trio_context->mmio_base_mac +
					   reg_offset);
		class_code_revision = (class_code_revision & 0xff) |
			(PCI_CLASS_BRIDGE_PCI << 16);

		__gxio_mmio_write32(trio_context->mmio_base_mac +
				    reg_offset, class_code_revision);

#ifdef USE_SHARED_PCIE_CONFIG_REGION

		/* Map in the MMIO space for the PIO region. */
		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
			(((unsigned long long)mac) <<
			 TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);

#else

		/* Alloc a PIO region for PCI config access per MAC. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
			       "on TRIO %d, give up\n", mac, trio_index);

			continue;
		}

		trio_context->pio_cfg_index[mac] = ret;

		/* For PIO CFG, the bus_address_hi parameter is 0. */
		ret = gxio_trio_init_pio_region_aux(trio_context,
			trio_context->pio_cfg_index[mac],
			mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO init failure for mac %d "
			       "on TRIO %d, give up\n", mac, trio_index);

			continue;
		}

		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
			(((unsigned long long)mac) <<
			 TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);

#endif

		trio_context->mmio_base_pio_cfg[mac] =
			iorpc_ioremap(trio_context->fd, offset,
			(1 << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT));
		if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
			pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
			       mac, trio_index);

			continue;
		}

		/* Initialize the PCIe interrupts. */
		if (tile_init_irqs(controller)) {
			pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
			       mac, trio_index);

			continue;
		}

		/*
		 * The PCI memory resource is located above the PA space.
		 * The memory range for the PCI root bus should not overlap
		 * with the physical RAM.
		 */
		pci_add_resource_offset(&resources, &controller->mem_space,
					controller->mem_offset);
		pci_add_resource(&resources, &controller->io_space);
		controller->first_busno = next_busno;
		bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
					controller, &resources);
		controller->root_bus = bus;
		next_busno = bus->busn_res.end + 1;
	}

	/* Do machine dependent PCI interrupt routing */
	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);

	/*
	 * This comes from the generic Linux PCI driver.
	 *
	 * It allocates all of the resources (I/O memory, etc)
	 * associated with the devices read in above.
	 */
	pci_assign_unassigned_resources();

	/* Record the I/O resources in the PCI controller structure. */
	for (i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		struct pci_bus *root_bus = pci_controllers[i].root_bus;
		int ret;
		int j;

		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (root_bus == NULL)
			continue;

		/* Configure the max_payload_size values for this domain. */
		fixup_read_and_payload_sizes(controller);

		/* Alloc a PIO region for PCI memory access for each RC port. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
			       "give up\n", controller->trio_index,
			       controller->mac);

			continue;
		}

		controller->pio_mem_index = ret;

		/*
		 * For PIO MEM, the bus_address_hi parameter is hard-coded 0
		 * because we always assign 32-bit PCI bus BAR ranges.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_mem_index,
						    controller->mac,
						    0,
						    0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
			       "give up\n", controller->trio_index,
			       controller->mac);

			continue;
		}

#ifdef CONFIG_TILE_PCI_IO
		/*
		 * Alloc a PIO region for PCI I/O space access for each RC port.
		 */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, "
			       "give up\n", controller->trio_index,
			       controller->mac);

			continue;
		}

		controller->pio_io_index = ret;

		/*
		 * For PIO IO, the bus_address_hi parameter is hard-coded 0
		 * because PCI I/O address space is 32-bit.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_io_index,
						    controller->mac,
						    0,
						    HV_TRIO_PIO_FLAG_IO_SPACE);
		if (ret < 0) {
			pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, "
			       "give up\n", controller->trio_index,
			       controller->mac);

			continue;
		}
#endif

		/*
		 * Configure a Mem-Map region for each memory controller so
		 * that Linux can map all of its PA space to the PCI bus.
		 * Use the IOMMU to handle hash-for-home memory.
		 */
		for_each_online_node(j) {
			unsigned long start_pfn = node_start_pfn[j];
			unsigned long end_pfn = node_end_pfn[j];
			unsigned long nr_pages = end_pfn - start_pfn;

			ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
							  0);
			if (ret < 0) {
				pr_err("PCI: Mem-Map alloc failure on TRIO %d "
				       "mac %d for MC %d, give up\n",
				       controller->trio_index,
				       controller->mac, j);

				goto alloc_mem_map_failed;
			}

			controller->mem_maps[j] = ret;

			/*
			 * Initialize the Mem-Map and the I/O MMU so that all
			 * the physical memory can be accessed by the endpoint
			 * devices. The base bus address is set to the base CPA
			 * of this memory controller plus an offset (see pci.h).
			 * The region's base VA is set to the base CPA. The
			 * I/O MMU table essentially translates the CPA to
			 * the real PA. Implicitly, for node 0, we create
			 * a separate Mem-Map region that serves as the inbound
			 * window for legacy 32-bit devices. This is a direct
			 * map of the low 4GB CPA space.
			 */
			ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
				controller->mem_maps[j],
				start_pfn << PAGE_SHIFT,
				nr_pages << PAGE_SHIFT,
				trio_context->asid,
				controller->mac,
				(start_pfn << PAGE_SHIFT) +
				TILE_PCI_MEM_MAP_BASE_OFFSET,
				j,
				GXIO_TRIO_ORDER_MODE_UNORDERED);
			if (ret < 0) {
				pr_err("PCI: Mem-Map init failure on TRIO %d "
				       "mac %d for MC %d, give up\n",
				       controller->trio_index,
				       controller->mac, j);

				goto alloc_mem_map_failed;
			}
			continue;

alloc_mem_map_failed:
			break;
		}
	}

	return 0;
}
subsys_initcall(pcibios_init);
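
/*
 * For illustration, a minimal sketch of the inbound-window arithmetic
 * described in the Mem-Map comment above: the bus address a device must
 * target is the node's base CPA shifted up by the fixed Mem-Map offset.
 * This helper is hypothetical (not part of the driver); it just restates
 * the seventh argument passed to gxio_trio_init_memory_map_mmu_aux().
 */
static inline u64 tile_pci_example_inbound_bus_addr(unsigned long start_pfn)
{
	/* Base CPA of the memory controller plus the offset from pci.h. */
	return ((u64)start_pfn << PAGE_SHIFT) + TILE_PCI_MEM_MAP_BASE_OFFSET;
}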

/* No bus fixups needed. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
}

/* Process any "pci=" kernel boot arguments. */
char *pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}
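
/*
 * A sketch of how pcibios_setup() is reached, assuming the generic
 * "pci=" option parser: each comma-separated option is passed in, and a
 * NULL return means "consumed".  Booting with "pci=off" therefore
 * clears pci_probe before any bus scan runs.  The loop below is
 * illustrative only, not the in-tree parser.
 */
static void example_parse_pci_options(char *options)
{
	char *opt;

	while ((opt = strsep(&options, ",")) != NULL)
		if (pcibios_setup(opt) != NULL)
			pr_warn("PCI: unknown option '%s'\n", opt);
}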

/*
 * Enable memory address decoding, as appropriate, for the
 * device described by the 'dev' struct.
 *
 * This is called from the generic PCI layer, and can be called
 * for bridges or endpoints.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}

/* Called for each device after PCI setup is done. */
static void pcibios_fixup_final(struct pci_dev *pdev)
{
	set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
	set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
	pdev->dev.archdata.max_direct_dma_addr =
		TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
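
/*
 * The net effect of the fixup above, sketched with a hypothetical
 * driver buffer (assumes <linux/dma-mapping.h>): DMA addresses handed
 * to a device resolve through gx_pci_dma_map_ops and carry the same
 * fixed Mem-Map offset that the inbound windows in pcibios_init()
 * expect.  Illustrative only.
 */
static dma_addr_t example_map_for_device(struct pci_dev *pdev,
					 void *buf, size_t len)
{
	/* Dispatches into gx_pci_dma_map_ops installed by the fixup. */
	return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
}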

/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = phys_addr;
	end = phys_addr + size - 1;

	/*
	 * By searching phys_addr in each controller's mem_space, we can
	 * determine the controller that should accept the PCI memory access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].mem_space.start;
		bar_end = pci_controllers[i].mem_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	start = phys_addr - controller->mem_offset;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) +
		(start & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioremap);
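
/*
 * Typical driver-side use of the ioremap() above, as a minimal sketch:
 * map BAR 0 of a device and read one 32-bit register.  The device and
 * register offset are hypothetical.
 */
static u32 example_read_bar0_reg(struct pci_dev *pdev, unsigned long reg)
{
	void __iomem *regs;
	u32 val;

	regs = ioremap(pci_resource_start(pdev, 0),
		       pci_resource_len(pdev, 0));
	if (regs == NULL)
		return ~0U;

	val = readl(regs + reg);
	iounmap(regs);
	return val;
}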

#ifdef CONFIG_TILE_PCI_IO
/* Map a PCI I/O address into VA space. */
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = port;
	end = port + size - 1;

	/*
	 * By searching the port in each controller's io_space, we can
	 * determine the controller that should accept the PCI I/O access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].io_space.start;
		bar_end = pci_controllers[i].io_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	port -= controller->io_space.start;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioport_map);

void ioport_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(ioport_unmap);
#endif
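
/*
 * Sketch of legacy port I/O through the window above, assuming
 * CONFIG_TILE_PCI_IO is enabled: ioread8()/iowrite8() operate on the
 * cookie that ioport_map() returns.  The port number is hypothetical.
 */
static u8 example_read_port(unsigned long port)
{
	void __iomem *p = ioport_map(port, 1);
	u8 val;

	if (p == NULL)
		return 0xff;

	val = ioread8(p);
	ioport_unmap(p);
	return val;
}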

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);

/****************************************************************
 *
 * Tile PCI config space read/write routines
 *
 ****************************************************************/

/*
 * These are the normal read and write ops.
 * These are expanded with macros from pci_bus_read_config_byte() etc.
 *
 * devfn is the combined PCI device & function.
 *
 * offset is in bytes, from the start of config space for the
 * specified bus & device.
 */
static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
			 int size, u32 *val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;

	/*
	 * Map all accesses to the local device on root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;

		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		*val = __gxio_mmio_read32(mmio_addr);
		break;

	case 2:
		*val = __gxio_mmio_read16(mmio_addr);
		break;

	case 1:
		*val = __gxio_mmio_read8(mmio_addr);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	TRACE_CFG_RD(size, *val, busnum, device, function, offset);

	return 0;

invalid_device:

	switch (size) {
	case 4:
		*val = 0xFFFFFFFF;
		break;

	case 2:
		*val = 0xFFFF;
		break;

	case 1:
		*val = 0xFF;
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return 0;
}
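
/*
 * Why invalid_device returns all-ones: the generic probe path reads the
 * vendor ID and treats 0xffffffff as "no device here", so faking an
 * empty slot just means returning what a master abort would.  A minimal
 * sketch of that check (the helper itself is hypothetical):
 */
static bool example_slot_present(struct pci_bus *bus, unsigned int devfn)
{
	u32 vendor;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendor))
		return false;

	return vendor != 0xffffffff && vendor != 0x00000000;
}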

/*
 * See tile_cfg_read() for relevant comments.
 * Note that "val" is the value to write, not a pointer to that value.
 */
static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
			  int size, u32 val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;
	u32 val_32 = (u32)val;
	u16 val_16 = (u16)val;
	u8 val_8 = (u8)val;

	/*
	 * Map all accesses to the local device on root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;

		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		__gxio_mmio_write32(mmio_addr, val_32);
		TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
		break;

	case 2:
		__gxio_mmio_write16(mmio_addr, val_16);
		TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
		break;

	case 1:
		__gxio_mmio_write8(mmio_addr, val_8);
		TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

invalid_device:

	return 0;
}


static struct pci_ops tile_cfg_ops = {
	.read = tile_cfg_read,
	.write = tile_cfg_write,
};
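
/*
 * These ops back the config cycles the generic layer issues against
 * this root complex; a read like the sketch below lands in
 * tile_cfg_read() via the pci_read_config_*() wrappers.  The helper is
 * hypothetical.
 */
static u16 example_read_command_reg(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	return cmd;
}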

/* MSI support starts here. */
static unsigned int tilegx_msi_startup(struct irq_data *d)
{
	if (d->msi_desc)
		unmask_msi_irq(d);

	return 0;
}

static void tilegx_msi_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void tilegx_msi_mask(struct irq_data *d)
{
	mask_msi_irq(d);
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void tilegx_msi_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
	unmask_msi_irq(d);
}

static struct irq_chip tilegx_msi_chip = {
	.name = "tilegx_msi",
	.irq_startup = tilegx_msi_startup,
	.irq_ack = tilegx_msi_ack,
	.irq_mask = tilegx_msi_mask,
	.irq_unmask = tilegx_msi_unmask,

	/* TBD: support set_affinity. */
};

int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_controller *controller;
	gxio_trio_context_t *trio_context;
	struct msi_msg msg;
	int default_irq;
	uint64_t mem_map_base;
	uint64_t mem_map_limit;
	u64 msi_addr;
	int mem_map;
	int cpu;
	int irq;
	int ret;

	irq = create_irq();
	if (irq < 0)
		return irq;

	/*
	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
	 * devices that are not capable of generating a 64-bit message address.
	 * These devices will fall back to using the legacy interrupts.
	 * Most PCIe endpoint devices do support 64-bit message addressing.
	 */
	if (desc->msi_attrib.is_64 == 0) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "64-bit MSI message address not supported, "
			   "falling back to legacy interrupts.\n");

		ret = -ENOMEM;
		goto is_64_failure;
	}

	default_irq = desc->msi_attrib.default_irq;
	controller = irq_get_handler_data(default_irq);

	BUG_ON(!controller);

	trio_context = controller->trio;

	/*
	 * Allocate a scatter-queue that will accept the MSI write and
	 * trigger the TILE-side interrupts. We use the scatter-queue regions
	 * before the mem map regions, because the latter are needed by more
	 * applications.
	 */
	mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
	if (mem_map >= 0) {
		TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
			.pop = 0,
			.doorbell = 1,
		}};

		mem_map += TRIO_NUM_MAP_MEM_REGIONS;
		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

		msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
		msg.data = (unsigned int)doorbell_template.word;
	} else {
		/* SQ regions are out, allocate from map mem regions. */
		mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
		if (mem_map < 0) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "%s Mem-Map alloc failure. "
				   "Failed to initialize MSI interrupts. "
				   "Falling back to legacy interrupts.\n",
				   desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
			ret = -ENOMEM;
			goto msi_mem_map_alloc_failure;
		}

		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

		msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
			TRIO_MAP_MEM_REG_INT0;

		msg.data = mem_map;
	}

	/* We try to distribute different IRQs to different tiles. */
	cpu = tile_irq_cpu(irq);

	/*
	 * Now call up to the HV to configure the MSI interrupt and
	 * set up the IPI binding.
	 */
	ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
					KERNEL_PL, irq, controller->mac,
					mem_map, mem_map_base, mem_map_limit,
					trio_context->asid);
	if (ret < 0) {
		dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");

		goto hv_msi_config_failure;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_hi = msi_addr >> 32;
	msg.address_lo = msi_addr & 0xffffffff;

	write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
	irq_set_handler_data(irq, controller);

	return 0;

hv_msi_config_failure:
	/* Free mem-map */
msi_mem_map_alloc_failure:
is_64_failure:
	destroy_irq(irq);
	return ret;
}
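
/*
 * Driver-side trigger for the setup above, as a minimal sketch
 * (assumes <linux/interrupt.h>): enabling MSI funnels through
 * arch_setup_msi_irq(), which programs the device to write msg.data to
 * the 64-bit msi_addr chosen there.  The handler and names below are
 * hypothetical.
 */
static irqreturn_t example_msi_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_enable_msi(struct pci_dev *pdev)
{
	int ret = pci_enable_msi(pdev);

	if (ret)
		return ret;	/* Caller can fall back to legacy INTx. */

	return request_irq(pdev->irq, example_msi_handler, 0,
			   "example-msi", pdev);
}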

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}