Commit 9bd7ea60b1a0cf9fc745fd2eadb261e4c7719acd
Committed by
Paul Mackerras
1 parent
ee2cdecec4
Exists in
master
and in
7 other branches
[PATCH] powerpc: clean up iommu.h a bit
There was a function declared for CONFIG_PSERIES which no longer exists and the two function declarations for CONFIG_ISERIES have been moved into an include file in platforms/iseries since they are defined and used only there. Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au> Signed-off-by: Paul Mackerras <paulus@samba.org>
Showing 5 changed files with 40 additions and 19 deletions Inline Diff
arch/powerpc/platforms/iseries/iommu.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation | 2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation |
3 | * | 3 | * |
4 | * Rewrite, cleanup: | 4 | * Rewrite, cleanup: |
5 | * | 5 | * |
6 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation | 6 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation |
7 | * | 7 | * |
8 | * Dynamic DMA mapping support, iSeries-specific parts. | 8 | * Dynamic DMA mapping support, iSeries-specific parts. |
9 | * | 9 | * |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
13 | * the Free Software Foundation; either version 2 of the License, or | 13 | * the Free Software Foundation; either version 2 of the License, or |
14 | * (at your option) any later version. | 14 | * (at your option) any later version. |
15 | * | 15 | * |
16 | * This program is distributed in the hope that it will be useful, | 16 | * This program is distributed in the hope that it will be useful, |
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
19 | * GNU General Public License for more details. | 19 | * GNU General Public License for more details. |
20 | * | 20 | * |
21 | * You should have received a copy of the GNU General Public License | 21 | * You should have received a copy of the GNU General Public License |
22 | * along with this program; if not, write to the Free Software | 22 | * along with this program; if not, write to the Free Software |
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | 29 | ||
30 | #include <asm/iommu.h> | 30 | #include <asm/iommu.h> |
31 | #include <asm/tce.h> | 31 | #include <asm/tce.h> |
32 | #include <asm/machdep.h> | 32 | #include <asm/machdep.h> |
33 | #include <asm/abs_addr.h> | 33 | #include <asm/abs_addr.h> |
34 | #include <asm/pci-bridge.h> | 34 | #include <asm/pci-bridge.h> |
35 | #include <asm/iseries/hv_call_xm.h> | 35 | #include <asm/iseries/hv_call_xm.h> |
36 | 36 | ||
37 | #include "iommu.h" | ||
38 | |||
37 | extern struct list_head iSeries_Global_Device_List; | 39 | extern struct list_head iSeries_Global_Device_List; |
38 | 40 | ||
39 | 41 | ||
/*
 * Build npages TCE entries starting at index, mapping the kernel
 * virtual address uaddr for DMA.  The entry format depends on whether
 * the table belongs to the iSeries virtual bus (TCE_VB) or a PCI bus.
 * Panics if the hypervisor rejects an entry.
 */
static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction)
{
	u64 rc;
	union tce_entry tce;

	/* Scale from IOMMU-page units to hardware TCE-page units. */
	index <<= TCE_PAGE_FACTOR;
	npages <<= TCE_PAGE_FACTOR;

	while (npages--) {
		tce.te_word = 0;
		/* Real page number of the backing page. */
		tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> TCE_SHIFT;

		if (tbl->it_type == TCE_VB) {
			/* Virtual Bus */
			tce.te_bits.tb_valid = 1;
			tce.te_bits.tb_allio = 1;
			/* Allow the device to write unless DMA is to-device only. */
			if (direction != DMA_TO_DEVICE)
				tce.te_bits.tb_rdwr = 1;
		} else {
			/* PCI Bus */
			tce.te_bits.tb_rdwr = 1;	/* Read allowed */
			if (direction != DMA_TO_DEVICE)
				tce.te_bits.tb_pciwr = 1;
		}

		/* Hand the entry to the hypervisor; it owns the real table. */
		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index,
				tce.te_word);
		if (rc)
			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
					rc);
		index++;
		uaddr += TCE_PAGE_SIZE;
	}
}
75 | 77 | ||
76 | static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) | 78 | static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) |
77 | { | 79 | { |
78 | u64 rc; | 80 | u64 rc; |
79 | 81 | ||
80 | npages <<= TCE_PAGE_FACTOR; | 82 | npages <<= TCE_PAGE_FACTOR; |
81 | index <<= TCE_PAGE_FACTOR; | 83 | index <<= TCE_PAGE_FACTOR; |
82 | 84 | ||
83 | while (npages--) { | 85 | while (npages--) { |
84 | rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0); | 86 | rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0); |
85 | if (rc) | 87 | if (rc) |
86 | panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", | 88 | panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", |
87 | rc); | 89 | rc); |
88 | index++; | 90 | index++; |
89 | } | 91 | } |
90 | } | 92 | } |
91 | 93 | ||
92 | /* | 94 | /* |
93 | * Call Hv with the architected data structure to get TCE table info. | 95 | * Call Hv with the architected data structure to get TCE table info. |
94 | * info. Put the returned data into the Linux representation of the | 96 | * info. Put the returned data into the Linux representation of the |
95 | * TCE table data. | 97 | * TCE table data. |
96 | * The Hardware Tce table comes in three flavors. | 98 | * The Hardware Tce table comes in three flavors. |
97 | * 1. TCE table shared between Buses. | 99 | * 1. TCE table shared between Buses. |
98 | * 2. TCE table per Bus. | 100 | * 2. TCE table per Bus. |
99 | * 3. TCE Table per IOA. | 101 | * 3. TCE Table per IOA. |
100 | */ | 102 | */ |
101 | void iommu_table_getparms_iSeries(unsigned long busno, | 103 | void iommu_table_getparms_iSeries(unsigned long busno, |
102 | unsigned char slotno, | 104 | unsigned char slotno, |
103 | unsigned char virtbus, | 105 | unsigned char virtbus, |
104 | struct iommu_table* tbl) | 106 | struct iommu_table* tbl) |
105 | { | 107 | { |
106 | struct iommu_table_cb *parms; | 108 | struct iommu_table_cb *parms; |
107 | 109 | ||
108 | parms = kmalloc(sizeof(*parms), GFP_KERNEL); | 110 | parms = kmalloc(sizeof(*parms), GFP_KERNEL); |
109 | if (parms == NULL) | 111 | if (parms == NULL) |
110 | panic("PCI_DMA: TCE Table Allocation failed."); | 112 | panic("PCI_DMA: TCE Table Allocation failed."); |
111 | 113 | ||
112 | memset(parms, 0, sizeof(*parms)); | 114 | memset(parms, 0, sizeof(*parms)); |
113 | 115 | ||
114 | parms->itc_busno = busno; | 116 | parms->itc_busno = busno; |
115 | parms->itc_slotno = slotno; | 117 | parms->itc_slotno = slotno; |
116 | parms->itc_virtbus = virtbus; | 118 | parms->itc_virtbus = virtbus; |
117 | 119 | ||
118 | HvCallXm_getTceTableParms(iseries_hv_addr(parms)); | 120 | HvCallXm_getTceTableParms(iseries_hv_addr(parms)); |
119 | 121 | ||
120 | if (parms->itc_size == 0) | 122 | if (parms->itc_size == 0) |
121 | panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); | 123 | panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); |
122 | 124 | ||
123 | /* itc_size is in pages worth of table, it_size is in # of entries */ | 125 | /* itc_size is in pages worth of table, it_size is in # of entries */ |
124 | tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) / | 126 | tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) / |
125 | sizeof(union tce_entry)) >> TCE_PAGE_FACTOR; | 127 | sizeof(union tce_entry)) >> TCE_PAGE_FACTOR; |
126 | tbl->it_busno = parms->itc_busno; | 128 | tbl->it_busno = parms->itc_busno; |
127 | tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR; | 129 | tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR; |
128 | tbl->it_index = parms->itc_index; | 130 | tbl->it_index = parms->itc_index; |
129 | tbl->it_blocksize = 1; | 131 | tbl->it_blocksize = 1; |
130 | tbl->it_type = virtbus ? TCE_VB : TCE_PCI; | 132 | tbl->it_type = virtbus ? TCE_VB : TCE_PCI; |
131 | 133 | ||
132 | kfree(parms); | 134 | kfree(parms); |
133 | } | 135 | } |
134 | 136 | ||
135 | 137 | ||
136 | #ifdef CONFIG_PCI | 138 | #ifdef CONFIG_PCI |
/*
 * This function compares the known tables to find an iommu_table
 * that has already been built for hardware TCEs.
 *
 * Walks the global iSeries device list and returns the first PCI
 * table whose offset, index and size all match tbl, or NULL when no
 * matching table exists yet.
 */
static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
{
	struct pci_dn *pdn;

	list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
		struct iommu_table *it = pdn->iommu_table;
		/* Only hardware (TCE_PCI) tables are candidates for
		 * sharing; offset + index + size identify one HV table. */
		if ((it != NULL) &&
		    (it->it_type == TCE_PCI) &&
		    (it->it_offset == tbl->it_offset) &&
		    (it->it_index == tbl->it_index) &&
		    (it->it_size == tbl->it_size))
			return it;
	}
	return NULL;
}
156 | 158 | ||
157 | 159 | ||
158 | void iommu_devnode_init_iSeries(struct device_node *dn) | 160 | void iommu_devnode_init_iSeries(struct device_node *dn) |
159 | { | 161 | { |
160 | struct iommu_table *tbl; | 162 | struct iommu_table *tbl; |
161 | struct pci_dn *pdn = PCI_DN(dn); | 163 | struct pci_dn *pdn = PCI_DN(dn); |
162 | 164 | ||
163 | tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); | 165 | tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); |
164 | 166 | ||
165 | iommu_table_getparms_iSeries(pdn->busno, pdn->LogicalSlot, 0, tbl); | 167 | iommu_table_getparms_iSeries(pdn->busno, pdn->LogicalSlot, 0, tbl); |
166 | 168 | ||
167 | /* Look for existing tce table */ | 169 | /* Look for existing tce table */ |
168 | pdn->iommu_table = iommu_table_find(tbl); | 170 | pdn->iommu_table = iommu_table_find(tbl); |
169 | if (pdn->iommu_table == NULL) | 171 | if (pdn->iommu_table == NULL) |
170 | pdn->iommu_table = iommu_init_table(tbl); | 172 | pdn->iommu_table = iommu_init_table(tbl); |
171 | else | 173 | else |
172 | kfree(tbl); | 174 | kfree(tbl); |
173 | } | 175 | } |
174 | #endif | 176 | #endif |
175 | 177 | ||
/* iSeries needs no per-device or per-bus IOMMU setup work; these empty
 * stubs exist only to fill the ppc_md callback slots below. */
static void iommu_dev_setup_iSeries(struct pci_dev *dev) { }
static void iommu_bus_setup_iSeries(struct pci_bus *bus) { }
178 | 180 | ||
/*
 * Install the iSeries TCE build/free operations and the (no-op)
 * device/bus setup hooks into ppc_md, then initialise the generic PCI
 * IOMMU layer.  Called once during early platform bring-up.
 */
void iommu_init_early_iSeries(void)
{
	ppc_md.tce_build = tce_build_iSeries;
	ppc_md.tce_free = tce_free_iSeries;

	ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries;
	ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries;

	pci_iommu_init();
}
189 | 191 |
arch/powerpc/platforms/iseries/iommu.h
File was created | 1 | #ifndef _PLATFORMS_ISERIES_IOMMU_H | |
2 | #define _PLATFORMS_ISERIES_IOMMU_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 2005 Stephen Rothwell, IBM Corporation | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the: | ||
19 | * Free Software Foundation, Inc., | ||
20 | * 59 Temple Place, Suite 330, | ||
21 | * Boston, MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | struct device_node; | ||
25 | struct iommu_table; | ||
26 | |||
27 | /* Creates table for an individual device node */ | ||
28 | extern void iommu_devnode_init_iSeries(struct device_node *dn); | ||
29 | |||
30 | /* Get table parameters from HV */ | ||
31 | extern void iommu_table_getparms_iSeries(unsigned long busno, | ||
32 | unsigned char slotno, unsigned char virtbus, | ||
33 | struct iommu_table *tbl); | ||
34 | |||
35 | #endif /* _PLATFORMS_ISERIES_IOMMU_H */ | ||
36 |
arch/powerpc/platforms/iseries/pci.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2001 Allan Trautman, IBM Corporation | 2 | * Copyright (C) 2001 Allan Trautman, IBM Corporation |
3 | * | 3 | * |
4 | * iSeries specific routines for PCI. | 4 | * iSeries specific routines for PCI. |
5 | * | 5 | * |
6 | * Based on code from pci.c and iSeries_pci.c 32bit | 6 | * Based on code from pci.c and iSeries_pci.c 32bit |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or | 10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. | 11 | * (at your option) any later version. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
21 | */ | 21 | */ |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/ide.h> | 27 | #include <linux/ide.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | 29 | ||
30 | #include <asm/io.h> | 30 | #include <asm/io.h> |
31 | #include <asm/irq.h> | 31 | #include <asm/irq.h> |
32 | #include <asm/prom.h> | 32 | #include <asm/prom.h> |
33 | #include <asm/machdep.h> | 33 | #include <asm/machdep.h> |
34 | #include <asm/pci-bridge.h> | 34 | #include <asm/pci-bridge.h> |
35 | #include <asm/iommu.h> | 35 | #include <asm/iommu.h> |
36 | #include <asm/abs_addr.h> | 36 | #include <asm/abs_addr.h> |
37 | 37 | ||
38 | #include <asm/iseries/hv_call_xm.h> | 38 | #include <asm/iseries/hv_call_xm.h> |
39 | #include <asm/iseries/mf.h> | 39 | #include <asm/iseries/mf.h> |
40 | 40 | ||
41 | #include <asm/ppc-pci.h> | 41 | #include <asm/ppc-pci.h> |
42 | 42 | ||
43 | #include "irq.h" | 43 | #include "irq.h" |
44 | #include "pci.h" | 44 | #include "pci.h" |
45 | #include "call_pci.h" | 45 | #include "call_pci.h" |
46 | #include "iommu.h" | ||
46 | 47 | ||
47 | extern unsigned long io_page_mask; | 48 | extern unsigned long io_page_mask; |
48 | 49 | ||
49 | /* | 50 | /* |
50 | * Forward declares of prototypes. | 51 | * Forward declares of prototypes. |
51 | */ | 52 | */ |
52 | static struct device_node *find_Device_Node(int bus, int devfn); | 53 | static struct device_node *find_Device_Node(int bus, int devfn); |
53 | static void scan_PHB_slots(struct pci_controller *Phb); | 54 | static void scan_PHB_slots(struct pci_controller *Phb); |
54 | static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel); | 55 | static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel); |
55 | static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info); | 56 | static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info); |
56 | 57 | ||
57 | LIST_HEAD(iSeries_Global_Device_List); | 58 | LIST_HEAD(iSeries_Global_Device_List); |
58 | 59 | ||
59 | static int DeviceCount; | 60 | static int DeviceCount; |
60 | 61 | ||
61 | /* Counters and control flags. */ | 62 | /* Counters and control flags. */ |
62 | static long Pci_Io_Read_Count; | 63 | static long Pci_Io_Read_Count; |
63 | static long Pci_Io_Write_Count; | 64 | static long Pci_Io_Write_Count; |
64 | #if 0 | 65 | #if 0 |
65 | static long Pci_Cfg_Read_Count; | 66 | static long Pci_Cfg_Read_Count; |
66 | static long Pci_Cfg_Write_Count; | 67 | static long Pci_Cfg_Write_Count; |
67 | #endif | 68 | #endif |
68 | static long Pci_Error_Count; | 69 | static long Pci_Error_Count; |
69 | 70 | ||
70 | static int Pci_Retry_Max = 3; /* Only retry 3 times */ | 71 | static int Pci_Retry_Max = 3; /* Only retry 3 times */ |
71 | static int Pci_Error_Flag = 1; /* Set Retry Error on. */ | 72 | static int Pci_Error_Flag = 1; /* Set Retry Error on. */ |
72 | 73 | ||
73 | static struct pci_ops iSeries_pci_ops; | 74 | static struct pci_ops iSeries_pci_ops; |
74 | 75 | ||
75 | /* | 76 | /* |
76 | * Table defines | 77 | * Table defines |
77 | * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space. | 78 | * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space. |
78 | */ | 79 | */ |
79 | #define IOMM_TABLE_MAX_ENTRIES 1024 | 80 | #define IOMM_TABLE_MAX_ENTRIES 1024 |
80 | #define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL | 81 | #define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL |
81 | #define BASE_IO_MEMORY 0xE000000000000000UL | 82 | #define BASE_IO_MEMORY 0xE000000000000000UL |
82 | 83 | ||
83 | static unsigned long max_io_memory = 0xE000000000000000UL; | 84 | static unsigned long max_io_memory = 0xE000000000000000UL; |
84 | static long current_iomm_table_entry; | 85 | static long current_iomm_table_entry; |
85 | 86 | ||
86 | /* | 87 | /* |
87 | * Lookup Tables. | 88 | * Lookup Tables. |
88 | */ | 89 | */ |
89 | static struct device_node **iomm_table; | 90 | static struct device_node **iomm_table; |
90 | static u8 *iobar_table; | 91 | static u8 *iobar_table; |
91 | 92 | ||
92 | /* | 93 | /* |
93 | * Static and Global variables | 94 | * Static and Global variables |
94 | */ | 95 | */ |
95 | static char *pci_io_text = "iSeries PCI I/O"; | 96 | static char *pci_io_text = "iSeries PCI I/O"; |
96 | static DEFINE_SPINLOCK(iomm_table_lock); | 97 | static DEFINE_SPINLOCK(iomm_table_lock); |
97 | 98 | ||
98 | /* | 99 | /* |
99 | * iomm_table_initialize | 100 | * iomm_table_initialize |
100 | * | 101 | * |
101 | * Allocates and initalizes the Address Translation Table and Bar | 102 | * Allocates and initalizes the Address Translation Table and Bar |
102 | * Tables to get them ready for use. Must be called before any | 103 | * Tables to get them ready for use. Must be called before any |
103 | * I/O space is handed out to the device BARs. | 104 | * I/O space is handed out to the device BARs. |
104 | */ | 105 | */ |
105 | static void iomm_table_initialize(void) | 106 | static void iomm_table_initialize(void) |
106 | { | 107 | { |
107 | spin_lock(&iomm_table_lock); | 108 | spin_lock(&iomm_table_lock); |
108 | iomm_table = kmalloc(sizeof(*iomm_table) * IOMM_TABLE_MAX_ENTRIES, | 109 | iomm_table = kmalloc(sizeof(*iomm_table) * IOMM_TABLE_MAX_ENTRIES, |
109 | GFP_KERNEL); | 110 | GFP_KERNEL); |
110 | iobar_table = kmalloc(sizeof(*iobar_table) * IOMM_TABLE_MAX_ENTRIES, | 111 | iobar_table = kmalloc(sizeof(*iobar_table) * IOMM_TABLE_MAX_ENTRIES, |
111 | GFP_KERNEL); | 112 | GFP_KERNEL); |
112 | spin_unlock(&iomm_table_lock); | 113 | spin_unlock(&iomm_table_lock); |
113 | if ((iomm_table == NULL) || (iobar_table == NULL)) | 114 | if ((iomm_table == NULL) || (iobar_table == NULL)) |
114 | panic("PCI: I/O tables allocation failed.\n"); | 115 | panic("PCI: I/O tables allocation failed.\n"); |
115 | } | 116 | } |
116 | 117 | ||
/*
 * iomm_table_allocate_entry
 *
 * Adds pci_dev entry in address translation table
 *
 * - Allocates the number of entries required in table base on BAR
 *   size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is round up to be a multiple of entry size.
 * - CurrentIndex is incremented to keep track of the last entry.
 * - Builds the resource entry for allocated BARs.
 */
static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
	struct resource *bar_res = &dev->resource[bar_num];
	long bar_size = pci_resource_len(dev, bar_num);

	/*
	 * No space to allocate, quick exit, skip Allocation.
	 */
	if (bar_size == 0)
		return;
	/*
	 * Set Resource values.  The lock protects both the lookup tables
	 * and the shared current_iomm_table_entry cursor.
	 */
	spin_lock(&iomm_table_lock);
	bar_res->name = pci_io_text;
	/* Resource start = next free slot, offset into the I/O window. */
	bar_res->start =
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	bar_res->start += BASE_IO_MEMORY;
	bar_res->end = bar_res->start + bar_size - 1;
	/*
	 * Allocate the number of table entries needed for BAR (rounds the
	 * BAR size up to a whole number of 4MB entries).
	 */
	while (bar_size > 0 ) {
		iomm_table[current_iomm_table_entry] = dev->sysdata;
		iobar_table[current_iomm_table_entry] = bar_num;
		bar_size -= IOMM_TABLE_ENTRY_SIZE;
		++current_iomm_table_entry;
	}
	/* Track the high-water mark of I/O space handed out so far. */
	max_io_memory = BASE_IO_MEMORY +
		(IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry);
	spin_unlock(&iomm_table_lock);
}
161 | 162 | ||
162 | /* | 163 | /* |
163 | * allocate_device_bars | 164 | * allocate_device_bars |
164 | * | 165 | * |
165 | * - Allocates ALL pci_dev BAR's and updates the resources with the | 166 | * - Allocates ALL pci_dev BAR's and updates the resources with the |
166 | * BAR value. BARS with zero length will have the resources | 167 | * BAR value. BARS with zero length will have the resources |
167 | * The HvCallPci_getBarParms is used to get the size of the BAR | 168 | * The HvCallPci_getBarParms is used to get the size of the BAR |
168 | * space. It calls iomm_table_allocate_entry to allocate | 169 | * space. It calls iomm_table_allocate_entry to allocate |
169 | * each entry. | 170 | * each entry. |
170 | * - Loops through The Bar resources(0 - 5) including the ROM | 171 | * - Loops through The Bar resources(0 - 5) including the ROM |
171 | * is resource(6). | 172 | * is resource(6). |
172 | */ | 173 | */ |
173 | static void allocate_device_bars(struct pci_dev *dev) | 174 | static void allocate_device_bars(struct pci_dev *dev) |
174 | { | 175 | { |
175 | struct resource *bar_res; | 176 | struct resource *bar_res; |
176 | int bar_num; | 177 | int bar_num; |
177 | 178 | ||
178 | for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) { | 179 | for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) { |
179 | bar_res = &dev->resource[bar_num]; | 180 | bar_res = &dev->resource[bar_num]; |
180 | iomm_table_allocate_entry(dev, bar_num); | 181 | iomm_table_allocate_entry(dev, bar_num); |
181 | } | 182 | } |
182 | } | 183 | } |
183 | 184 | ||
184 | /* | 185 | /* |
185 | * Log error information to system console. | 186 | * Log error information to system console. |
186 | * Filter out the device not there errors. | 187 | * Filter out the device not there errors. |
187 | * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx | 188 | * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx |
188 | * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx | 189 | * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx |
189 | * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx | 190 | * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx |
190 | */ | 191 | */ |
191 | static void pci_Log_Error(char *Error_Text, int Bus, int SubBus, | 192 | static void pci_Log_Error(char *Error_Text, int Bus, int SubBus, |
192 | int AgentId, int HvRc) | 193 | int AgentId, int HvRc) |
193 | { | 194 | { |
194 | if (HvRc == 0x0302) | 195 | if (HvRc == 0x0302) |
195 | return; | 196 | return; |
196 | printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X", | 197 | printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X", |
197 | Error_Text, Bus, SubBus, AgentId, HvRc); | 198 | Error_Text, Bus, SubBus, AgentId, HvRc); |
198 | } | 199 | } |
199 | 200 | ||
200 | /* | 201 | /* |
201 | * build_device_node(u16 Bus, int SubBus, u8 DevFn) | 202 | * build_device_node(u16 Bus, int SubBus, u8 DevFn) |
202 | */ | 203 | */ |
203 | static struct device_node *build_device_node(HvBusNumber Bus, | 204 | static struct device_node *build_device_node(HvBusNumber Bus, |
204 | HvSubBusNumber SubBus, int AgentId, int Function) | 205 | HvSubBusNumber SubBus, int AgentId, int Function) |
205 | { | 206 | { |
206 | struct device_node *node; | 207 | struct device_node *node; |
207 | struct pci_dn *pdn; | 208 | struct pci_dn *pdn; |
208 | 209 | ||
209 | node = kmalloc(sizeof(struct device_node), GFP_KERNEL); | 210 | node = kmalloc(sizeof(struct device_node), GFP_KERNEL); |
210 | if (node == NULL) | 211 | if (node == NULL) |
211 | return NULL; | 212 | return NULL; |
212 | memset(node, 0, sizeof(struct device_node)); | 213 | memset(node, 0, sizeof(struct device_node)); |
213 | pdn = kzalloc(sizeof(*pdn), GFP_KERNEL); | 214 | pdn = kzalloc(sizeof(*pdn), GFP_KERNEL); |
214 | if (pdn == NULL) { | 215 | if (pdn == NULL) { |
215 | kfree(node); | 216 | kfree(node); |
216 | return NULL; | 217 | return NULL; |
217 | } | 218 | } |
218 | node->data = pdn; | 219 | node->data = pdn; |
219 | pdn->node = node; | 220 | pdn->node = node; |
220 | list_add_tail(&pdn->Device_List, &iSeries_Global_Device_List); | 221 | list_add_tail(&pdn->Device_List, &iSeries_Global_Device_List); |
221 | pdn->busno = Bus; | 222 | pdn->busno = Bus; |
222 | pdn->bussubno = SubBus; | 223 | pdn->bussubno = SubBus; |
223 | pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function); | 224 | pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function); |
224 | return node; | 225 | return node; |
225 | } | 226 | } |
226 | 227 | ||
/*
 * unsigned long __init find_and_init_phbs(void)
 *
 * Description:
 *   This function checks for all possible system PCI host bridges that connect
 *   PCI buses.  The system hypervisor is queried as to the guest partition
 *   ownership status.  A pci_controller is built for any bus which is partially
 *   owned or fully owned by this guest partition.
 */
unsigned long __init find_and_init_phbs(void)
{
	struct pci_controller *phb;
	HvBusNumber bus;

	/* Check all possible buses. */
	for (bus = 0; bus < 256; bus++) {
		int ret = HvCallXm_testBus(bus);
		if (ret == 0) {
			printk("bus %d appears to exist\n", bus);

			phb = pcibios_alloc_controller(NULL);
			if (phb == NULL)
				/* NOTE(review): returning -ENOMEM from an
				 * unsigned long function loses the sign —
				 * confirm callers never check this value. */
				return -ENOMEM;

			/* One PHB per bus; memory offset doubles as id. */
			phb->pci_mem_offset = phb->local_number = bus;
			phb->first_busno = bus;
			phb->last_busno = bus;
			phb->ops = &iSeries_pci_ops;

			/* Find and connect the devices. */
			scan_PHB_slots(phb);
		}
		/*
		 * Check for Unexpected Return code, a clue that something
		 * has gone wrong (0x0301 is the expected "no such bus").
		 */
		else if (ret != 0x0301)
			printk(KERN_ERR "Unexpected Return on Probe(0x%04X): 0x%04X",
				bus, ret);
	}
	return 0;
}
269 | 270 | ||
/*
 * iSeries_pcibios_init
 *
 * Chance to initialize any structures or variables before the PCI bus walk.
 */
void iSeries_pcibios_init(void)
{
	/* Build the MMIO address -> device node lookup tables first. */
	iomm_table_initialize();
	/* Probe the hypervisor for all host bridges owned by this partition. */
	find_and_init_phbs();
	/* NOTE(review): -1 presumably enables IO on every page — confirm
	 * against the definition of io_page_mask. */
	io_page_mask = -1;
}
281 | 282 | ||
282 | /* | 283 | /* |
283 | * iSeries_pci_final_fixup(void) | 284 | * iSeries_pci_final_fixup(void) |
284 | */ | 285 | */ |
285 | void __init iSeries_pci_final_fixup(void) | 286 | void __init iSeries_pci_final_fixup(void) |
286 | { | 287 | { |
287 | struct pci_dev *pdev = NULL; | 288 | struct pci_dev *pdev = NULL; |
288 | struct device_node *node; | 289 | struct device_node *node; |
289 | int DeviceCount = 0; | 290 | int DeviceCount = 0; |
290 | 291 | ||
291 | /* Fix up at the device node and pci_dev relationship */ | 292 | /* Fix up at the device node and pci_dev relationship */ |
292 | mf_display_src(0xC9000100); | 293 | mf_display_src(0xC9000100); |
293 | 294 | ||
294 | printk("pcibios_final_fixup\n"); | 295 | printk("pcibios_final_fixup\n"); |
295 | for_each_pci_dev(pdev) { | 296 | for_each_pci_dev(pdev) { |
296 | node = find_Device_Node(pdev->bus->number, pdev->devfn); | 297 | node = find_Device_Node(pdev->bus->number, pdev->devfn); |
297 | printk("pci dev %p (%x.%x), node %p\n", pdev, | 298 | printk("pci dev %p (%x.%x), node %p\n", pdev, |
298 | pdev->bus->number, pdev->devfn, node); | 299 | pdev->bus->number, pdev->devfn, node); |
299 | 300 | ||
300 | if (node != NULL) { | 301 | if (node != NULL) { |
301 | ++DeviceCount; | 302 | ++DeviceCount; |
302 | pdev->sysdata = (void *)node; | 303 | pdev->sysdata = (void *)node; |
303 | PCI_DN(node)->pcidev = pdev; | 304 | PCI_DN(node)->pcidev = pdev; |
304 | allocate_device_bars(pdev); | 305 | allocate_device_bars(pdev); |
305 | iSeries_Device_Information(pdev, DeviceCount); | 306 | iSeries_Device_Information(pdev, DeviceCount); |
306 | iommu_devnode_init_iSeries(node); | 307 | iommu_devnode_init_iSeries(node); |
307 | } else | 308 | } else |
308 | printk("PCI: Device Tree not found for 0x%016lX\n", | 309 | printk("PCI: Device Tree not found for 0x%016lX\n", |
309 | (unsigned long)pdev); | 310 | (unsigned long)pdev); |
310 | pdev->irq = PCI_DN(node)->Irq; | 311 | pdev->irq = PCI_DN(node)->Irq; |
311 | } | 312 | } |
312 | iSeries_activate_IRQs(); | 313 | iSeries_activate_IRQs(); |
313 | mf_display_src(0xC9000200); | 314 | mf_display_src(0xC9000200); |
314 | } | 315 | } |
315 | 316 | ||
/* Required hook; nothing to fix up per-bus on iSeries. */
void pcibios_fixup_bus(struct pci_bus *PciBus)
{
}
319 | 320 | ||
/* Required hook; nothing to fix up per-device on iSeries. */
void pcibios_fixup_resources(struct pci_dev *pdev)
{
}
323 | 324 | ||
324 | /* | 325 | /* |
325 | * Loop through each node function to find usable EADs bridges. | 326 | * Loop through each node function to find usable EADs bridges. |
326 | */ | 327 | */ |
327 | static void scan_PHB_slots(struct pci_controller *Phb) | 328 | static void scan_PHB_slots(struct pci_controller *Phb) |
328 | { | 329 | { |
329 | struct HvCallPci_DeviceInfo *DevInfo; | 330 | struct HvCallPci_DeviceInfo *DevInfo; |
330 | HvBusNumber bus = Phb->local_number; /* System Bus */ | 331 | HvBusNumber bus = Phb->local_number; /* System Bus */ |
331 | const HvSubBusNumber SubBus = 0; /* EADs is always 0. */ | 332 | const HvSubBusNumber SubBus = 0; /* EADs is always 0. */ |
332 | int HvRc = 0; | 333 | int HvRc = 0; |
333 | int IdSel; | 334 | int IdSel; |
334 | const int MaxAgents = 8; | 335 | const int MaxAgents = 8; |
335 | 336 | ||
336 | DevInfo = (struct HvCallPci_DeviceInfo*) | 337 | DevInfo = (struct HvCallPci_DeviceInfo*) |
337 | kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL); | 338 | kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL); |
338 | if (DevInfo == NULL) | 339 | if (DevInfo == NULL) |
339 | return; | 340 | return; |
340 | 341 | ||
341 | /* | 342 | /* |
342 | * Probe for EADs Bridges | 343 | * Probe for EADs Bridges |
343 | */ | 344 | */ |
344 | for (IdSel = 1; IdSel < MaxAgents; ++IdSel) { | 345 | for (IdSel = 1; IdSel < MaxAgents; ++IdSel) { |
345 | HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel, | 346 | HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel, |
346 | iseries_hv_addr(DevInfo), | 347 | iseries_hv_addr(DevInfo), |
347 | sizeof(struct HvCallPci_DeviceInfo)); | 348 | sizeof(struct HvCallPci_DeviceInfo)); |
348 | if (HvRc == 0) { | 349 | if (HvRc == 0) { |
349 | if (DevInfo->deviceType == HvCallPci_NodeDevice) | 350 | if (DevInfo->deviceType == HvCallPci_NodeDevice) |
350 | scan_EADS_bridge(bus, SubBus, IdSel); | 351 | scan_EADS_bridge(bus, SubBus, IdSel); |
351 | else | 352 | else |
352 | printk("PCI: Invalid System Configuration(0x%02X)" | 353 | printk("PCI: Invalid System Configuration(0x%02X)" |
353 | " for bus 0x%02x id 0x%02x.\n", | 354 | " for bus 0x%02x id 0x%02x.\n", |
354 | DevInfo->deviceType, bus, IdSel); | 355 | DevInfo->deviceType, bus, IdSel); |
355 | } | 356 | } |
356 | else | 357 | else |
357 | pci_Log_Error("getDeviceInfo", bus, SubBus, IdSel, HvRc); | 358 | pci_Log_Error("getDeviceInfo", bus, SubBus, IdSel, HvRc); |
358 | } | 359 | } |
359 | kfree(DevInfo); | 360 | kfree(DevInfo); |
360 | } | 361 | } |
361 | 362 | ||
362 | static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus, | 363 | static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus, |
363 | int IdSel) | 364 | int IdSel) |
364 | { | 365 | { |
365 | struct HvCallPci_BridgeInfo *BridgeInfo; | 366 | struct HvCallPci_BridgeInfo *BridgeInfo; |
366 | HvAgentId AgentId; | 367 | HvAgentId AgentId; |
367 | int Function; | 368 | int Function; |
368 | int HvRc; | 369 | int HvRc; |
369 | 370 | ||
370 | BridgeInfo = (struct HvCallPci_BridgeInfo *) | 371 | BridgeInfo = (struct HvCallPci_BridgeInfo *) |
371 | kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL); | 372 | kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL); |
372 | if (BridgeInfo == NULL) | 373 | if (BridgeInfo == NULL) |
373 | return; | 374 | return; |
374 | 375 | ||
375 | /* Note: hvSubBus and irq is always be 0 at this level! */ | 376 | /* Note: hvSubBus and irq is always be 0 at this level! */ |
376 | for (Function = 0; Function < 8; ++Function) { | 377 | for (Function = 0; Function < 8; ++Function) { |
377 | AgentId = ISERIES_PCI_AGENTID(IdSel, Function); | 378 | AgentId = ISERIES_PCI_AGENTID(IdSel, Function); |
378 | HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0); | 379 | HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0); |
379 | if (HvRc == 0) { | 380 | if (HvRc == 0) { |
380 | printk("found device at bus %d idsel %d func %d (AgentId %x)\n", | 381 | printk("found device at bus %d idsel %d func %d (AgentId %x)\n", |
381 | bus, IdSel, Function, AgentId); | 382 | bus, IdSel, Function, AgentId); |
382 | /* Connect EADs: 0x18.00.12 = 0x00 */ | 383 | /* Connect EADs: 0x18.00.12 = 0x00 */ |
383 | HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId, | 384 | HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId, |
384 | iseries_hv_addr(BridgeInfo), | 385 | iseries_hv_addr(BridgeInfo), |
385 | sizeof(struct HvCallPci_BridgeInfo)); | 386 | sizeof(struct HvCallPci_BridgeInfo)); |
386 | if (HvRc == 0) { | 387 | if (HvRc == 0) { |
387 | printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n", | 388 | printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n", |
388 | BridgeInfo->busUnitInfo.deviceType, | 389 | BridgeInfo->busUnitInfo.deviceType, |
389 | BridgeInfo->subBusNumber, | 390 | BridgeInfo->subBusNumber, |
390 | BridgeInfo->maxAgents, | 391 | BridgeInfo->maxAgents, |
391 | BridgeInfo->maxSubBusNumber, | 392 | BridgeInfo->maxSubBusNumber, |
392 | BridgeInfo->logicalSlotNumber); | 393 | BridgeInfo->logicalSlotNumber); |
393 | if (BridgeInfo->busUnitInfo.deviceType == | 394 | if (BridgeInfo->busUnitInfo.deviceType == |
394 | HvCallPci_BridgeDevice) { | 395 | HvCallPci_BridgeDevice) { |
395 | /* Scan_Bridge_Slot...: 0x18.00.12 */ | 396 | /* Scan_Bridge_Slot...: 0x18.00.12 */ |
396 | scan_bridge_slot(bus, BridgeInfo); | 397 | scan_bridge_slot(bus, BridgeInfo); |
397 | } else | 398 | } else |
398 | printk("PCI: Invalid Bridge Configuration(0x%02X)", | 399 | printk("PCI: Invalid Bridge Configuration(0x%02X)", |
399 | BridgeInfo->busUnitInfo.deviceType); | 400 | BridgeInfo->busUnitInfo.deviceType); |
400 | } | 401 | } |
401 | } else if (HvRc != 0x000B) | 402 | } else if (HvRc != 0x000B) |
402 | pci_Log_Error("EADs Connect", | 403 | pci_Log_Error("EADs Connect", |
403 | bus, SubBus, AgentId, HvRc); | 404 | bus, SubBus, AgentId, HvRc); |
404 | } | 405 | } |
405 | kfree(BridgeInfo); | 406 | kfree(BridgeInfo); |
406 | } | 407 | } |
407 | 408 | ||
/*
 * This assumes that the node slot is always on the primary bus!
 *
 * Connect every (IdSel, Function) pair under the bridge, read its
 * vendor id, program its interrupt line and record a device node for
 * it.  Returns the last hypervisor return code seen.
 */
static int scan_bridge_slot(HvBusNumber Bus,
		struct HvCallPci_BridgeInfo *BridgeInfo)
{
	struct device_node *node;
	HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
	u16 VendorId = 0;
	int HvRc = 0;
	u8 Irq = 0;
	/* Initial IdSel/Function are decoded from the sub-bus number and
	 * used only to build EADsIdSel; both are reused as loop counters
	 * below. */
	int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
	int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
	HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);

	/* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
	/* One IRQ is shared by every function under this bridge slot. */
	Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);

	/*
	 * Connect all functions of any device found.
	 */
	for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
		for (Function = 0; Function < 8; ++Function) {
			HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
			HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
					AgentId, Irq);
			if (HvRc != 0) {
				pci_Log_Error("Connect Bus Unit",
						Bus, SubBus, AgentId, HvRc);
				continue;
			}

			HvRc = HvCallPci_configLoad16(Bus, SubBus, AgentId,
					PCI_VENDOR_ID, &VendorId);
			if (HvRc != 0) {
				pci_Log_Error("Read Vendor",
						Bus, SubBus, AgentId, HvRc);
				continue;
			}
			printk("read vendor ID: %x\n", VendorId);

			/* FoundDevice: 0x18.28.10 = 0x12AE */
			HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
					PCI_INTERRUPT_LINE, Irq);
			if (HvRc != 0)
				pci_Log_Error("PciCfgStore Irq Failed!",
						Bus, SubBus, AgentId, HvRc);

			/* DeviceCount is a file-global running total. */
			++DeviceCount;
			node = build_device_node(Bus, SubBus, EADsIdSel, Function);
			PCI_DN(node)->Irq = Irq;
			PCI_DN(node)->LogicalSlot = BridgeInfo->logicalSlotNumber;

		} /* for (Function = 0; Function < 8; ++Function) */
	} /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
	return HvRc;
}
465 | 466 | ||
466 | /* | 467 | /* |
467 | * I/0 Memory copy MUST use mmio commands on iSeries | 468 | * I/0 Memory copy MUST use mmio commands on iSeries |
468 | * To do; For performance, include the hv call directly | 469 | * To do; For performance, include the hv call directly |
469 | */ | 470 | */ |
470 | void iSeries_memset_io(volatile void __iomem *dest, char c, size_t Count) | 471 | void iSeries_memset_io(volatile void __iomem *dest, char c, size_t Count) |
471 | { | 472 | { |
472 | u8 ByteValue = c; | 473 | u8 ByteValue = c; |
473 | long NumberOfBytes = Count; | 474 | long NumberOfBytes = Count; |
474 | 475 | ||
475 | while (NumberOfBytes > 0) { | 476 | while (NumberOfBytes > 0) { |
476 | iSeries_Write_Byte(ByteValue, dest++); | 477 | iSeries_Write_Byte(ByteValue, dest++); |
477 | -- NumberOfBytes; | 478 | -- NumberOfBytes; |
478 | } | 479 | } |
479 | } | 480 | } |
480 | EXPORT_SYMBOL(iSeries_memset_io); | 481 | EXPORT_SYMBOL(iSeries_memset_io); |
481 | 482 | ||
482 | void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t count) | 483 | void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t count) |
483 | { | 484 | { |
484 | char *src = source; | 485 | char *src = source; |
485 | long NumberOfBytes = count; | 486 | long NumberOfBytes = count; |
486 | 487 | ||
487 | while (NumberOfBytes > 0) { | 488 | while (NumberOfBytes > 0) { |
488 | iSeries_Write_Byte(*src++, dest++); | 489 | iSeries_Write_Byte(*src++, dest++); |
489 | -- NumberOfBytes; | 490 | -- NumberOfBytes; |
490 | } | 491 | } |
491 | } | 492 | } |
492 | EXPORT_SYMBOL(iSeries_memcpy_toio); | 493 | EXPORT_SYMBOL(iSeries_memcpy_toio); |
493 | 494 | ||
494 | void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *src, size_t count) | 495 | void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *src, size_t count) |
495 | { | 496 | { |
496 | char *dst = dest; | 497 | char *dst = dest; |
497 | long NumberOfBytes = count; | 498 | long NumberOfBytes = count; |
498 | 499 | ||
499 | while (NumberOfBytes > 0) { | 500 | while (NumberOfBytes > 0) { |
500 | *dst++ = iSeries_Read_Byte(src++); | 501 | *dst++ = iSeries_Read_Byte(src++); |
501 | -- NumberOfBytes; | 502 | -- NumberOfBytes; |
502 | } | 503 | } |
503 | } | 504 | } |
504 | EXPORT_SYMBOL(iSeries_memcpy_fromio); | 505 | EXPORT_SYMBOL(iSeries_memcpy_fromio); |
505 | 506 | ||
/*
 * Look down the chain to find the matching Device Device
 *
 * Walks the global device list for the entry whose bus number and
 * devfn match; returns its device node, or NULL if none is found.
 */
static struct device_node *find_Device_Node(int bus, int devfn)
{
	struct pci_dn *pdn;

	list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
		if ((bus == pdn->busno) && (devfn == pdn->devfn))
			return pdn->node;
	}
	return NULL;
}
519 | 520 | ||
/* NOTE(review): dead code under #if 0 — consider deleting outright. */
#if 0
/*
 * Returns the device node for the passed pci_dev
 * Sanity Check Node PciDev to passed pci_dev
 * If none is found, returns a NULL which the client must handle.
 */
static struct device_node *get_Device_Node(struct pci_dev *pdev)
{
	struct device_node *node;

	node = pdev->sysdata;
	if (node == NULL || PCI_DN(node)->pcidev != pdev)
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
	return node;
}
#endif
536 | 537 | ||
537 | /* | 538 | /* |
538 | * Config space read and write functions. | 539 | * Config space read and write functions. |
539 | * For now at least, we look for the device node for the bus and devfn | 540 | * For now at least, we look for the device node for the bus and devfn |
540 | * that we are asked to access. It may be possible to translate the devfn | 541 | * that we are asked to access. It may be possible to translate the devfn |
541 | * to a subbus and deviceid more directly. | 542 | * to a subbus and deviceid more directly. |
542 | */ | 543 | */ |
543 | static u64 hv_cfg_read_func[4] = { | 544 | static u64 hv_cfg_read_func[4] = { |
544 | HvCallPciConfigLoad8, HvCallPciConfigLoad16, | 545 | HvCallPciConfigLoad8, HvCallPciConfigLoad16, |
545 | HvCallPciConfigLoad32, HvCallPciConfigLoad32 | 546 | HvCallPciConfigLoad32, HvCallPciConfigLoad32 |
546 | }; | 547 | }; |
547 | 548 | ||
548 | static u64 hv_cfg_write_func[4] = { | 549 | static u64 hv_cfg_write_func[4] = { |
549 | HvCallPciConfigStore8, HvCallPciConfigStore16, | 550 | HvCallPciConfigStore8, HvCallPciConfigStore16, |
550 | HvCallPciConfigStore32, HvCallPciConfigStore32 | 551 | HvCallPciConfigStore32, HvCallPciConfigStore32 |
551 | }; | 552 | }; |
552 | 553 | ||
/*
 * Read PCI config space
 *
 * pci_ops.read hook: looks up the device node for (bus, devfn) and
 * issues the width-appropriate hypervisor config-load call.  On any
 * failure *val is set to all-ones, matching PCI master-abort behavior.
 */
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 *val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	struct HvCallPci_LoadReturn ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/* Only the standard 256-byte config space is accessible. */
	if (offset > 255) {
		*val = ~0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	/* Select the 8/16/32-bit load call by access size. */
	fn = hv_cfg_read_func[(size - 1) & 3];
	HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);

	if (ret.rc != 0) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;	/* or something */
	}

	*val = ret.value;
	return 0;
}
581 | 582 | ||
/*
 * Write PCI config space
 *
 * pci_ops.write hook: looks up the device node for (bus, devfn) and
 * issues the width-appropriate hypervisor config-store call.
 */

static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	u64 ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/* Only the standard 256-byte config space is accessible. */
	if (offset > 255)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	/* Select the 8/16/32-bit store call by access size. */
	fn = hv_cfg_write_func[(size - 1) & 3];
	ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);

	if (ret != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}
606 | 607 | ||
/* Config-space accessors installed on every iSeries pci_controller. */
static struct pci_ops iSeries_pci_ops = {
	.read = iSeries_pci_read_config,
	.write = iSeries_pci_write_config
};
611 | 612 | ||
/*
 * Check Return Code
 * -> On Failure, print and log information.
 *    Increment Retry Count, if exceeds max, panic partition.
 *
 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
 * PCI: Device 23.90 ReadL Retry( 1)
 * PCI: Device 23.90 ReadL Retry Successful(1)
 *
 * Returns -1 if the caller should retry the MMIO access, 0 on success.
 * Panics the partition once *retry exceeds Pci_Retry_Max (when
 * Pci_Error_Flag is set) — assumed to indicate failing hardware.
 */
static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
		int *retry, u64 ret)
{
	if (ret != 0) {
		struct pci_dn *pdn = PCI_DN(DevNode);

		/* Pci_Error_Count is a file-global error statistic. */
		++Pci_Error_Count;
		(*retry)++;
		printk("PCI: %s: Device 0x%04X:%02X  I/O Error(%2d): 0x%04X\n",
				TextHdr, pdn->busno, pdn->devfn,
				*retry, (int)ret);
		/*
		 * Bump the retry and check for retry count exceeded.
		 * If, Exceeded, panic the system.
		 */
		if (((*retry) > Pci_Retry_Max) &&
				(Pci_Error_Flag > 0)) {
			mf_display_src(0xB6000103);
			/* Disable automatic reboot so the SRC stays visible. */
			panic_timeout = 0;
			panic("PCI: Hardware I/O Error, SRC B6000103, "
					"Automatic Reboot Disabled.\n");
		}
		return -1;	/* Retry Try */
	}
	return 0;
}
647 | 648 | ||
/*
 * Translate the I/O Address into a device node, bar, and bar offset.
 * Note: Make sure the passed variable end up on the stack to avoid
 * the exposure of being device global.
 *
 * Returns the owning device node (filling *dsaptr with the HV direct
 * select address including the BAR number, and *BarOffsetPtr with the
 * offset within that BAR), NULL if the address is outside the mapped
 * MMIO window, or panics if the window entry is empty.
 */
static inline struct device_node *xlate_iomm_address(
		const volatile void __iomem *IoAddress,
		u64 *dsaptr, u64 *BarOffsetPtr)
{
	unsigned long OrigIoAddr;
	unsigned long BaseIoAddr;
	unsigned long TableIndex;
	struct device_node *DevNode;

	OrigIoAddr = (unsigned long __force)IoAddress;
	/* Reject anything outside [BASE_IO_MEMORY, max_io_memory). */
	if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
		return NULL;
	/* Each IOMM_TABLE_ENTRY_SIZE-sized slot maps to one (node, bar). */
	BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
	TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
	DevNode = iomm_table[TableIndex];

	if (DevNode != NULL) {
		int barnum = iobar_table[TableIndex];
		/* BAR number is encoded in bits 24+ of the DSA. */
		*dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
		*BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
	} else
		panic("PCI: Invalid PCI IoAddress detected!\n");
	return DevNode;
}
677 | 678 | ||
/*
 * Read MM I/O Instructions for the iSeries
 * On MM I/O error, all ones are returned and iSeries_pci_IoError is cal
 * else, data is returned in big Endian format.
 *
 * iSeries_Read_Byte = Read Byte  ( 8 bit)
 * iSeries_Read_Word = Read Word  (16 bit)
 * iSeries_Read_Long = Read Long  (32 bit)
 */
u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		/* Rate-limit the complaint: at most 10 messages per
		 * (roughly) 60-second window.
		 * NOTE(review): raw jiffies arithmetic; time_after()
		 * would be the idiomatic wrap-safe form — confirm. */
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
		return 0xff;	/* look like a master abort */
	}
	/* Retry the HV load until CheckReturnCode accepts it (or panics). */
	do {
		++Pci_Io_Read_Count;
		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);

	return (u8)ret.value;
}
EXPORT_SYMBOL(iSeries_Read_Byte);
716 | 717 | ||
/*
 * 16-bit MMIO read; see iSeries_Read_Byte for the error-handling and
 * rate-limiting scheme.  Result is byte-swapped (HV returns big
 * endian, PCI expects little endian).
 */
u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		/* Rate-limit: at most 10 messages per ~60-second window. */
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
		return 0xffff;	/* look like a master abort */
	}
	/* Retry the HV load until CheckReturnCode accepts it (or panics). */
	do {
		++Pci_Io_Read_Count;
		HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);

	return swab16((u16)ret.value);
}
EXPORT_SYMBOL(iSeries_Read_Word);
747 | 748 | ||
748 | u32 iSeries_Read_Long(const volatile void __iomem *IoAddress) | 749 | u32 iSeries_Read_Long(const volatile void __iomem *IoAddress) |
749 | { | 750 | { |
750 | u64 BarOffset; | 751 | u64 BarOffset; |
751 | u64 dsa; | 752 | u64 dsa; |
752 | int retry = 0; | 753 | int retry = 0; |
753 | struct HvCallPci_LoadReturn ret; | 754 | struct HvCallPci_LoadReturn ret; |
754 | struct device_node *DevNode = | 755 | struct device_node *DevNode = |
755 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 756 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
756 | 757 | ||
757 | if (DevNode == NULL) { | 758 | if (DevNode == NULL) { |
758 | static unsigned long last_jiffies; | 759 | static unsigned long last_jiffies; |
759 | static int num_printed; | 760 | static int num_printed; |
760 | 761 | ||
761 | if ((jiffies - last_jiffies) > 60 * HZ) { | 762 | if ((jiffies - last_jiffies) > 60 * HZ) { |
762 | last_jiffies = jiffies; | 763 | last_jiffies = jiffies; |
763 | num_printed = 0; | 764 | num_printed = 0; |
764 | } | 765 | } |
765 | if (num_printed++ < 10) | 766 | if (num_printed++ < 10) |
766 | printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress); | 767 | printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress); |
767 | return 0xffffffff; | 768 | return 0xffffffff; |
768 | } | 769 | } |
769 | do { | 770 | do { |
770 | ++Pci_Io_Read_Count; | 771 | ++Pci_Io_Read_Count; |
771 | HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa, | 772 | HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa, |
772 | BarOffset, 0); | 773 | BarOffset, 0); |
773 | } while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0); | 774 | } while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0); |
774 | 775 | ||
775 | return swab32((u32)ret.value); | 776 | return swab32((u32)ret.value); |
776 | } | 777 | } |
777 | EXPORT_SYMBOL(iSeries_Read_Long); | 778 | EXPORT_SYMBOL(iSeries_Read_Long); |
778 | 779 | ||
779 | /* | 780 | /* |
780 | * Write MM I/O Instructions for the iSeries | 781 | * Write MM I/O Instructions for the iSeries |
781 | * | 782 | * |
782 | * iSeries_Write_Byte = Write Byte (8 bit) | 783 | * iSeries_Write_Byte = Write Byte (8 bit) |
783 | * iSeries_Write_Word = Write Word(16 bit) | 784 | * iSeries_Write_Word = Write Word(16 bit) |
784 | * iSeries_Write_Long = Write Long(32 bit) | 785 | * iSeries_Write_Long = Write Long(32 bit) |
785 | */ | 786 | */ |
786 | void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress) | 787 | void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress) |
787 | { | 788 | { |
788 | u64 BarOffset; | 789 | u64 BarOffset; |
789 | u64 dsa; | 790 | u64 dsa; |
790 | int retry = 0; | 791 | int retry = 0; |
791 | u64 rc; | 792 | u64 rc; |
792 | struct device_node *DevNode = | 793 | struct device_node *DevNode = |
793 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 794 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
794 | 795 | ||
795 | if (DevNode == NULL) { | 796 | if (DevNode == NULL) { |
796 | static unsigned long last_jiffies; | 797 | static unsigned long last_jiffies; |
797 | static int num_printed; | 798 | static int num_printed; |
798 | 799 | ||
799 | if ((jiffies - last_jiffies) > 60 * HZ) { | 800 | if ((jiffies - last_jiffies) > 60 * HZ) { |
800 | last_jiffies = jiffies; | 801 | last_jiffies = jiffies; |
801 | num_printed = 0; | 802 | num_printed = 0; |
802 | } | 803 | } |
803 | if (num_printed++ < 10) | 804 | if (num_printed++ < 10) |
804 | printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress); | 805 | printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress); |
805 | return; | 806 | return; |
806 | } | 807 | } |
807 | do { | 808 | do { |
808 | ++Pci_Io_Write_Count; | 809 | ++Pci_Io_Write_Count; |
809 | rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0); | 810 | rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0); |
810 | } while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0); | 811 | } while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0); |
811 | } | 812 | } |
812 | EXPORT_SYMBOL(iSeries_Write_Byte); | 813 | EXPORT_SYMBOL(iSeries_Write_Byte); |
813 | 814 | ||
814 | void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress) | 815 | void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress) |
815 | { | 816 | { |
816 | u64 BarOffset; | 817 | u64 BarOffset; |
817 | u64 dsa; | 818 | u64 dsa; |
818 | int retry = 0; | 819 | int retry = 0; |
819 | u64 rc; | 820 | u64 rc; |
820 | struct device_node *DevNode = | 821 | struct device_node *DevNode = |
821 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 822 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
822 | 823 | ||
823 | if (DevNode == NULL) { | 824 | if (DevNode == NULL) { |
824 | static unsigned long last_jiffies; | 825 | static unsigned long last_jiffies; |
825 | static int num_printed; | 826 | static int num_printed; |
826 | 827 | ||
827 | if ((jiffies - last_jiffies) > 60 * HZ) { | 828 | if ((jiffies - last_jiffies) > 60 * HZ) { |
828 | last_jiffies = jiffies; | 829 | last_jiffies = jiffies; |
829 | num_printed = 0; | 830 | num_printed = 0; |
830 | } | 831 | } |
831 | if (num_printed++ < 10) | 832 | if (num_printed++ < 10) |
832 | printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress); | 833 | printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress); |
833 | return; | 834 | return; |
834 | } | 835 | } |
835 | do { | 836 | do { |
836 | ++Pci_Io_Write_Count; | 837 | ++Pci_Io_Write_Count; |
837 | rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0); | 838 | rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0); |
838 | } while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0); | 839 | } while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0); |
839 | } | 840 | } |
840 | EXPORT_SYMBOL(iSeries_Write_Word); | 841 | EXPORT_SYMBOL(iSeries_Write_Word); |
841 | 842 | ||
842 | void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress) | 843 | void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress) |
843 | { | 844 | { |
844 | u64 BarOffset; | 845 | u64 BarOffset; |
845 | u64 dsa; | 846 | u64 dsa; |
846 | int retry = 0; | 847 | int retry = 0; |
847 | u64 rc; | 848 | u64 rc; |
848 | struct device_node *DevNode = | 849 | struct device_node *DevNode = |
849 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 850 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
850 | 851 | ||
851 | if (DevNode == NULL) { | 852 | if (DevNode == NULL) { |
852 | static unsigned long last_jiffies; | 853 | static unsigned long last_jiffies; |
853 | static int num_printed; | 854 | static int num_printed; |
854 | 855 | ||
855 | if ((jiffies - last_jiffies) > 60 * HZ) { | 856 | if ((jiffies - last_jiffies) > 60 * HZ) { |
856 | last_jiffies = jiffies; | 857 | last_jiffies = jiffies; |
857 | num_printed = 0; | 858 | num_printed = 0; |
858 | } | 859 | } |
859 | if (num_printed++ < 10) | 860 | if (num_printed++ < 10) |
860 | printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress); | 861 | printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress); |
861 | return; | 862 | return; |
862 | } | 863 | } |
863 | do { | 864 | do { |
864 | ++Pci_Io_Write_Count; | 865 | ++Pci_Io_Write_Count; |
865 | rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0); | 866 | rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0); |
866 | } while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0); | 867 | } while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0); |
867 | } | 868 | } |
868 | EXPORT_SYMBOL(iSeries_Write_Long); | 869 | EXPORT_SYMBOL(iSeries_Write_Long); |
869 | 870 |
arch/powerpc/platforms/iseries/vio.c
1 | /* | 1 | /* |
2 | * IBM PowerPC iSeries Virtual I/O Infrastructure Support. | 2 | * IBM PowerPC iSeries Virtual I/O Infrastructure Support. |
3 | * | 3 | * |
4 | * Copyright (c) 2005 Stephen Rothwell, IBM Corp. | 4 | * Copyright (c) 2005 Stephen Rothwell, IBM Corp. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | 14 | ||
15 | #include <asm/vio.h> | 15 | #include <asm/vio.h> |
16 | #include <asm/iommu.h> | 16 | #include <asm/iommu.h> |
17 | #include <asm/tce.h> | 17 | #include <asm/tce.h> |
18 | #include <asm/abs_addr.h> | 18 | #include <asm/abs_addr.h> |
19 | #include <asm/page.h> | 19 | #include <asm/page.h> |
20 | #include <asm/iseries/vio.h> | 20 | #include <asm/iseries/vio.h> |
21 | #include <asm/iseries/hv_types.h> | 21 | #include <asm/iseries/hv_types.h> |
22 | #include <asm/iseries/hv_lp_config.h> | 22 | #include <asm/iseries/hv_lp_config.h> |
23 | #include <asm/iseries/hv_call_xm.h> | 23 | #include <asm/iseries/hv_call_xm.h> |
24 | 24 | ||
25 | #include "iommu.h" | ||
26 | |||
25 | struct device *iSeries_vio_dev = &vio_bus_device.dev; | 27 | struct device *iSeries_vio_dev = &vio_bus_device.dev; |
26 | EXPORT_SYMBOL(iSeries_vio_dev); | 28 | EXPORT_SYMBOL(iSeries_vio_dev); |
27 | 29 | ||
28 | static struct iommu_table veth_iommu_table; | 30 | static struct iommu_table veth_iommu_table; |
29 | static struct iommu_table vio_iommu_table; | 31 | static struct iommu_table vio_iommu_table; |
30 | 32 | ||
31 | static void __init iommu_vio_init(void) | 33 | static void __init iommu_vio_init(void) |
32 | { | 34 | { |
33 | iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table); | 35 | iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table); |
34 | veth_iommu_table.it_size /= 2; | 36 | veth_iommu_table.it_size /= 2; |
35 | vio_iommu_table = veth_iommu_table; | 37 | vio_iommu_table = veth_iommu_table; |
36 | vio_iommu_table.it_offset += veth_iommu_table.it_size; | 38 | vio_iommu_table.it_offset += veth_iommu_table.it_size; |
37 | 39 | ||
38 | if (!iommu_init_table(&veth_iommu_table)) | 40 | if (!iommu_init_table(&veth_iommu_table)) |
39 | printk("Virtual Bus VETH TCE table failed.\n"); | 41 | printk("Virtual Bus VETH TCE table failed.\n"); |
40 | if (!iommu_init_table(&vio_iommu_table)) | 42 | if (!iommu_init_table(&vio_iommu_table)) |
41 | printk("Virtual Bus VIO TCE table failed.\n"); | 43 | printk("Virtual Bus VIO TCE table failed.\n"); |
42 | } | 44 | } |
43 | 45 | ||
44 | /** | 46 | /** |
45 | * vio_register_device_iseries: - Register a new iSeries vio device. | 47 | * vio_register_device_iseries: - Register a new iSeries vio device. |
46 | * @voidev: The device to register. | 48 | * @voidev: The device to register. |
47 | */ | 49 | */ |
48 | static struct vio_dev *__init vio_register_device_iseries(char *type, | 50 | static struct vio_dev *__init vio_register_device_iseries(char *type, |
49 | uint32_t unit_num) | 51 | uint32_t unit_num) |
50 | { | 52 | { |
51 | struct vio_dev *viodev; | 53 | struct vio_dev *viodev; |
52 | 54 | ||
53 | /* allocate a vio_dev for this device */ | 55 | /* allocate a vio_dev for this device */ |
54 | viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); | 56 | viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); |
55 | if (!viodev) | 57 | if (!viodev) |
56 | return NULL; | 58 | return NULL; |
57 | memset(viodev, 0, sizeof(struct vio_dev)); | 59 | memset(viodev, 0, sizeof(struct vio_dev)); |
58 | 60 | ||
59 | snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num); | 61 | snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num); |
60 | 62 | ||
61 | viodev->name = viodev->dev.bus_id; | 63 | viodev->name = viodev->dev.bus_id; |
62 | viodev->type = type; | 64 | viodev->type = type; |
63 | viodev->unit_address = unit_num; | 65 | viodev->unit_address = unit_num; |
64 | viodev->iommu_table = &vio_iommu_table; | 66 | viodev->iommu_table = &vio_iommu_table; |
65 | if (vio_register_device(viodev) == NULL) { | 67 | if (vio_register_device(viodev) == NULL) { |
66 | kfree(viodev); | 68 | kfree(viodev); |
67 | return NULL; | 69 | return NULL; |
68 | } | 70 | } |
69 | return viodev; | 71 | return viodev; |
70 | } | 72 | } |
71 | 73 | ||
72 | void __init probe_bus_iseries(void) | 74 | void __init probe_bus_iseries(void) |
73 | { | 75 | { |
74 | HvLpIndexMap vlan_map; | 76 | HvLpIndexMap vlan_map; |
75 | struct vio_dev *viodev; | 77 | struct vio_dev *viodev; |
76 | int i; | 78 | int i; |
77 | 79 | ||
78 | /* there is only one of each of these */ | 80 | /* there is only one of each of these */ |
79 | vio_register_device_iseries("viocons", 0); | 81 | vio_register_device_iseries("viocons", 0); |
80 | vio_register_device_iseries("vscsi", 0); | 82 | vio_register_device_iseries("vscsi", 0); |
81 | 83 | ||
82 | vlan_map = HvLpConfig_getVirtualLanIndexMap(); | 84 | vlan_map = HvLpConfig_getVirtualLanIndexMap(); |
83 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { | 85 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) { |
84 | if ((vlan_map & (0x8000 >> i)) == 0) | 86 | if ((vlan_map & (0x8000 >> i)) == 0) |
85 | continue; | 87 | continue; |
86 | viodev = vio_register_device_iseries("vlan", i); | 88 | viodev = vio_register_device_iseries("vlan", i); |
87 | /* veth is special and has it own iommu_table */ | 89 | /* veth is special and has it own iommu_table */ |
88 | viodev->iommu_table = &veth_iommu_table; | 90 | viodev->iommu_table = &veth_iommu_table; |
89 | } | 91 | } |
90 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++) | 92 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++) |
91 | vio_register_device_iseries("viodasd", i); | 93 | vio_register_device_iseries("viodasd", i); |
92 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++) | 94 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++) |
93 | vio_register_device_iseries("viocd", i); | 95 | vio_register_device_iseries("viocd", i); |
94 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++) | 96 | for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++) |
95 | vio_register_device_iseries("viotape", i); | 97 | vio_register_device_iseries("viotape", i); |
96 | } | 98 | } |
97 | 99 | ||
98 | /** | 100 | /** |
99 | * vio_match_device_iseries: - Tell if a iSeries VIO device matches a | 101 | * vio_match_device_iseries: - Tell if a iSeries VIO device matches a |
100 | * vio_device_id | 102 | * vio_device_id |
101 | */ | 103 | */ |
102 | static int vio_match_device_iseries(const struct vio_device_id *id, | 104 | static int vio_match_device_iseries(const struct vio_device_id *id, |
103 | const struct vio_dev *dev) | 105 | const struct vio_dev *dev) |
104 | { | 106 | { |
105 | return strncmp(dev->type, id->type, strlen(id->type)) == 0; | 107 | return strncmp(dev->type, id->type, strlen(id->type)) == 0; |
106 | } | 108 | } |
107 | 109 | ||
108 | static struct vio_bus_ops vio_bus_ops_iseries = { | 110 | static struct vio_bus_ops vio_bus_ops_iseries = { |
109 | .match = vio_match_device_iseries, | 111 | .match = vio_match_device_iseries, |
110 | }; | 112 | }; |
111 | 113 | ||
112 | /** | 114 | /** |
113 | * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus | 115 | * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus |
114 | */ | 116 | */ |
115 | static int __init vio_bus_init_iseries(void) | 117 | static int __init vio_bus_init_iseries(void) |
116 | { | 118 | { |
117 | int err; | 119 | int err; |
118 | 120 | ||
119 | err = vio_bus_init(&vio_bus_ops_iseries); | 121 | err = vio_bus_init(&vio_bus_ops_iseries); |
120 | if (err == 0) { | 122 | if (err == 0) { |
121 | iommu_vio_init(); | 123 | iommu_vio_init(); |
122 | vio_bus_device.iommu_table = &vio_iommu_table; | 124 | vio_bus_device.iommu_table = &vio_iommu_table; |
123 | iSeries_vio_dev = &vio_bus_device.dev; | 125 | iSeries_vio_dev = &vio_bus_device.dev; |
124 | probe_bus_iseries(); | 126 | probe_bus_iseries(); |
125 | } | 127 | } |
126 | return err; | 128 | return err; |
127 | } | 129 | } |
128 | 130 | ||
129 | __initcall(vio_bus_init_iseries); | 131 | __initcall(vio_bus_init_iseries); |
130 | 132 |
include/asm-powerpc/iommu.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation | 2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation |
3 | * Rewrite, cleanup: | 3 | * Rewrite, cleanup: |
4 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation | 4 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #ifndef _ASM_IOMMU_H | 21 | #ifndef _ASM_IOMMU_H |
22 | #define _ASM_IOMMU_H | 22 | #define _ASM_IOMMU_H |
23 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
24 | 24 | ||
25 | #include <linux/config.h> | 25 | #include <linux/config.h> |
26 | #include <asm/types.h> | 26 | #include <asm/types.h> |
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | #include <linux/dma-mapping.h> | 29 | #include <linux/dma-mapping.h> |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * IOMAP_MAX_ORDER defines the largest contiguous block | 32 | * IOMAP_MAX_ORDER defines the largest contiguous block |
33 | * of dma space we can get. IOMAP_MAX_ORDER = 13 | 33 | * of dma space we can get. IOMAP_MAX_ORDER = 13 |
34 | * allows up to 2**12 pages (4096 * 4096) = 16 MB | 34 | * allows up to 2**12 pages (4096 * 4096) = 16 MB |
35 | */ | 35 | */ |
36 | #define IOMAP_MAX_ORDER 13 | 36 | #define IOMAP_MAX_ORDER 13 |
37 | 37 | ||
38 | struct iommu_table { | 38 | struct iommu_table { |
39 | unsigned long it_busno; /* Bus number this table belongs to */ | 39 | unsigned long it_busno; /* Bus number this table belongs to */ |
40 | unsigned long it_size; /* Size of iommu table in entries */ | 40 | unsigned long it_size; /* Size of iommu table in entries */ |
41 | unsigned long it_offset; /* Offset into global table */ | 41 | unsigned long it_offset; /* Offset into global table */ |
42 | unsigned long it_base; /* mapped address of tce table */ | 42 | unsigned long it_base; /* mapped address of tce table */ |
43 | unsigned long it_index; /* which iommu table this is */ | 43 | unsigned long it_index; /* which iommu table this is */ |
44 | unsigned long it_type; /* type: PCI or Virtual Bus */ | 44 | unsigned long it_type; /* type: PCI or Virtual Bus */ |
45 | unsigned long it_blocksize; /* Entries in each block (cacheline) */ | 45 | unsigned long it_blocksize; /* Entries in each block (cacheline) */ |
46 | unsigned long it_hint; /* Hint for next alloc */ | 46 | unsigned long it_hint; /* Hint for next alloc */ |
47 | unsigned long it_largehint; /* Hint for large allocs */ | 47 | unsigned long it_largehint; /* Hint for large allocs */ |
48 | unsigned long it_halfpoint; /* Breaking point for small/large allocs */ | 48 | unsigned long it_halfpoint; /* Breaking point for small/large allocs */ |
49 | spinlock_t it_lock; /* Protects it_map */ | 49 | spinlock_t it_lock; /* Protects it_map */ |
50 | unsigned long *it_map; /* A simple allocation bitmap for now */ | 50 | unsigned long *it_map; /* A simple allocation bitmap for now */ |
51 | }; | 51 | }; |
52 | 52 | ||
53 | struct scatterlist; | 53 | struct scatterlist; |
54 | struct device_node; | 54 | struct device_node; |
55 | 55 | ||
56 | #ifdef CONFIG_PPC_MULTIPLATFORM | 56 | #ifdef CONFIG_PPC_MULTIPLATFORM |
57 | 57 | ||
58 | /* Walks all buses and creates iommu tables */ | 58 | /* Walks all buses and creates iommu tables */ |
59 | extern void iommu_setup_pSeries(void); | 59 | extern void iommu_setup_pSeries(void); |
60 | extern void iommu_setup_dart(void); | 60 | extern void iommu_setup_dart(void); |
61 | 61 | ||
62 | /* Frees table for an individual device node */ | 62 | /* Frees table for an individual device node */ |
63 | extern void iommu_free_table(struct device_node *dn); | 63 | extern void iommu_free_table(struct device_node *dn); |
64 | 64 | ||
65 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | 65 | #endif /* CONFIG_PPC_MULTIPLATFORM */ |
66 | 66 | ||
67 | #ifdef CONFIG_PPC_PSERIES | ||
68 | |||
69 | /* Creates table for an individual device node */ | ||
70 | extern void iommu_devnode_init_pSeries(struct device_node *dn); | ||
71 | |||
72 | #endif /* CONFIG_PPC_PSERIES */ | ||
73 | |||
74 | #ifdef CONFIG_PPC_ISERIES | ||
75 | |||
76 | /* Creates table for an individual device node */ | ||
77 | extern void iommu_devnode_init_iSeries(struct device_node *dn); | ||
78 | /* Get table parameters from HV */ | ||
79 | extern void iommu_table_getparms_iSeries(unsigned long busno, | ||
80 | unsigned char slotno, | ||
81 | unsigned char virtbus, | ||
82 | struct iommu_table* tbl); | ||
83 | |||
84 | #endif /* CONFIG_PPC_ISERIES */ | ||
85 | |||
86 | /* Initializes an iommu_table based in values set in the passed-in | 67 | /* Initializes an iommu_table based in values set in the passed-in |
87 | * structure | 68 | * structure |
88 | */ | 69 | */ |
89 | extern struct iommu_table *iommu_init_table(struct iommu_table * tbl); | 70 | extern struct iommu_table *iommu_init_table(struct iommu_table * tbl); |
90 | 71 | ||
91 | extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl, | 72 | extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl, |
92 | struct scatterlist *sglist, int nelems, | 73 | struct scatterlist *sglist, int nelems, |
93 | enum dma_data_direction direction); | 74 | enum dma_data_direction direction); |
94 | extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, | 75 | extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, |
95 | int nelems, enum dma_data_direction direction); | 76 | int nelems, enum dma_data_direction direction); |
96 | 77 | ||
97 | extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, | 78 | extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, |
98 | dma_addr_t *dma_handle, gfp_t flag); | 79 | dma_addr_t *dma_handle, gfp_t flag); |
99 | extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, | 80 | extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, |
100 | void *vaddr, dma_addr_t dma_handle); | 81 | void *vaddr, dma_addr_t dma_handle); |
101 | extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, | 82 | extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, |
102 | size_t size, enum dma_data_direction direction); | 83 | size_t size, enum dma_data_direction direction); |
103 | extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, | 84 | extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, |
104 | size_t size, enum dma_data_direction direction); | 85 | size_t size, enum dma_data_direction direction); |
105 | 86 | ||
106 | extern void iommu_init_early_pSeries(void); | 87 | extern void iommu_init_early_pSeries(void); |
107 | extern void iommu_init_early_iSeries(void); | 88 | extern void iommu_init_early_iSeries(void); |
108 | extern void iommu_init_early_dart(void); | 89 | extern void iommu_init_early_dart(void); |
109 | 90 | ||
110 | #ifdef CONFIG_PCI | 91 | #ifdef CONFIG_PCI |
111 | extern void pci_iommu_init(void); | 92 | extern void pci_iommu_init(void); |
112 | extern void pci_direct_iommu_init(void); | 93 | extern void pci_direct_iommu_init(void); |
113 | #else | 94 | #else |
114 | static inline void pci_iommu_init(void) { } | 95 | static inline void pci_iommu_init(void) { } |
115 | #endif | 96 | #endif |
116 | 97 | ||
117 | extern void alloc_dart_table(void); | 98 | extern void alloc_dart_table(void); |
118 | 99 | ||
119 | #endif /* __KERNEL__ */ | 100 | #endif /* __KERNEL__ */ |
120 | #endif /* _ASM_IOMMU_H */ | 101 | #endif /* _ASM_IOMMU_H */ |
121 | 102 |