Commit 84631f37cc405dd6dcd566f9fa4e8a3ca2f03f76

Authored by Michael Ellerman
Committed by Paul Mackerras
1 parent 53024fe250

[POWERPC] Implement pci_set_dma_mask() in terms of the dma_ops

PowerPC currently doesn't implement pci_set_dma_mask(), which means drivers
calling it will get the generic version in drivers/pci/pci.c.

The powerpc dma mapping ops include a dma_set_mask() hook, which luckily is
not implemented by anyone - so there is no bug in the fact that the hook
is currently never called.

However in future we'll add implementation(s) of dma_set_mask(), and so we
need pci_set_dma_mask() to call the hook.

To save adding a hook to the dma mapping ops, pci_set_consistent_dma_mask()
simply calls the dma_set_mask() hook and then copies the new mask into
dev.coherent_dma_mask.
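
For illustration, a typical driver call sequence that now routes through
the dma_ops hook might look like this (hypothetical driver code, not part
of this commit):

        #include <linux/pci.h>

        /* Hypothetical probe path: with this patch, both calls below
         * reach the powerpc dma_set_mask() and, if one is installed,
         * the platform's set_dma_mask hook.
         */
        static int example_probe(struct pci_dev *pdev)
        {
                int err;

                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err)
                        return err;

                /* Also goes through the hook, then mirrors the new mask
                 * into dev.coherent_dma_mask.
                 */
                return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
        }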

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
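
As context for the "implementation(s) of dma_set_mask()" mentioned above, a
minimal sketch of what a platform hook could look like follows (entirely
hypothetical; at this point nothing in the tree implements set_dma_mask):

        /* Hypothetical platform hook.  A real implementation might switch
         * the device between direct and iommu dma_mapping_ops depending on
         * which masks the platform can satisfy.
         */
        static int example_platform_set_dma_mask(struct device *dev, u64 dma_mask)
        {
                if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                        return -EIO;
                *dev->dma_mask = dma_mask;
                return 0;
        }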

Showing 2 changed files with 19 additions and 0 deletions

arch/powerpc/kernel/pci_64.c
/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 * Rework, based on alpha PCI code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;

static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
 * is mapped on the first 64K of IO space
 */
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);

LIST_HEAD(hose_list);

static struct dma_mapping_ops *pci_dma_ops;

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
        pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
        return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

+
+int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{
+        return dma_set_mask(&dev->dev, mask);
+}
+
+int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+        int rc;
+
+        rc = dma_set_mask(&dev->dev, mask);
+        dev->dev.coherent_dma_mask = dev->dma_mask;
+
+        return rc;
+}
+
static void fixup_broken_pcnet32(struct pci_dev* dev)
{
        if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
                dev->vendor = PCI_VENDOR_ID_AMD;
                pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
                             struct resource *res)
{
        unsigned long offset = 0;
        struct pci_controller *hose = pci_bus_to_host(dev->bus);

        if (!hose)
                return;

        if (res->flags & IORESOURCE_IO)
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;

        if (res->flags & IORESOURCE_MEM)
                offset = hose->pci_mem_offset;

        region->start = res->start - offset;
        region->end = res->end - offset;
}

void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
                             struct pci_bus_region *region)
{
        unsigned long offset = 0;
        struct pci_controller *hose = pci_bus_to_host(dev->bus);

        if (!hose)
                return;

        if (res->flags & IORESOURCE_IO)
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;

        if (res->flags & IORESOURCE_MEM)
                offset = hose->pci_mem_offset;

        res->start = region->start + offset;
        res->end = region->end + offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have be mirrored at 0x0100-0x03ff..
 */
void pcibios_align_resource(void *data, struct resource *res,
                            resource_size_t size, resource_size_t align)
{
        struct pci_dev *dev = data;
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        resource_size_t start = res->start;
        unsigned long alignto;

        if (res->flags & IORESOURCE_IO) {
                unsigned long offset = (unsigned long)hose->io_base_virt -
                                _IO_BASE;
                /* Make sure we start at our min on all hoses */
                if (start - offset < PCIBIOS_MIN_IO)
                        start = PCIBIOS_MIN_IO + offset;

                /*
                 * Put everything into 0x00-0xff region modulo 0x400
                 */
                if (start & 0x300)
                        start = (start + 0x3ff) & ~0x3ff;

        } else if (res->flags & IORESOURCE_MEM) {
                /* Make sure we start at our min on all hoses */
                if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
                        start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;

                /* Align to multiple of size of minimum base. */
                alignto = max(0x1000UL, align);
                start = ALIGN(start, alignto);
        }

        res->start = start;
}

void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
        struct pci_dev *dev;
        struct pci_bus *child_bus;

        list_for_each_entry(dev, &b->devices, bus_list) {
                int i;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        struct resource *r = &dev->resource[i];

                        if (r->parent || !r->start || !r->flags)
                                continue;
                        pci_claim_resource(dev, i);
                }
        }

        list_for_each_entry(child_bus, &b->children, node)
                pcibios_claim_one_bus(child_bus);
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
#endif

static void __init pcibios_claim_of_setup(void)
{
        struct pci_bus *b;

        list_for_each_entry(b, &pci_root_buses, node)
                pcibios_claim_one_bus(b);
}

static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
        const u32 *prop;
        int len;

        prop = of_get_property(np, name, &len);
        if (prop && len >= 4)
                return *prop;
        return def;
}

static unsigned int pci_parse_of_flags(u32 addr0)
{
        unsigned int flags = 0;

        if (addr0 & 0x02000000) {
                flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
                flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
                flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
                if (addr0 & 0x40000000)
                        flags |= IORESOURCE_PREFETCH
                                 | PCI_BASE_ADDRESS_MEM_PREFETCH;
        } else if (addr0 & 0x01000000)
                flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
        return flags;
}


static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
        u64 base, size;
        unsigned int flags;
        struct resource *res;
        const u32 *addrs;
        u32 i;
        int proplen;

        addrs = of_get_property(node, "assigned-addresses", &proplen);
        if (!addrs)
                return;
        DBG("    parse addresses (%d bytes) @ %p\n", proplen, addrs);
        for (; proplen >= 20; proplen -= 20, addrs += 5) {
                flags = pci_parse_of_flags(addrs[0]);
                if (!flags)
                        continue;
                base = of_read_number(&addrs[1], 2);
                size = of_read_number(&addrs[3], 2);
                if (!size)
                        continue;
                i = addrs[0] & 0xff;
                DBG("  base: %llx, size: %llx, i: %x\n",
                    (unsigned long long)base, (unsigned long long)size, i);

                if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
                        res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
                } else if (i == dev->rom_base_reg) {
                        res = &dev->resource[PCI_ROM_RESOURCE];
                        flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
                } else {
                        printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
                        continue;
                }
                res->start = base;
                res->end = base + size - 1;
                res->flags = flags;
                res->name = pci_name(dev);
                fixup_resource(res, dev);
        }
}

struct pci_dev *of_create_pci_dev(struct device_node *node,
                                  struct pci_bus *bus, int devfn)
{
        struct pci_dev *dev;
        const char *type;

        dev = alloc_pci_dev();
        if (!dev)
                return NULL;
        type = of_get_property(node, "device_type", NULL);
        if (type == NULL)
                type = "";

        DBG("    create device, devfn: %x, type: %s\n", devfn, type);

        dev->bus = bus;
        dev->sysdata = node;
        dev->dev.parent = bus->bridge;
        dev->dev.bus = &pci_bus_type;
        dev->devfn = devfn;
        dev->multifunction = 0;         /* maybe a lie? */

        dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
        dev->device = get_int_prop(node, "device-id", 0xffff);
        dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
        dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

        dev->cfg_size = pci_cfg_space_size(dev);

        sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
                dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
        dev->class = get_int_prop(node, "class-code", 0);
        dev->revision = get_int_prop(node, "revision-id", 0);

        DBG("    class: 0x%x\n", dev->class);
        DBG("    revision: 0x%x\n", dev->revision);

        dev->current_state = 4;         /* unknown power state */
        dev->error_state = pci_channel_io_normal;
        dev->dma_mask = 0xffffffff;

        if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
                /* a PCI-PCI bridge */
                dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
                dev->rom_base_reg = PCI_ROM_ADDRESS1;
        } else if (!strcmp(type, "cardbus")) {
                dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
        } else {
                dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
                dev->rom_base_reg = PCI_ROM_ADDRESS;
                /* Maybe do a default OF mapping here */
                dev->irq = NO_IRQ;
        }

        pci_parse_of_addrs(node, dev);

        DBG("    adding to system ...\n");

        pci_device_add(dev, bus);

        return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);

void __devinit of_scan_bus(struct device_node *node,
                           struct pci_bus *bus)
{
        struct device_node *child = NULL;
        const u32 *reg;
        int reglen, devfn;
        struct pci_dev *dev;

        DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);

        while ((child = of_get_next_child(node, child)) != NULL) {
                DBG("  * %s\n", child->full_name);
                reg = of_get_property(child, "reg", &reglen);
                if (reg == NULL || reglen < 20)
                        continue;
                devfn = (reg[0] >> 8) & 0xff;

                /* create a new pci_dev for this device */
                dev = of_create_pci_dev(child, bus, devfn);
                if (!dev)
                        continue;
                DBG("dev header type: %x\n", dev->hdr_type);

                if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
                    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
                        of_scan_pci_bridge(child, dev);
        }

        do_bus_setup(bus);
}
EXPORT_SYMBOL(of_scan_bus);

void __devinit of_scan_pci_bridge(struct device_node *node,
                                  struct pci_dev *dev)
{
        struct pci_bus *bus;
        const u32 *busrange, *ranges;
        int len, i, mode;
        struct resource *res;
        unsigned int flags;
        u64 size;

        DBG("of_scan_pci_bridge(%s)\n", node->full_name);

        /* parse bus-range property */
        busrange = of_get_property(node, "bus-range", &len);
        if (busrange == NULL || len != 8) {
                printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
                       node->full_name);
                return;
        }
        ranges = of_get_property(node, "ranges", &len);
        if (ranges == NULL) {
                printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
                       node->full_name);
                return;
        }

        bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
        if (!bus) {
                printk(KERN_ERR "Failed to create pci bus for %s\n",
                       node->full_name);
                return;
        }

        bus->primary = dev->bus->number;
        bus->subordinate = busrange[1];
        bus->bridge_ctl = 0;
        bus->sysdata = node;

        /* parse ranges property */
        /* PCI #address-cells == 3 and #size-cells == 2 always */
        res = &dev->resource[PCI_BRIDGE_RESOURCES];
        for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
                res->flags = 0;
                bus->resource[i] = res;
                ++res;
        }
        i = 1;
        for (; len >= 32; len -= 32, ranges += 8) {
                flags = pci_parse_of_flags(ranges[0]);
                size = of_read_number(&ranges[6], 2);
                if (flags == 0 || size == 0)
                        continue;
                if (flags & IORESOURCE_IO) {
                        res = bus->resource[0];
                        if (res->flags) {
                                printk(KERN_ERR "PCI: ignoring extra I/O range"
                                       " for bridge %s\n", node->full_name);
                                continue;
                        }
                } else {
                        if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
                                printk(KERN_ERR "PCI: too many memory ranges"
                                       " for bridge %s\n", node->full_name);
                                continue;
                        }
                        res = bus->resource[i];
                        ++i;
                }
                res->start = of_read_number(&ranges[1], 2);
                res->end = res->start + size - 1;
                res->flags = flags;
                fixup_resource(res, dev);
        }
        sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
                bus->number);
        DBG("    bus name: %s\n", bus->name);

        mode = PCI_PROBE_NORMAL;
        if (ppc_md.pci_probe_mode)
                mode = ppc_md.pci_probe_mode(bus);
        DBG("    probe mode: %d\n", mode);

        if (mode == PCI_PROBE_DEVTREE)
                of_scan_bus(node, bus);
        else if (mode == PCI_PROBE_NORMAL)
                pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);

void __devinit scan_phb(struct pci_controller *hose)
{
        struct pci_bus *bus;
        struct device_node *node = hose->dn;
        int i, mode;
        struct resource *res;

        DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");

        bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
        if (bus == NULL) {
                printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
                       hose->global_number);
                return;
        }
        bus->secondary = hose->first_busno;
        hose->bus = bus;

        pcibios_map_io_space(bus);

        bus->resource[0] = res = &hose->io_resource;
        if (res->flags && request_resource(&ioport_resource, res)) {
                printk(KERN_ERR "Failed to request PCI IO region "
                       "on PCI domain %04x\n", hose->global_number);
                DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
                    res->start, res->end);
        }

        for (i = 0; i < 3; ++i) {
                res = &hose->mem_resources[i];
                bus->resource[i+1] = res;
                if (res->flags && request_resource(&iomem_resource, res))
                        printk(KERN_ERR "Failed to request PCI memory region "
                               "on PCI domain %04x\n", hose->global_number);
        }

        mode = PCI_PROBE_NORMAL;

        if (node && ppc_md.pci_probe_mode)
                mode = ppc_md.pci_probe_mode(bus);
        DBG("    probe mode: %d\n", mode);
        if (mode == PCI_PROBE_DEVTREE) {
                bus->subordinate = hose->last_busno;
                of_scan_bus(node, bus);
        }

        if (mode == PCI_PROBE_NORMAL)
                hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}

static int __init pcibios_init(void)
{
        struct pci_controller *hose, *tmp;

        /* For now, override phys_mem_access_prot. If we need it,
         * later, we may move that initialization to each ppc_md
         */
        ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

        printk(KERN_DEBUG "PCI: Probing PCI hardware\n");

        /* Scan all of the recorded PCI controllers.  */
        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                scan_phb(hose);
                pci_bus_add_devices(hose->bus);
        }

        if (pci_probe_only)
                pcibios_claim_of_setup();
        else
                /* FIXME: `else' will be removed when
                   pci_assign_unassigned_resources() is able to work
                   correctly with [partially] allocated PCI tree. */
                pci_assign_unassigned_resources();

        /* Call machine dependent final fixup */
        if (ppc_md.pcibios_fixup)
                ppc_md.pcibios_fixup();

        printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

        return 0;
}

subsys_initcall(pcibios_init);

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        u16 cmd, oldcmd;
        int i;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        oldcmd = cmd;

        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                struct resource *res = &dev->resource[i];

                /* Only set up the requested stuff */
                if (!(mask & (1<<i)))
                        continue;

                if (res->flags & IORESOURCE_IO)
                        cmd |= PCI_COMMAND_IO;
                if (res->flags & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }

        if (cmd != oldcmd) {
                printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
                       pci_name(dev), cmd);
                /* Enable the appropriate bits in the PCI command register. */
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        return 0;
}

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        return hose->buid != 0;
}


#ifdef CONFIG_HOTPLUG

int pcibios_unmap_io_space(struct pci_bus *bus)
{
        struct pci_controller *hose;

        WARN_ON(bus == NULL);

        /* If this is not a PHB, we only flush the hash table over
         * the area mapped by this bridge. We don't play with the PTE
         * mappings since we might have to deal with sub-page alignemnts
         * so flushing the hash table is the only sane way to make sure
         * that no hash entries are covering that removed bridge area
         * while still allowing other busses overlapping those pages
         */
        if (bus->self) {
                struct resource *res = bus->resource[0];

                DBG("IO unmapping for PCI-PCI bridge %s\n",
                    pci_name(bus->self));

                __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
                                         res->end - res->start + 1);
                return 0;
        }

        /* Get the host bridge */
        hose = pci_bus_to_host(bus);

        /* Check if we have IOs allocated */
        if (hose->io_base_alloc == 0)
                return 0;

        DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
        DBG("  alloc=0x%p\n", hose->io_base_alloc);

        /* This is a PHB, we fully unmap the IO area */
        vunmap(hose->io_base_alloc);

        return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);

#endif /* CONFIG_HOTPLUG */

int __devinit pcibios_map_io_space(struct pci_bus *bus)
{
        struct vm_struct *area;
        unsigned long phys_page;
        unsigned long size_page;
        unsigned long io_virt_offset;
        struct pci_controller *hose;

        WARN_ON(bus == NULL);

        /* If this not a PHB, nothing to do, page tables still exist and
         * thus HPTEs will be faulted in when needed
         */
        if (bus->self) {
                DBG("IO mapping for PCI-PCI bridge %s\n",
                    pci_name(bus->self));
                DBG("  virt=0x%016lx...0x%016lx\n",
                    bus->resource[0]->start + _IO_BASE,
                    bus->resource[0]->end + _IO_BASE);
                return 0;
        }

        /* Get the host bridge */
        hose = pci_bus_to_host(bus);
        phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
        size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);

        /* Make sure IO area address is clear */
        hose->io_base_alloc = NULL;

        /* If there's no IO to map on that bus, get away too */
        if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
                return 0;

        /* Let's allocate some IO space for that guy. We don't pass
         * VM_IOREMAP because we don't care about alignment tricks that
         * the core does in that case. Maybe we should due to stupid card
         * with incomplete address decoding but I'd rather not deal with
         * those outside of the reserved 64K legacy region.
         */
        area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
        if (area == NULL)
                return -ENOMEM;
        hose->io_base_alloc = area->addr;
        hose->io_base_virt = (void __iomem *)(area->addr +
                                              hose->io_base_phys - phys_page);

        DBG("IO mapping for PHB %s\n", hose->dn->full_name);
        DBG("  phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
            hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
        DBG("  size=0x%016lx (alloc=0x%016lx)\n",
            hose->pci_io_size, size_page);

        /* Establish the mapping */
        if (__ioremap_at(phys_page, area->addr, size_page,
                         _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
                return -ENOMEM;

        /* Fixup hose IO resource */
        io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        hose->io_resource.start += io_virt_offset;
        hose->io_resource.end += io_virt_offset;

        DBG("  hose->io_resource=0x%016lx...0x%016lx\n",
            hose->io_resource.start, hose->io_resource.end);

        return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);

static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        unsigned long offset;

        if (res->flags & IORESOURCE_IO) {
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                res->start += offset;
                res->end += offset;
        } else if (res->flags & IORESOURCE_MEM) {
                res->start += hose->pci_mem_offset;
                res->end += hose->pci_mem_offset;
        }
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
                                              struct pci_bus *bus)
{
        /* Update device resources. */
        int i;

        DBG("%s: Fixup resources:\n", pci_name(dev));
        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                struct resource *res = &dev->resource[i];
                if (!res->flags)
                        continue;

                DBG("  0x%02x < %08lx:0x%016lx...0x%016lx\n",
                    i, res->flags, res->start, res->end);

                fixup_resource(res, dev);

                DBG("       > %08lx:0x%016lx...0x%016lx\n",
                    res->flags, res->start, res->end);
        }
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);

void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
        struct dev_archdata *sd = &dev->dev.archdata;

        sd->of_node = pci_device_to_OF_node(dev);

        DBG("PCI device %s OF node: %s\n", pci_name(dev),
            sd->of_node ? sd->of_node->full_name : "<none>");

        sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
        sd->numa_node = pcibus_to_node(dev->bus);
#else
        sd->numa_node = -1;
#endif
        if (ppc_md.pci_dma_dev_setup)
                ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

static void __devinit do_bus_setup(struct pci_bus *bus)
{
        struct pci_dev *dev;

        if (ppc_md.pci_dma_bus_setup)
                ppc_md.pci_dma_bus_setup(bus);

        list_for_each_entry(dev, &bus->devices, bus_list)
                pcibios_setup_new_device(dev);

        /* Read default IRQs and fixup if necessary */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                pci_read_irq_line(dev);
                if (ppc_md.pci_irq_fixup)
                        ppc_md.pci_irq_fixup(dev);
        }
}

void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
        struct pci_dev *dev = bus->self;
        struct device_node *np;

        np = pci_bus_to_OF_node(bus);

        DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");

        if (dev && pci_probe_only &&
            (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
                /* This is a subordinate bridge */

                pci_read_bridge_bases(bus);
                pcibios_fixup_device_resources(dev, bus);
        }

        do_bus_setup(bus);

        if (!pci_probe_only)
                return;

        list_for_each_entry(dev, &bus->devices, bus_list)
                if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
                        pcibios_fixup_device_resources(dev, bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

unsigned long pci_address_to_pio(phys_addr_t address)
{
        struct pci_controller *hose, *tmp;

        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                if (address >= hose->io_base_phys &&
                    address < (hose->io_base_phys + hose->pci_io_size)) {
                        unsigned long base =
                                (unsigned long)hose->io_base_virt - _IO_BASE;
                        return base + (address - hose->io_base_phys);
                }
        }
        return (unsigned int)-1;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);


#define IOBASE_BRIDGE_NUMBER    0
#define IOBASE_MEMORY           1
#define IOBASE_IO               2
#define IOBASE_ISA_IO           3
#define IOBASE_ISA_MEM          4

long sys_pciconfig_iobase(long which, unsigned long in_bus,
                          unsigned long in_devfn)
{
        struct pci_controller* hose;
        struct list_head *ln;
        struct pci_bus *bus = NULL;
        struct device_node *hose_node;

        /* Argh ! Please forgive me for that hack, but that's the
         * simplest way to get existing XFree to not lockup on some
         * G5 machines... So when something asks for bus 0 io base
         * (bus 0 is HT root), we return the AGP one instead.
         */
        if (machine_is_compatible("MacRISC4"))
                if (in_bus == 0)
                        in_bus = 0xf0;

        /* That syscall isn't quite compatible with PCI domains, but it's
         * used on pre-domains setup. We return the first match
         */

        for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
                bus = pci_bus_b(ln);
                if (in_bus >= bus->number && in_bus <= bus->subordinate)
                        break;
                bus = NULL;
        }
        if (bus == NULL || bus->sysdata == NULL)
                return -ENODEV;

        hose_node = (struct device_node *)bus->sysdata;
        hose = PCI_DN(hose_node)->phb;

        switch (which) {
        case IOBASE_BRIDGE_NUMBER:
                return (long)hose->first_busno;
        case IOBASE_MEMORY:
                return (long)hose->pci_mem_offset;
        case IOBASE_IO:
                return (long)hose->io_base_phys;
        case IOBASE_ISA_IO:
                return (long)isa_io_base;
        case IOBASE_ISA_MEM:
                return -EINVAL;
        }

        return -EOPNOTSUPP;
}

#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
        struct pci_controller *phb = pci_bus_to_host(bus);
        return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif

include/asm-powerpc/dma-mapping.h
1 /* 1 /*
2 * Copyright (C) 2004 IBM 2 * Copyright (C) 2004 IBM
3 * 3 *
4 * Implements the generic device dma API for powerpc. 4 * Implements the generic device dma API for powerpc.
5 * the pci and vio busses 5 * the pci and vio busses
6 */ 6 */
7 #ifndef _ASM_DMA_MAPPING_H 7 #ifndef _ASM_DMA_MAPPING_H
8 #define _ASM_DMA_MAPPING_H 8 #define _ASM_DMA_MAPPING_H
9 #ifdef __KERNEL__ 9 #ifdef __KERNEL__
10 10
11 #include <linux/types.h> 11 #include <linux/types.h>
12 #include <linux/cache.h> 12 #include <linux/cache.h>
13 /* need struct page definitions */ 13 /* need struct page definitions */
14 #include <linux/mm.h> 14 #include <linux/mm.h>
15 #include <linux/scatterlist.h> 15 #include <linux/scatterlist.h>
16 #include <asm/io.h> 16 #include <asm/io.h>
17 17
18 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 18 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
19 19
20 #ifdef CONFIG_NOT_COHERENT_CACHE 20 #ifdef CONFIG_NOT_COHERENT_CACHE
21 /* 21 /*
22 * DMA-consistent mapping functions for PowerPCs that don't support 22 * DMA-consistent mapping functions for PowerPCs that don't support
23 * cache snooping. These allocate/free a region of uncached mapped 23 * cache snooping. These allocate/free a region of uncached mapped
24 * memory space for use with DMA devices. Alternatively, you could 24 * memory space for use with DMA devices. Alternatively, you could
25 * allocate the space "normally" and use the cache management functions 25 * allocate the space "normally" and use the cache management functions
26 * to ensure it is consistent. 26 * to ensure it is consistent.
27 */ 27 */
28 extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp); 28 extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
29 extern void __dma_free_coherent(size_t size, void *vaddr); 29 extern void __dma_free_coherent(size_t size, void *vaddr);
30 extern void __dma_sync(void *vaddr, size_t size, int direction); 30 extern void __dma_sync(void *vaddr, size_t size, int direction);
31 extern void __dma_sync_page(struct page *page, unsigned long offset, 31 extern void __dma_sync_page(struct page *page, unsigned long offset,
32 size_t size, int direction); 32 size_t size, int direction);
33 33
34 #else /* ! CONFIG_NOT_COHERENT_CACHE */ 34 #else /* ! CONFIG_NOT_COHERENT_CACHE */
35 /* 35 /*
36 * Cache coherent cores. 36 * Cache coherent cores.
37 */ 37 */
38 38
39 #define __dma_alloc_coherent(gfp, size, handle) NULL 39 #define __dma_alloc_coherent(gfp, size, handle) NULL
40 #define __dma_free_coherent(size, addr) ((void)0) 40 #define __dma_free_coherent(size, addr) ((void)0)
41 #define __dma_sync(addr, size, rw) ((void)0) 41 #define __dma_sync(addr, size, rw) ((void)0)
42 #define __dma_sync_page(pg, off, sz, rw) ((void)0) 42 #define __dma_sync_page(pg, off, sz, rw) ((void)0)
43 43
44 #endif /* ! CONFIG_NOT_COHERENT_CACHE */ 44 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
45 45
46 #ifdef CONFIG_PPC64 46 #ifdef CONFIG_PPC64
47 /* 47 /*
48 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO 48 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
49 */ 49 */
50 struct dma_mapping_ops { 50 struct dma_mapping_ops {
51 void * (*alloc_coherent)(struct device *dev, size_t size, 51 void * (*alloc_coherent)(struct device *dev, size_t size,
52 dma_addr_t *dma_handle, gfp_t flag); 52 dma_addr_t *dma_handle, gfp_t flag);
53 void (*free_coherent)(struct device *dev, size_t size, 53 void (*free_coherent)(struct device *dev, size_t size,
54 void *vaddr, dma_addr_t dma_handle); 54 void *vaddr, dma_addr_t dma_handle);
55 dma_addr_t (*map_single)(struct device *dev, void *ptr, 55 dma_addr_t (*map_single)(struct device *dev, void *ptr,
56 size_t size, enum dma_data_direction direction); 56 size_t size, enum dma_data_direction direction);
57 void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, 57 void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
58 size_t size, enum dma_data_direction direction); 58 size_t size, enum dma_data_direction direction);
59 int (*map_sg)(struct device *dev, struct scatterlist *sg, 59 int (*map_sg)(struct device *dev, struct scatterlist *sg,
60 int nents, enum dma_data_direction direction); 60 int nents, enum dma_data_direction direction);
61 void (*unmap_sg)(struct device *dev, struct scatterlist *sg, 61 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
62 int nents, enum dma_data_direction direction); 62 int nents, enum dma_data_direction direction);
63 int (*dma_supported)(struct device *dev, u64 mask); 63 int (*dma_supported)(struct device *dev, u64 mask);
64 int (*set_dma_mask)(struct device *dev, u64 dma_mask); 64 int (*set_dma_mask)(struct device *dev, u64 dma_mask);
65 }; 65 };
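To make the shape of this table concrete, here is a hypothetical skeleton for a bus with no address translation; everything named example_* is invented for illustration, while the real in-tree instances (dma_direct_ops, dma_iommu_ops) are declared further down:

static dma_addr_t example_map_single(struct device *dev, void *ptr,
				     size_t size,
				     enum dma_data_direction direction)
{
	/* no IOMMU: the bus sees physical addresses directly */
	return virt_to_bus(ptr);
}

static struct dma_mapping_ops example_dma_ops = {
	.map_single	= example_map_single,
	/* .alloc_coherent, .unmap_single, .map_sg, .unmap_sg,
	 * .dma_supported and .set_dma_mask would be filled in likewise */
};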
66 66
67 static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) 67 static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
68 { 68 {
69 /* We don't handle the NULL dev case for ISA for now; it could 69 /* We don't handle the NULL dev case for ISA for now; it could
70 * be done via an out-of-line call, but it isn't needed yet. The 70 * be done via an out-of-line call, but it isn't needed yet. The
71 * only ISA DMA device we support is the floppy, and the floppy 71 * only ISA DMA device we support is the floppy, and the floppy
72 * driver has a hack to hand us a device directly. 72 * driver has a hack to hand us a device directly.
73 */ 73 */
74 if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL)) 74 if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
75 return NULL; 75 return NULL;
76 return dev->archdata.dma_ops; 76 return dev->archdata.dma_ops;
77 } 77 }
78 78
79 static inline int dma_supported(struct device *dev, u64 mask) 79 static inline int dma_supported(struct device *dev, u64 mask)
80 { 80 {
81 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 81 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
82 82
83 if (unlikely(dma_ops == NULL)) 83 if (unlikely(dma_ops == NULL))
84 return 0; 84 return 0;
85 if (dma_ops->dma_supported == NULL) 85 if (dma_ops->dma_supported == NULL)
86 return 1; 86 return 1;
87 return dma_ops->dma_supported(dev, mask); 87 return dma_ops->dma_supported(dev, mask);
88 } 88 }
89 89
90 /* We have our own implementation of pci_set_dma_mask() */
91 #define HAVE_ARCH_PCI_SET_DMA_MASK
92
90 static inline int dma_set_mask(struct device *dev, u64 dma_mask) 93 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
91 { 94 {
92 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 95 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
93 96
94 if (unlikely(dma_ops == NULL)) 97 if (unlikely(dma_ops == NULL))
95 return -EIO; 98 return -EIO;
96 if (dma_ops->set_dma_mask != NULL) 99 if (dma_ops->set_dma_mask != NULL)
97 return dma_ops->set_dma_mask(dev, dma_mask); 100 return dma_ops->set_dma_mask(dev, dma_mask);
98 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 101 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
99 return -EIO; 102 return -EIO;
100 *dev->dma_mask = dma_mask; 103 *dev->dma_mask = dma_mask;
101 return 0; 104 return 0;
102 } 105 }
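This function is what the new HAVE_ARCH_PCI_SET_DMA_MASK override builds on: with that symbol defined, the architecture supplies its own pci_set_dma_mask() rather than the generic one in drivers/pci/pci.c, and the override can simply forward to dma_set_mask(). A sketch consistent with the commit's description (the in-tree definitions live in arch/powerpc/kernel/pci_64.c and may differ in detail, e.g. in how failure is handled):

int pci_set_dma_mask(struct pci_dev *dev, u64 dma_mask)
{
	return dma_set_mask(&dev->dev, dma_mask);
}

int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 dma_mask)
{
	int rc = dma_set_mask(&dev->dev, dma_mask);

	/* there is no dedicated coherent-mask hook, so reuse
	 * set_dma_mask and mirror the new mask on success */
	if (rc == 0)
		dev->dev.coherent_dma_mask = dma_mask;

	return rc;
}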
103 106
104 static inline void *dma_alloc_coherent(struct device *dev, size_t size, 107 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
105 dma_addr_t *dma_handle, gfp_t flag) 108 dma_addr_t *dma_handle, gfp_t flag)
106 { 109 {
107 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 110 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
108 111
109 BUG_ON(!dma_ops); 112 BUG_ON(!dma_ops);
110 return dma_ops->alloc_coherent(dev, size, dma_handle, flag); 113 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
111 } 114 }
112 115
113 static inline void dma_free_coherent(struct device *dev, size_t size, 116 static inline void dma_free_coherent(struct device *dev, size_t size,
114 void *cpu_addr, dma_addr_t dma_handle) 117 void *cpu_addr, dma_addr_t dma_handle)
115 { 118 {
116 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 119 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
117 120
118 BUG_ON(!dma_ops); 121 BUG_ON(!dma_ops);
119 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 122 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
120 } 123 }
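Typical driver usage of the coherent pair, sketched with hypothetical names (struct ring_desc and NUM_DESC are invented): a descriptor ring that the CPU and the device both access without explicit sync calls.

static int example_setup_ring(struct device *dev)
{
	struct ring_desc *ring;		/* hypothetical descriptor layout */
	dma_addr_t ring_bus;

	ring = dma_alloc_coherent(dev, NUM_DESC * sizeof(*ring),
				  &ring_bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	/* ... write ring_bus into the device's base-address register ... */
	dma_free_coherent(dev, NUM_DESC * sizeof(*ring), ring, ring_bus);
	return 0;
}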
121 124
122 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, 125 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
123 size_t size, 126 size_t size,
124 enum dma_data_direction direction) 127 enum dma_data_direction direction)
125 { 128 {
126 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 129 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
127 130
128 BUG_ON(!dma_ops); 131 BUG_ON(!dma_ops);
129 return dma_ops->map_single(dev, cpu_addr, size, direction); 132 return dma_ops->map_single(dev, cpu_addr, size, direction);
130 } 133 }
131 134
132 static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, 135 static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
133 size_t size, 136 size_t size,
134 enum dma_data_direction direction) 137 enum dma_data_direction direction)
135 { 138 {
136 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 139 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
137 140
138 BUG_ON(!dma_ops); 141 BUG_ON(!dma_ops);
139 dma_ops->unmap_single(dev, dma_addr, size, direction); 142 dma_ops->unmap_single(dev, dma_addr, size, direction);
140 } 143 }
141 144
142 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, 145 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
143 unsigned long offset, size_t size, 146 unsigned long offset, size_t size,
144 enum dma_data_direction direction) 147 enum dma_data_direction direction)
145 { 148 {
146 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 149 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
147 150
148 BUG_ON(!dma_ops); 151 BUG_ON(!dma_ops);
149 return dma_ops->map_single(dev, page_address(page) + offset, size, 152 return dma_ops->map_single(dev, page_address(page) + offset, size,
150 direction); 153 direction);
151 } 154 }
152 155
153 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, 156 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
154 size_t size, 157 size_t size,
155 enum dma_data_direction direction) 158 enum dma_data_direction direction)
156 { 159 {
157 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 160 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
158 161
159 BUG_ON(!dma_ops); 162 BUG_ON(!dma_ops);
160 dma_ops->unmap_single(dev, dma_address, size, direction); 163 dma_ops->unmap_single(dev, dma_address, size, direction);
161 } 164 }
162 165
163 static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, 166 static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
164 int nents, enum dma_data_direction direction) 167 int nents, enum dma_data_direction direction)
165 { 168 {
166 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 169 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
167 170
168 BUG_ON(!dma_ops); 171 BUG_ON(!dma_ops);
169 return dma_ops->map_sg(dev, sg, nents, direction); 172 return dma_ops->map_sg(dev, sg, nents, direction);
170 } 173 }
171 174
172 static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, 175 static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
173 int nhwentries, 176 int nhwentries,
174 enum dma_data_direction direction) 177 enum dma_data_direction direction)
175 { 178 {
176 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 179 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
177 180
178 BUG_ON(!dma_ops); 181 BUG_ON(!dma_ops);
179 dma_ops->unmap_sg(dev, sg, nhwentries, direction); 182 dma_ops->unmap_sg(dev, sg, nhwentries, direction);
180 } 183 }
181 184
182 185
183 /* 186 /*
184 * Available generic sets of operations 187 * Available generic sets of operations
185 */ 188 */
186 extern struct dma_mapping_ops dma_iommu_ops; 189 extern struct dma_mapping_ops dma_iommu_ops;
187 extern struct dma_mapping_ops dma_direct_ops; 190 extern struct dma_mapping_ops dma_direct_ops;
188 191
189 extern unsigned long dma_direct_offset; 192 extern unsigned long dma_direct_offset;
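Platform code selects one of these tables per device by filling in the archdata field that get_dma_ops() above reads back. A hypothetical setup hook:

static void example_pci_dma_dev_setup(struct pci_dev *pdev)
{
	/* a platform with an IOMMU would install &dma_iommu_ops instead */
	pdev->dev.archdata.dma_ops = &dma_direct_ops;
}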
190 193
191 #else /* CONFIG_PPC64 */ 194 #else /* CONFIG_PPC64 */
192 195
193 #define dma_supported(dev, mask) (1) 196 #define dma_supported(dev, mask) (1)
194 197
195 static inline int dma_set_mask(struct device *dev, u64 dma_mask) 198 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
196 { 199 {
197 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 200 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
198 return -EIO; 201 return -EIO;
199 202
200 *dev->dma_mask = dma_mask; 203 *dev->dma_mask = dma_mask;
201 204
202 return 0; 205 return 0;
203 } 206 }
204 207
205 static inline void *dma_alloc_coherent(struct device *dev, size_t size, 208 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
206 dma_addr_t * dma_handle, 209 dma_addr_t * dma_handle,
207 gfp_t gfp) 210 gfp_t gfp)
208 { 211 {
209 #ifdef CONFIG_NOT_COHERENT_CACHE 212 #ifdef CONFIG_NOT_COHERENT_CACHE
210 return __dma_alloc_coherent(size, dma_handle, gfp); 213 return __dma_alloc_coherent(size, dma_handle, gfp);
211 #else 214 #else
212 void *ret; 215 void *ret;
213 /* ignore region specifiers */ 216 /* ignore region specifiers */
214 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 217 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
215 218
216 if (dev == NULL || dev->coherent_dma_mask < 0xffffffff) 219 if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
217 gfp |= GFP_DMA; 220 gfp |= GFP_DMA;
218 221
219 ret = (void *)__get_free_pages(gfp, get_order(size)); 222 ret = (void *)__get_free_pages(gfp, get_order(size));
220 223
221 if (ret != NULL) { 224 if (ret != NULL) {
222 memset(ret, 0, size); 225 memset(ret, 0, size);
223 *dma_handle = virt_to_bus(ret); 226 *dma_handle = virt_to_bus(ret);
224 } 227 }
225 228
226 return ret; 229 return ret;
227 #endif 230 #endif
228 } 231 }
229 232
230 static inline void 233 static inline void
231 dma_free_coherent(struct device *dev, size_t size, void *vaddr, 234 dma_free_coherent(struct device *dev, size_t size, void *vaddr,
232 dma_addr_t dma_handle) 235 dma_addr_t dma_handle)
233 { 236 {
234 #ifdef CONFIG_NOT_COHERENT_CACHE 237 #ifdef CONFIG_NOT_COHERENT_CACHE
235 __dma_free_coherent(size, vaddr); 238 __dma_free_coherent(size, vaddr);
236 #else 239 #else
237 free_pages((unsigned long)vaddr, get_order(size)); 240 free_pages((unsigned long)vaddr, get_order(size));
238 #endif 241 #endif
239 } 242 }
240 243
241 static inline dma_addr_t 244 static inline dma_addr_t
242 dma_map_single(struct device *dev, void *ptr, size_t size, 245 dma_map_single(struct device *dev, void *ptr, size_t size,
243 enum dma_data_direction direction) 246 enum dma_data_direction direction)
244 { 247 {
245 BUG_ON(direction == DMA_NONE); 248 BUG_ON(direction == DMA_NONE);
246 249
247 __dma_sync(ptr, size, direction); 250 __dma_sync(ptr, size, direction);
248 251
249 return virt_to_bus(ptr); 252 return virt_to_bus(ptr);
250 } 253 }
251 254
252 static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, 255 static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
253 size_t size, 256 size_t size,
254 enum dma_data_direction direction) 257 enum dma_data_direction direction)
255 { 258 {
256 /* We do nothing. */ 259 /* We do nothing. */
257 } 260 }
258 261
259 static inline dma_addr_t 262 static inline dma_addr_t
260 dma_map_page(struct device *dev, struct page *page, 263 dma_map_page(struct device *dev, struct page *page,
261 unsigned long offset, size_t size, 264 unsigned long offset, size_t size,
262 enum dma_data_direction direction) 265 enum dma_data_direction direction)
263 { 266 {
264 BUG_ON(direction == DMA_NONE); 267 BUG_ON(direction == DMA_NONE);
265 268
266 __dma_sync_page(page, offset, size, direction); 269 __dma_sync_page(page, offset, size, direction);
267 270
268 return page_to_bus(page) + offset; 271 return page_to_bus(page) + offset;
269 } 272 }
270 273
271 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, 274 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
272 size_t size, 275 size_t size,
273 enum dma_data_direction direction) 276 enum dma_data_direction direction)
274 { 277 {
275 /* We do nothing. */ 278 /* We do nothing. */
276 } 279 }
277 280
278 static inline int 281 static inline int
279 dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents, 282 dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
280 enum dma_data_direction direction) 283 enum dma_data_direction direction)
281 { 284 {
282 struct scatterlist *sg; 285 struct scatterlist *sg;
283 int i; 286 int i;
284 287
285 BUG_ON(direction == DMA_NONE); 288 BUG_ON(direction == DMA_NONE);
286 289
287 for_each_sg(sgl, sg, nents, i) { 290 for_each_sg(sgl, sg, nents, i) {
288 BUG_ON(!sg_page(sg)); 291 BUG_ON(!sg_page(sg));
289 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); 292 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
290 sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset; 293 sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
291 } 294 }
292 295
293 return nents; 296 return nents;
294 } 297 }
295 298
296 static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, 299 static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
297 int nhwentries, 300 int nhwentries,
298 enum dma_data_direction direction) 301 enum dma_data_direction direction)
299 { 302 {
300 /* We don't do anything here. */ 303 /* We don't do anything here. */
301 } 304 }
302 305
303 #endif /* CONFIG_PPC64 */ 306 #endif /* CONFIG_PPC64 */
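Whichever branch is compiled, drivers see the same scatter/gather interface. A hypothetical two-segment transmit using the <linux/scatterlist.h> helpers this header already depends on (sg_page() and for_each_sg() above come from the same API); hdr and payload are invented names:

static int example_send_sg(struct device *dev, void *hdr, size_t hdr_len,
			   void *payload, size_t payload_len)
{
	struct scatterlist sg[2];
	int n;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdr, hdr_len);
	sg_set_buf(&sg[1], payload, payload_len);

	n = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
	if (n == 0)
		return -EIO;
	/* ... hand the n mapped segments to the device ... */
	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
	return 0;
}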
304 307
305 static inline void dma_sync_single_for_cpu(struct device *dev, 308 static inline void dma_sync_single_for_cpu(struct device *dev,
306 dma_addr_t dma_handle, size_t size, 309 dma_addr_t dma_handle, size_t size,
307 enum dma_data_direction direction) 310 enum dma_data_direction direction)
308 { 311 {
309 BUG_ON(direction == DMA_NONE); 312 BUG_ON(direction == DMA_NONE);
310 __dma_sync(bus_to_virt(dma_handle), size, direction); 313 __dma_sync(bus_to_virt(dma_handle), size, direction);
311 } 314 }
312 315
313 static inline void dma_sync_single_for_device(struct device *dev, 316 static inline void dma_sync_single_for_device(struct device *dev,
314 dma_addr_t dma_handle, size_t size, 317 dma_addr_t dma_handle, size_t size,
315 enum dma_data_direction direction) 318 enum dma_data_direction direction)
316 { 319 {
317 BUG_ON(direction == DMA_NONE); 320 BUG_ON(direction == DMA_NONE);
318 __dma_sync(bus_to_virt(dma_handle), size, direction); 321 __dma_sync(bus_to_virt(dma_handle), size, direction);
319 } 322 }
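This pair passes ownership of a long-lived streaming mapping back and forth: sync_for_cpu before the CPU touches the data, sync_for_device before the device is allowed to DMA again. A fragment with hypothetical dev/handle/buf/len:

/* the device has just finished DMAing into the buffer */
dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
/* ... the CPU may now read the received data in buf ... */
dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
/* ... the device may now DMA into buf again ... */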
320 323
321 static inline void dma_sync_sg_for_cpu(struct device *dev, 324 static inline void dma_sync_sg_for_cpu(struct device *dev,
322 struct scatterlist *sgl, int nents, 325 struct scatterlist *sgl, int nents,
323 enum dma_data_direction direction) 326 enum dma_data_direction direction)
324 { 327 {
325 struct scatterlist *sg; 328 struct scatterlist *sg;
326 int i; 329 int i;
327 330
328 BUG_ON(direction == DMA_NONE); 331 BUG_ON(direction == DMA_NONE);
329 332
330 for_each_sg(sgl, sg, nents, i) 333 for_each_sg(sgl, sg, nents, i)
331 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); 334 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
332 } 335 }
333 336
334 static inline void dma_sync_sg_for_device(struct device *dev, 337 static inline void dma_sync_sg_for_device(struct device *dev,
335 struct scatterlist *sgl, int nents, 338 struct scatterlist *sgl, int nents,
336 enum dma_data_direction direction) 339 enum dma_data_direction direction)
337 { 340 {
338 struct scatterlist *sg; 341 struct scatterlist *sg;
339 int i; 342 int i;
340 343
341 BUG_ON(direction == DMA_NONE); 344 BUG_ON(direction == DMA_NONE);
342 345
343 for_each_sg(sgl, sg, nents, i) 346 for_each_sg(sgl, sg, nents, i)
344 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); 347 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
345 } 348 }
346 349
347 static inline int dma_mapping_error(dma_addr_t dma_addr) 350 static inline int dma_mapping_error(dma_addr_t dma_addr)
348 { 351 {
349 #ifdef CONFIG_PPC64 352 #ifdef CONFIG_PPC64
350 return (dma_addr == DMA_ERROR_CODE); 353 return (dma_addr == DMA_ERROR_CODE);
351 #else 354 #else
352 return 0; 355 return 0;
353 #endif 356 #endif
354 } 357 }
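dma_mapping_error() is how a driver detects a failed streaming mapping; only the PPC64 iommu path can actually fail here, hence the constant 0 on 32-bit. Hypothetical usage:

static int example_rx_once(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(handle))
		return -EIO;
	/* ... start the transfer and wait for it to complete ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}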
355 358
356 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 359 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
357 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 360 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
358 #ifdef CONFIG_NOT_COHERENT_CACHE 361 #ifdef CONFIG_NOT_COHERENT_CACHE
359 #define dma_is_consistent(d, h) (0) 362 #define dma_is_consistent(d, h) (0)
360 #else 363 #else
361 #define dma_is_consistent(d, h) (1) 364 #define dma_is_consistent(d, h) (1)
362 #endif 365 #endif
363 366
364 static inline int dma_get_cache_alignment(void) 367 static inline int dma_get_cache_alignment(void)
365 { 368 {
366 #ifdef CONFIG_PPC64 369 #ifdef CONFIG_PPC64
367 /* no easy way to get cache size on all processors, so return 370 /* no easy way to get cache size on all processors, so return
368 * the maximum possible, to be safe */ 371 * the maximum possible, to be safe */
369 return (1 << INTERNODE_CACHE_SHIFT); 372 return (1 << INTERNODE_CACHE_SHIFT);
370 #else 373 #else
371 /* 374 /*
372 * Each processor family will define its own L1_CACHE_SHIFT, 375 * Each processor family will define its own L1_CACHE_SHIFT,
373 * L1_CACHE_BYTES is derived from it, so this is always safe. 376 * L1_CACHE_BYTES is derived from it, so this is always safe.
374 */ 377 */
375 return L1_CACHE_BYTES; 378 return L1_CACHE_BYTES;
376 #endif 379 #endif
377 } 380 }
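A common use of this value is to round buffer sizes up so a streaming mapping never shares a cache line with unrelated data, which matters on non-coherent parts where invalidation works on whole lines. A fragment assuming ALIGN() from <linux/kernel.h> and a hypothetical len:

size_t align = dma_get_cache_alignment();
void *buf = kmalloc(ALIGN(len, align), GFP_KERNEL);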
378 381
379 static inline void dma_sync_single_range_for_cpu(struct device *dev, 382 static inline void dma_sync_single_range_for_cpu(struct device *dev,
380 dma_addr_t dma_handle, unsigned long offset, size_t size, 383 dma_addr_t dma_handle, unsigned long offset, size_t size,
381 enum dma_data_direction direction) 384 enum dma_data_direction direction)
382 { 385 {
383 /* conservative: sync from the start of the buffer to the end of the range */ 386 /* conservative: sync from the start of the buffer to the end of the range */
384 dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction); 387 dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
385 } 388 }
386 389
387 static inline void dma_sync_single_range_for_device(struct device *dev, 390 static inline void dma_sync_single_range_for_device(struct device *dev,
388 dma_addr_t dma_handle, unsigned long offset, size_t size, 391 dma_addr_t dma_handle, unsigned long offset, size_t size,
389 enum dma_data_direction direction) 392 enum dma_data_direction direction)
390 { 393 {
391 /* conservative: sync from the start of the buffer to the end of the range */ 394 /* conservative: sync from the start of the buffer to the end of the range */
392 dma_sync_single_for_device(dev, dma_handle, offset + size, direction); 395 dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
393 } 396 }
394 397
395 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 398 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
396 enum dma_data_direction direction) 399 enum dma_data_direction direction)
397 { 400 {
398 BUG_ON(direction == DMA_NONE); 401 BUG_ON(direction == DMA_NONE);
399 __dma_sync(vaddr, size, (int)direction); 402 __dma_sync(vaddr, size, (int)direction);
400 } 403 }
401 404
402 #endif /* __KERNEL__ */ 405 #endif /* __KERNEL__ */
403 #endif /* _ASM_DMA_MAPPING_H */ 406 #endif /* _ASM_DMA_MAPPING_H */
404 407