Commit 23ac4ae827e6264e21b898f2cd3f601450aa02a6
Committed by
H. Peter Anvin
1 parent
900f9ac9f1
Exists in
master
and in
20 other branches
x86, k8: Rename k8.[ch] to amd_nb.[ch] and CONFIG_K8_NB to CONFIG_AMD_NB
The file names are somewhat misleading as the code is not specific to AMD K8 CPUs anymore. The files accommodate code for other AMD CPU northbridges as well. Same is true for the config option which is valid for AMD CPU northbridges in general and not specific to K8. Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com> LKML-Reference: <20100917160343.GD4958@loge.amd.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Showing 16 changed files with 201 additions and 201 deletions Side-by-side Diff
- arch/x86/Kconfig
- arch/x86/include/asm/amd_nb.h
- arch/x86/include/asm/k8.h
- arch/x86/kernel/Makefile
- arch/x86/kernel/amd_nb.c
- arch/x86/kernel/aperture_64.c
- arch/x86/kernel/cpu/intel_cacheinfo.c
- arch/x86/kernel/k8.c
- arch/x86/kernel/pci-gart_64.c
- arch/x86/kernel/setup.c
- arch/x86/mm/k8topology_64.c
- arch/x86/mm/numa_64.c
- drivers/char/agp/Kconfig
- drivers/char/agp/amd64-agp.c
- drivers/edac/Kconfig
- drivers/edac/amd64_edac.c
arch/x86/Kconfig
... | ... | @@ -670,7 +670,7 @@ |
670 | 670 | bool "GART IOMMU support" if EMBEDDED |
671 | 671 | default y |
672 | 672 | select SWIOTLB |
673 | - depends on X86_64 && PCI && K8_NB | |
673 | + depends on X86_64 && PCI && AMD_NB | |
674 | 674 | ---help--- |
675 | 675 | Support for full DMA access of devices with 32bit memory access only |
676 | 676 | on systems with more than 3GB. This is usually needed for USB, |
... | ... | @@ -2076,7 +2076,7 @@ |
2076 | 2076 | |
2077 | 2077 | endif # X86_32 |
2078 | 2078 | |
2079 | -config K8_NB | |
2079 | +config AMD_NB | |
2080 | 2080 | def_bool y |
2081 | 2081 | depends on CPU_SUP_AMD && PCI |
2082 | 2082 |
arch/x86/include/asm/amd_nb.h
1 | +#ifndef _ASM_X86_AMD_NB_H | |
2 | +#define _ASM_X86_AMD_NB_H | |
3 | + | |
4 | +#include <linux/pci.h> | |
5 | + | |
6 | +extern struct pci_device_id k8_nb_ids[]; | |
7 | +struct bootnode; | |
8 | + | |
9 | +extern int early_is_k8_nb(u32 value); | |
10 | +extern int cache_k8_northbridges(void); | |
11 | +extern void k8_flush_garts(void); | |
12 | +extern int k8_get_nodes(struct bootnode *nodes); | |
13 | +extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); | |
14 | +extern int k8_scan_nodes(void); | |
15 | + | |
16 | +struct k8_northbridge_info { | |
17 | + u16 num; | |
18 | + u8 gart_supported; | |
19 | + struct pci_dev **nb_misc; | |
20 | +}; | |
21 | +extern struct k8_northbridge_info k8_northbridges; | |
22 | + | |
23 | +#ifdef CONFIG_AMD_NB | |
24 | + | |
25 | +static inline struct pci_dev *node_to_k8_nb_misc(int node) | |
26 | +{ | |
27 | + return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL; | |
28 | +} | |
29 | + | |
30 | +#else | |
31 | + | |
32 | +static inline struct pci_dev *node_to_k8_nb_misc(int node) | |
33 | +{ | |
34 | + return NULL; | |
35 | +} | |
36 | +#endif | |
37 | + | |
38 | + | |
39 | +#endif /* _ASM_X86_AMD_NB_H */ |
arch/x86/include/asm/k8.h
1 | -#ifndef _ASM_X86_K8_H | |
2 | -#define _ASM_X86_K8_H | |
3 | - | |
4 | -#include <linux/pci.h> | |
5 | - | |
6 | -extern struct pci_device_id k8_nb_ids[]; | |
7 | -struct bootnode; | |
8 | - | |
9 | -extern int early_is_k8_nb(u32 value); | |
10 | -extern int cache_k8_northbridges(void); | |
11 | -extern void k8_flush_garts(void); | |
12 | -extern int k8_get_nodes(struct bootnode *nodes); | |
13 | -extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn); | |
14 | -extern int k8_scan_nodes(void); | |
15 | - | |
16 | -struct k8_northbridge_info { | |
17 | - u16 num; | |
18 | - u8 gart_supported; | |
19 | - struct pci_dev **nb_misc; | |
20 | -}; | |
21 | -extern struct k8_northbridge_info k8_northbridges; | |
22 | - | |
23 | -#ifdef CONFIG_K8_NB | |
24 | - | |
25 | -static inline struct pci_dev *node_to_k8_nb_misc(int node) | |
26 | -{ | |
27 | - return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL; | |
28 | -} | |
29 | - | |
30 | -#else | |
31 | - | |
32 | -static inline struct pci_dev *node_to_k8_nb_misc(int node) | |
33 | -{ | |
34 | - return NULL; | |
35 | -} | |
36 | -#endif | |
37 | - | |
38 | - | |
39 | -#endif /* _ASM_X86_K8_H */ |
arch/x86/kernel/Makefile
arch/x86/kernel/amd_nb.c
1 | +/* | |
2 | + * Shared support code for AMD K8 northbridges and derivates. | |
3 | + * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. | |
4 | + */ | |
5 | +#include <linux/types.h> | |
6 | +#include <linux/slab.h> | |
7 | +#include <linux/init.h> | |
8 | +#include <linux/errno.h> | |
9 | +#include <linux/module.h> | |
10 | +#include <linux/spinlock.h> | |
11 | +#include <asm/amd_nb.h> | |
12 | + | |
13 | +static u32 *flush_words; | |
14 | + | |
15 | +struct pci_device_id k8_nb_ids[] = { | |
16 | + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, | |
17 | + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | |
18 | + {} | |
19 | +}; | |
20 | +EXPORT_SYMBOL(k8_nb_ids); | |
21 | + | |
22 | +struct k8_northbridge_info k8_northbridges; | |
23 | +EXPORT_SYMBOL(k8_northbridges); | |
24 | + | |
25 | +static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) | |
26 | +{ | |
27 | + do { | |
28 | + dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | |
29 | + if (!dev) | |
30 | + break; | |
31 | + } while (!pci_match_id(&k8_nb_ids[0], dev)); | |
32 | + return dev; | |
33 | +} | |
34 | + | |
35 | +int cache_k8_northbridges(void) | |
36 | +{ | |
37 | + int i; | |
38 | + struct pci_dev *dev; | |
39 | + | |
40 | + if (k8_northbridges.num) | |
41 | + return 0; | |
42 | + | |
43 | + dev = NULL; | |
44 | + while ((dev = next_k8_northbridge(dev)) != NULL) | |
45 | + k8_northbridges.num++; | |
46 | + | |
47 | + /* some CPU families (e.g. family 0x11) do not support GART */ | |
48 | + if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10) | |
49 | + k8_northbridges.gart_supported = 1; | |
50 | + | |
51 | + k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) * | |
52 | + sizeof(void *), GFP_KERNEL); | |
53 | + if (!k8_northbridges.nb_misc) | |
54 | + return -ENOMEM; | |
55 | + | |
56 | + if (!k8_northbridges.num) { | |
57 | + k8_northbridges.nb_misc[0] = NULL; | |
58 | + return 0; | |
59 | + } | |
60 | + | |
61 | + if (k8_northbridges.gart_supported) { | |
62 | + flush_words = kmalloc(k8_northbridges.num * sizeof(u32), | |
63 | + GFP_KERNEL); | |
64 | + if (!flush_words) { | |
65 | + kfree(k8_northbridges.nb_misc); | |
66 | + return -ENOMEM; | |
67 | + } | |
68 | + } | |
69 | + | |
70 | + dev = NULL; | |
71 | + i = 0; | |
72 | + while ((dev = next_k8_northbridge(dev)) != NULL) { | |
73 | + k8_northbridges.nb_misc[i] = dev; | |
74 | + if (k8_northbridges.gart_supported) | |
75 | + pci_read_config_dword(dev, 0x9c, &flush_words[i++]); | |
76 | + } | |
77 | + k8_northbridges.nb_misc[i] = NULL; | |
78 | + return 0; | |
79 | +} | |
80 | +EXPORT_SYMBOL_GPL(cache_k8_northbridges); | |
81 | + | |
82 | +/* Ignores subdevice/subvendor but as far as I can figure out | |
83 | + they're useless anyways */ | |
84 | +int __init early_is_k8_nb(u32 device) | |
85 | +{ | |
86 | + struct pci_device_id *id; | |
87 | + u32 vendor = device & 0xffff; | |
88 | + device >>= 16; | |
89 | + for (id = k8_nb_ids; id->vendor; id++) | |
90 | + if (vendor == id->vendor && device == id->device) | |
91 | + return 1; | |
92 | + return 0; | |
93 | +} | |
94 | + | |
95 | +void k8_flush_garts(void) | |
96 | +{ | |
97 | + int flushed, i; | |
98 | + unsigned long flags; | |
99 | + static DEFINE_SPINLOCK(gart_lock); | |
100 | + | |
101 | + if (!k8_northbridges.gart_supported) | |
102 | + return; | |
103 | + | |
104 | + /* Avoid races between AGP and IOMMU. In theory it's not needed | |
105 | + but I'm not sure if the hardware won't lose flush requests | |
106 | + when another is pending. This whole thing is so expensive anyways | |
107 | + that it doesn't matter to serialize more. -AK */ | |
108 | + spin_lock_irqsave(&gart_lock, flags); | |
109 | + flushed = 0; | |
110 | + for (i = 0; i < k8_northbridges.num; i++) { | |
111 | + pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c, | |
112 | + flush_words[i]|1); | |
113 | + flushed++; | |
114 | + } | |
115 | + for (i = 0; i < k8_northbridges.num; i++) { | |
116 | + u32 w; | |
117 | + /* Make sure the hardware actually executed the flush*/ | |
118 | + for (;;) { | |
119 | + pci_read_config_dword(k8_northbridges.nb_misc[i], | |
120 | + 0x9c, &w); | |
121 | + if (!(w & 1)) | |
122 | + break; | |
123 | + cpu_relax(); | |
124 | + } | |
125 | + } | |
126 | + spin_unlock_irqrestore(&gart_lock, flags); | |
127 | + if (!flushed) | |
128 | + printk("nothing to flush?\n"); | |
129 | +} | |
130 | +EXPORT_SYMBOL_GPL(k8_flush_garts); | |
131 | + | |
132 | +static __init int init_k8_nbs(void) | |
133 | +{ | |
134 | + int err = 0; | |
135 | + | |
136 | + err = cache_k8_northbridges(); | |
137 | + | |
138 | + if (err < 0) | |
139 | + printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n"); | |
140 | + | |
141 | + return err; | |
142 | +} | |
143 | + | |
144 | +/* This has to go after the PCI subsystem */ | |
145 | +fs_initcall(init_k8_nbs); |
arch/x86/kernel/aperture_64.c
arch/x86/kernel/cpu/intel_cacheinfo.c
... | ... | @@ -17,7 +17,7 @@ |
17 | 17 | |
18 | 18 | #include <asm/processor.h> |
19 | 19 | #include <linux/smp.h> |
20 | -#include <asm/k8.h> | |
20 | +#include <asm/amd_nb.h> | |
21 | 21 | #include <asm/smp.h> |
22 | 22 | |
23 | 23 | #define LVL_1_INST 1 |
... | ... | @@ -306,7 +306,7 @@ |
306 | 306 | ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); |
307 | 307 | }; |
308 | 308 | |
309 | -#ifdef CONFIG_K8_NB | |
309 | +#ifdef CONFIG_AMD_NB | |
310 | 310 | |
311 | 311 | /* |
312 | 312 | * L3 cache descriptors |
313 | 313 | |
... | ... | @@ -556,12 +556,12 @@ |
556 | 556 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, |
557 | 557 | show_cache_disable_1, store_cache_disable_1); |
558 | 558 | |
559 | -#else /* CONFIG_K8_NB */ | |
559 | +#else /* CONFIG_AMD_NB */ | |
560 | 560 | static void __cpuinit |
561 | 561 | amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index) |
562 | 562 | { |
563 | 563 | }; |
564 | -#endif /* CONFIG_K8_NB */ | |
564 | +#endif /* CONFIG_AMD_NB */ | |
565 | 565 | |
566 | 566 | static int |
567 | 567 | __cpuinit cpuid4_cache_lookup_regs(int index, |
... | ... | @@ -1000,7 +1000,7 @@ |
1000 | 1000 | |
1001 | 1001 | static struct attribute *default_l3_attrs[] = { |
1002 | 1002 | DEFAULT_SYSFS_CACHE_ATTRS, |
1003 | -#ifdef CONFIG_K8_NB | |
1003 | +#ifdef CONFIG_AMD_NB | |
1004 | 1004 | &cache_disable_0.attr, |
1005 | 1005 | &cache_disable_1.attr, |
1006 | 1006 | #endif |
arch/x86/kernel/k8.c
1 | -/* | |
2 | - * Shared support code for AMD K8 northbridges and derivates. | |
3 | - * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. | |
4 | - */ | |
5 | -#include <linux/types.h> | |
6 | -#include <linux/slab.h> | |
7 | -#include <linux/init.h> | |
8 | -#include <linux/errno.h> | |
9 | -#include <linux/module.h> | |
10 | -#include <linux/spinlock.h> | |
11 | -#include <asm/k8.h> | |
12 | - | |
13 | -static u32 *flush_words; | |
14 | - | |
15 | -struct pci_device_id k8_nb_ids[] = { | |
16 | - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, | |
17 | - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | |
18 | - {} | |
19 | -}; | |
20 | -EXPORT_SYMBOL(k8_nb_ids); | |
21 | - | |
22 | -struct k8_northbridge_info k8_northbridges; | |
23 | -EXPORT_SYMBOL(k8_northbridges); | |
24 | - | |
25 | -static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) | |
26 | -{ | |
27 | - do { | |
28 | - dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | |
29 | - if (!dev) | |
30 | - break; | |
31 | - } while (!pci_match_id(&k8_nb_ids[0], dev)); | |
32 | - return dev; | |
33 | -} | |
34 | - | |
35 | -int cache_k8_northbridges(void) | |
36 | -{ | |
37 | - int i; | |
38 | - struct pci_dev *dev; | |
39 | - | |
40 | - if (k8_northbridges.num) | |
41 | - return 0; | |
42 | - | |
43 | - dev = NULL; | |
44 | - while ((dev = next_k8_northbridge(dev)) != NULL) | |
45 | - k8_northbridges.num++; | |
46 | - | |
47 | - /* some CPU families (e.g. family 0x11) do not support GART */ | |
48 | - if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10) | |
49 | - k8_northbridges.gart_supported = 1; | |
50 | - | |
51 | - k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) * | |
52 | - sizeof(void *), GFP_KERNEL); | |
53 | - if (!k8_northbridges.nb_misc) | |
54 | - return -ENOMEM; | |
55 | - | |
56 | - if (!k8_northbridges.num) { | |
57 | - k8_northbridges.nb_misc[0] = NULL; | |
58 | - return 0; | |
59 | - } | |
60 | - | |
61 | - if (k8_northbridges.gart_supported) { | |
62 | - flush_words = kmalloc(k8_northbridges.num * sizeof(u32), | |
63 | - GFP_KERNEL); | |
64 | - if (!flush_words) { | |
65 | - kfree(k8_northbridges.nb_misc); | |
66 | - return -ENOMEM; | |
67 | - } | |
68 | - } | |
69 | - | |
70 | - dev = NULL; | |
71 | - i = 0; | |
72 | - while ((dev = next_k8_northbridge(dev)) != NULL) { | |
73 | - k8_northbridges.nb_misc[i] = dev; | |
74 | - if (k8_northbridges.gart_supported) | |
75 | - pci_read_config_dword(dev, 0x9c, &flush_words[i++]); | |
76 | - } | |
77 | - k8_northbridges.nb_misc[i] = NULL; | |
78 | - return 0; | |
79 | -} | |
80 | -EXPORT_SYMBOL_GPL(cache_k8_northbridges); | |
81 | - | |
82 | -/* Ignores subdevice/subvendor but as far as I can figure out | |
83 | - they're useless anyways */ | |
84 | -int __init early_is_k8_nb(u32 device) | |
85 | -{ | |
86 | - struct pci_device_id *id; | |
87 | - u32 vendor = device & 0xffff; | |
88 | - device >>= 16; | |
89 | - for (id = k8_nb_ids; id->vendor; id++) | |
90 | - if (vendor == id->vendor && device == id->device) | |
91 | - return 1; | |
92 | - return 0; | |
93 | -} | |
94 | - | |
95 | -void k8_flush_garts(void) | |
96 | -{ | |
97 | - int flushed, i; | |
98 | - unsigned long flags; | |
99 | - static DEFINE_SPINLOCK(gart_lock); | |
100 | - | |
101 | - if (!k8_northbridges.gart_supported) | |
102 | - return; | |
103 | - | |
104 | - /* Avoid races between AGP and IOMMU. In theory it's not needed | |
105 | - but I'm not sure if the hardware won't lose flush requests | |
106 | - when another is pending. This whole thing is so expensive anyways | |
107 | - that it doesn't matter to serialize more. -AK */ | |
108 | - spin_lock_irqsave(&gart_lock, flags); | |
109 | - flushed = 0; | |
110 | - for (i = 0; i < k8_northbridges.num; i++) { | |
111 | - pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c, | |
112 | - flush_words[i]|1); | |
113 | - flushed++; | |
114 | - } | |
115 | - for (i = 0; i < k8_northbridges.num; i++) { | |
116 | - u32 w; | |
117 | - /* Make sure the hardware actually executed the flush*/ | |
118 | - for (;;) { | |
119 | - pci_read_config_dword(k8_northbridges.nb_misc[i], | |
120 | - 0x9c, &w); | |
121 | - if (!(w & 1)) | |
122 | - break; | |
123 | - cpu_relax(); | |
124 | - } | |
125 | - } | |
126 | - spin_unlock_irqrestore(&gart_lock, flags); | |
127 | - if (!flushed) | |
128 | - printk("nothing to flush?\n"); | |
129 | -} | |
130 | -EXPORT_SYMBOL_GPL(k8_flush_garts); | |
131 | - | |
132 | -static __init int init_k8_nbs(void) | |
133 | -{ | |
134 | - int err = 0; | |
135 | - | |
136 | - err = cache_k8_northbridges(); | |
137 | - | |
138 | - if (err < 0) | |
139 | - printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n"); | |
140 | - | |
141 | - return err; | |
142 | -} | |
143 | - | |
144 | -/* This has to go after the PCI subsystem */ | |
145 | -fs_initcall(init_k8_nbs); |
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/setup.c
arch/x86/mm/k8topology_64.c
arch/x86/mm/numa_64.c
drivers/char/agp/Kconfig
... | ... | @@ -57,7 +57,7 @@ |
57 | 57 | |
58 | 58 | config AGP_AMD64 |
59 | 59 | tristate "AMD Opteron/Athlon64 on-CPU GART support" |
60 | - depends on AGP && X86 && K8_NB | |
60 | + depends on AGP && X86 && AMD_NB | |
61 | 61 | help |
62 | 62 | This option gives you AGP support for the GLX component of |
63 | 63 | X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. |
drivers/char/agp/amd64-agp.c
drivers/edac/Kconfig
... | ... | @@ -66,7 +66,7 @@ |
66 | 66 | |
67 | 67 | config EDAC_AMD64 |
68 | 68 | tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h" |
69 | - depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE | |
69 | + depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE | |
70 | 70 | help |
71 | 71 | Support for error detection and correction on the AMD 64 |
72 | 72 | Families of Memory Controllers (K8, F10h and F11h) |