Commit de3910eb79ac8c0f29a11224661c0ebaaf813039

Authored by Mauro Carvalho Chehab
1 parent e39f4ea9b0

edac: change the mem allocation scheme to make Documentation/kobject.txt happy

Kernel kobjects have rigid rules: each container object must be
dynamically allocated, and several of them can't be packed into a
single kmalloc'ed block.

EDAC never obeyed this rule: it has a single allocation function that
places all needed data inside one kzalloc'ed block.

As this is no longer accepted, change the allocation scheme of the
EDAC *_info structs to comply with this kernel standard.
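
For illustration, a minimal sketch of the two schemes, using a
hypothetical parent/child layout rather than the real EDAC structs:

	/*
	 * Old scheme: several kobject containers live inside one
	 * kzalloc'ed block -- the layout Documentation/kobject.txt
	 * forbids.
	 */
	struct blob {
		struct parent p;	/* embeds a kobject */
		struct child  c[4];	/* each embeds a kobject */
	} *blob = kzalloc(sizeof(*blob), GFP_KERNEL);

	/*
	 * New scheme: every kobject container gets its own allocation
	 * and is reached through pointers.
	 */
	struct parent *p = kzalloc(sizeof(*p), GFP_KERNEL);

	p->child = kcalloc(4, sizeof(*p->child), GFP_KERNEL);
	for (i = 0; i < 4; i++)
		p->child[i] = kzalloc(sizeof(struct child), GFP_KERNEL);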

Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: Aristeu Rozanski <arozansk@redhat.com>
Cc: Doug Thompson <norsk5@yahoo.com>
Cc: Greg K H <gregkh@linuxfoundation.org>
Cc: Borislav Petkov <borislav.petkov@amd.com>
Cc: Mark Gross <mark.gross@intel.com>
Cc: Tim Small <tim@buttersideup.com>
Cc: Ranganathan Desikan <ravi@jetztechnologies.com>
Cc: "Arvind R." <arvino55@gmail.com>
Cc: Olof Johansson <olof@lixom.net>
Cc: Egor Martovetsky <egor@pasemi.com>
Cc: Michal Marek <mmarek@suse.cz>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Hitoshi Mitake <h.mitake@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Shaohui Xie <Shaohui.Xie@freescale.com>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>

Showing 22 changed files with 242 additions and 164 deletions

drivers/edac/amd64_edac.c
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02,  800000000UL},
	{ 0x03,  400000000UL},
	{ 0x04,  200000000UL},
	{ 0x05,  100000000UL},
	{ 0x06,   50000000UL},
	{ 0x07,   25000000UL},
	{ 0x08,   12284069UL},
	{ 0x09,    6274509UL},
	{ 0x0A,    3121951UL},
	{ 0x0B,    1560975UL},
	{ 0x0C,     781440UL},
	{ 0x0D,     390720UL},
	{ 0x0E,     195300UL},
	{ 0x0F,      97650UL},
	{ 0x10,      48854UL},
	{ 0x11,      24427UL},
	{ 0x12,      12213UL},
	{ 0x13,       6101UL},
	{ 0x14,       3051UL},
	{ 0x15,       1523UL},
	{ 0x16,        761UL},
	{ 0x00,          0UL},	/* scrubbing off */
};

static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
				      u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 *
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 *
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
			       const char *func)
{
	if (addr >= 0x100)
		return -EINVAL;

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= 0xfffffffe;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	u8 dct = 0;

	if (addr >= 0x140 && addr <= 0x1a0) {
		dct = 1;
		addr -= 0x100;
	}

	f15h_select_dct(pvt, dct);

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}
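
/*
 * Example (per the ranges above): on F15h a read of "F2x158" first
 * selects DCT1 via F1x10C[DctCfgSel], then accesses offset 0x058,
 * i.e. the DCT1 copy of the F2x058 register.
 */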

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If requested is too big, then use last maximum value found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;

		/*
		 * if no suitable bandwidth found, turn off DRAM scrubbing
		 * entirely by falling back to the last element in the
		 * scrubrates array.
		 */
	}

	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
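
/*
 * Example (per the table above): a request of new_bw = 5000000 bytes/sec
 * with min_rate = 0x5 skips the non-recommended entries 0x01-0x04, finds
 * 0x05-0x09 all above the requested bandwidth, and settles on scrubval
 * 0x0A, programming it and returning 3121951.
 */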

static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (boot_cpu_data.x86 == 0xf)
		min_scrubrate = 0x0;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
				   unsigned nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
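
/*
 * Example: sys_addr 0xffffff8012345678 (bits 63-40 all ones, since bit
 * 39 is set) is truncated to addr 0x0000008012345678 before the
 * base/limit comparison.
 */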

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	unsigned node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK(21, 31) | GENMASK(9, 15);
		mask_bits	= GENMASK(21, 29) | GENMASK(9, 15);
		addr_shift	= 4;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (boot_cpu_data.x86 == 0x15)
			base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
		else
			base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,
				pvt->mc_node_id);

			return csrow;
		}
	}
	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		debugf1("  revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	base = dhar_base(pvt);

	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt);
	else
		*hole_offset = k8_dhar_offset(pvt);

	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
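
/*
 * Example: with dhar_base() returning 0xc0000000, the hole covers
 * [0xc0000000, 0xffffffff], so *hole_base = 0xc0000000 and *hole_size =
 * 0x100000000 - 0xc0000000 = 0x40000000 (1 GB); the DRAM behind the
 * hole is reached at [0x100000000, 0x13fffffff].
 */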

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p. 70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret = 0;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ull << 32)) &&
		    (sys_addr < ((1ull << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
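
/*
 * Example: IntlvEn = 0x3 means SysAddr bits [13:12] select among four
 * nodes, so intlv_shift_table[3] yields 2 interleave bits; IntlvEn = 0
 * (no interleaving) yields 0.
 */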

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
		     (dram_addr & 0xfff);

	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		intlv_shift, (unsigned long)dram_addr,
		(unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		(unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}


/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	unsigned node_id, intlv_shift;
	u64 bits, dram_addr;
	u32 intlv_sel;

	/*
	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
	 * this procedure. When translating from a DramAddr to an InputAddr, the
	 * bits used for node interleaving are discarded. Here we recover these
	 * bits from the IntlvSel field of the DRAM Limit register (section
	 * 3.4.4.2) for the node that input_addr is associated with.
	 */
	pvt = mci->pvt_info;
	node_id = pvt->mc_node_id;

	BUG_ON(node_id > 7);

	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	if (intlv_shift == 0) {
		debugf1("  InputAddr 0x%lx translates to DramAddr of "
			"same value\n", (unsigned long)input_addr);

		return input_addr;
	}

	bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
	       (input_addr & 0xfff);

	intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
		"(%d node interleave bits)\n", (unsigned long)input_addr,
		(unsigned long)dram_addr, intlv_shift);

	return dram_addr;
}

/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, sys_addr;
	int ret = 0;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			debugf1("using DHAR to translate DramAddr 0x%lx to "
				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
				(unsigned long)sys_addr);

			return sys_addr;
		}
	}

	base = get_dram_base(pvt, pvt->mc_node_id);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values. However, the value we are
	 * supposed to return is a full 64-bit physical address. The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
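	/*
	 * Example: if bit 39 is set, (sys_addr & (1ull << 39)) - 1 is
	 * 0x7fffffffff and its complement 0xffffff8000000000 ORs ones
	 * into bits 63-39; if bit 39 is clear, the complement of 0 - 1
	 * is 0 and sys_addr is left unchanged.
	 */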
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1("  Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);

	return sys_addr;
}

/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
					 u64 input_addr)
{
	return dram_addr_to_sys_addr(mci,
			input_addr_to_dram_addr(mci, input_addr));
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
{
	*page = (u32) (error_address >> PAGE_SHIFT);
	*offset = ((u32) error_address) & ~PAGE_MASK;
}
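
/*
 * Example: with 4 KB pages (PAGE_SHIFT = 12), error_address 0x12345678
 * splits into *page = 0x12345 and *offset = 0x678.
 */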

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	debugf1("  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		(dclr & BIT(16)) ? "un" : "",
		(dclr & BIT(19)) ? "yes" : "no");

	debugf1("  PAR/ERR parity: %s\n",
		(dclr & BIT(8)) ? "enabled" : "disabled");

	if (boot_cpu_data.x86 == 0x10)
		debugf1("  DCT 128bit mode width: %s\n",
			(dclr & BIT(11)) ? "128b" : "64b");

	debugf1("  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		(dclr & BIT(12)) ? "yes" : "no",
		(dclr & BIT(13)) ? "yes" : "no",
		(dclr & BIT(14)) ? "yes" : "no",
		(dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	debugf1("  NB two channel DRAM capable: %s\n",
		(pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
		(pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		(pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
		"offset: 0x%08x\n",
		pvt->dhar, dhar_base(pvt),
		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
					   : f10_dhar_offset(pvt));

	debugf1("  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf)
		return;

	amd64_debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);
}

/*
 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, *base0, reg0);

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
			debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
				cs, *base1, reg1);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
			debugf0("  DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, *mask0, reg0);

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
			debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
				cs, *mask1, reg1);
	}
}

static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	/* F15h supports only DDR3 */
	if (boot_cpu_data.x86 >= 0x15)
		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
	else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u64 addr;
	u8 start_bit = 1;
	u8 end_bit   = 47;

	if (c->x86 == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK(start_bit, end_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (c->x86 == 0x15) {
		struct amd64_pvt *pvt;
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 mce_nid, intlv_en;

		if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
			return addr;
955 955
956 mce_nid = amd_get_nb_id(m->extcpu); 956 mce_nid = amd_get_nb_id(m->extcpu);
957 pvt = mcis[mce_nid]->pvt_info; 957 pvt = mcis[mce_nid]->pvt_info;
958 958
959 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); 959 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
960 intlv_en = tmp >> 21 & 0x7; 960 intlv_en = tmp >> 21 & 0x7;
961 961
962 /* add [47:27] + 3 trailing bits */ 962 /* add [47:27] + 3 trailing bits */
963 cc6_base = (tmp & GENMASK(0, 20)) << 3; 963 cc6_base = (tmp & GENMASK(0, 20)) << 3;
964 964
965 /* reverse and add DramIntlvEn */ 965 /* reverse and add DramIntlvEn */
966 cc6_base |= intlv_en ^ 0x7; 966 cc6_base |= intlv_en ^ 0x7;
967 967
968 /* pin at [47:24] */ 968 /* pin at [47:24] */
969 cc6_base <<= 24; 969 cc6_base <<= 24;
970 970
971 if (!intlv_en) 971 if (!intlv_en)
972 return cc6_base | (addr & GENMASK(0, 23)); 972 return cc6_base | (addr & GENMASK(0, 23));
973 973
974 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); 974 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
975 975
976 /* faster log2 */ 976 /* faster log2 */
977 tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1); 977 tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
978 978
979 /* OR DramIntlvSel into bits [14:12] */ 979 /* OR DramIntlvSel into bits [14:12] */
980 tmp_addr |= (tmp & GENMASK(21, 23)) >> 9; 980 tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
981 981
982 /* add remaining [11:0] bits from original MC4_ADDR */ 982 /* add remaining [11:0] bits from original MC4_ADDR */
983 tmp_addr |= addr & GENMASK(0, 11); 983 tmp_addr |= addr & GENMASK(0, 11);
984 984
985 return cc6_base | tmp_addr; 985 return cc6_base | tmp_addr;
986 } 986 }
987 987
988 return addr; 988 return addr;
989 } 989 }
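/*
 * Editor's note: a minimal standalone sketch of the masking above. It
 * assumes the driver-local GENMASK(lo, hi) semantics (low bit first, as
 * the GENMASK(3, 39)-style calls in this file imply); the macro name and
 * sample address below are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

#define EDAC_GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

static void err_addr_mask_demo(void)
{
	uint64_t mc4_addr = 0x0000123456789abcULL;

	/* K8 keeps ErrAddr in MC4_ADDR[39:3], F10h and later in [47:1] */
	assert((mc4_addr & EDAC_GENMASK(3, 39)) == 0x0000003456789ab8ULL);
	assert((mc4_addr & EDAC_GENMASK(1, 47)) == 0x0000123456789abcULL);
}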
990 990
991 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) 991 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
992 { 992 {
993 struct cpuinfo_x86 *c = &boot_cpu_data; 993 struct cpuinfo_x86 *c = &boot_cpu_data;
994 int off = range << 3; 994 int off = range << 3;
995 995
996 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); 996 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
997 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); 997 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
998 998
999 if (c->x86 == 0xf) 999 if (c->x86 == 0xf)
1000 return; 1000 return;
1001 1001
1002 if (!dram_rw(pvt, range)) 1002 if (!dram_rw(pvt, range))
1003 return; 1003 return;
1004 1004
1005 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); 1005 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1006 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); 1006 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1007 1007
1008 /* Factor in CC6 save area by reading dst node's limit reg */ 1008 /* Factor in CC6 save area by reading dst node's limit reg */
1009 if (c->x86 == 0x15) { 1009 if (c->x86 == 0x15) {
1010 struct pci_dev *f1 = NULL; 1010 struct pci_dev *f1 = NULL;
1011 u8 nid = dram_dst_node(pvt, range); 1011 u8 nid = dram_dst_node(pvt, range);
1012 u32 llim; 1012 u32 llim;
1013 1013
1014 f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1)); 1014 f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
1015 if (WARN_ON(!f1)) 1015 if (WARN_ON(!f1))
1016 return; 1016 return;
1017 1017
1018 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim); 1018 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1019 1019
1020 pvt->ranges[range].lim.lo &= GENMASK(0, 15); 1020 pvt->ranges[range].lim.lo &= GENMASK(0, 15);
1021 1021
1022 /* {[39:27],111b} */ 1022 /* {[39:27],111b} */
1023 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; 1023 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1024 1024
1025 pvt->ranges[range].lim.hi &= GENMASK(0, 7); 1025 pvt->ranges[range].lim.hi &= GENMASK(0, 7);
1026 1026
1027 /* [47:40] */ 1027 /* [47:40] */
1028 pvt->ranges[range].lim.hi |= llim >> 13; 1028 pvt->ranges[range].lim.hi |= llim >> 13;
1029 1029
1030 pci_dev_put(f1); 1030 pci_dev_put(f1);
1031 } 1031 }
1032 } 1032 }
1033 1033
1034 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, 1034 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1035 u16 syndrome) 1035 u16 syndrome)
1036 { 1036 {
1037 struct mem_ctl_info *src_mci; 1037 struct mem_ctl_info *src_mci;
1038 struct amd64_pvt *pvt = mci->pvt_info; 1038 struct amd64_pvt *pvt = mci->pvt_info;
1039 int channel, csrow; 1039 int channel, csrow;
1040 u32 page, offset; 1040 u32 page, offset;
1041 1041
1042 error_address_to_page_and_offset(sys_addr, &page, &offset); 1042 error_address_to_page_and_offset(sys_addr, &page, &offset);
1043 1043
1044 /* 1044 /*
1045 * Find out which node the error address belongs to. This may be 1045 * Find out which node the error address belongs to. This may be
1046 * different from the node that detected the error. 1046 * different from the node that detected the error.
1047 */ 1047 */
1048 src_mci = find_mc_by_sys_addr(mci, sys_addr); 1048 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1049 if (!src_mci) { 1049 if (!src_mci) {
1050 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", 1050 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1051 (unsigned long)sys_addr); 1051 (unsigned long)sys_addr);
1052 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1052 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1053 page, offset, syndrome, 1053 page, offset, syndrome,
1054 -1, -1, -1, 1054 -1, -1, -1,
1055 EDAC_MOD_STR, 1055 EDAC_MOD_STR,
1056 "failed to map error addr to a node", 1056 "failed to map error addr to a node",
1057 NULL); 1057 NULL);
1058 return; 1058 return;
1059 } 1059 }
1060 1060
1061 /* Now map the sys_addr to a CSROW */ 1061 /* Now map the sys_addr to a CSROW */
1062 csrow = sys_addr_to_csrow(src_mci, sys_addr); 1062 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1063 if (csrow < 0) { 1063 if (csrow < 0) {
1064 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1064 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1065 page, offset, syndrome, 1065 page, offset, syndrome,
1066 -1, -1, -1, 1066 -1, -1, -1,
1067 EDAC_MOD_STR, 1067 EDAC_MOD_STR,
1068 "failed to map error addr to a csrow", 1068 "failed to map error addr to a csrow",
1069 NULL); 1069 NULL);
1070 return; 1070 return;
1071 } 1071 }
1072 1072
1073 /* CHIPKILL enabled */ 1073 /* CHIPKILL enabled */
1074 if (pvt->nbcfg & NBCFG_CHIPKILL) { 1074 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1075 channel = get_channel_from_ecc_syndrome(mci, syndrome); 1075 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1076 if (channel < 0) { 1076 if (channel < 0) {
1077 /* 1077 /*
1078 * The syndrome didn't map, so we don't know which of the 1078 * The syndrome didn't map, so we don't know which of the
1079 * two DIMMs is in error; we need to flag both of them 1079 * two DIMMs is in error; we need to flag both of them
1080 * as suspect. 1080 * as suspect.
1081 */ 1081 */
1082 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - " 1082 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
1083 "possible error reporting race\n", 1083 "possible error reporting race\n",
1084 syndrome); 1084 syndrome);
1085 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1085 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1086 page, offset, syndrome, 1086 page, offset, syndrome,
1087 csrow, -1, -1, 1087 csrow, -1, -1,
1088 EDAC_MOD_STR, 1088 EDAC_MOD_STR,
1089 "unknown syndrome - possible error reporting race", 1089 "unknown syndrome - possible error reporting race",
1090 NULL); 1090 NULL);
1091 return; 1091 return;
1092 } 1092 }
1093 } else { 1093 } else {
1094 /* 1094 /*
1095 * non-chipkill ecc mode 1095 * non-chipkill ecc mode
1096 * 1096 *
1097 * The k8 documentation is unclear about how to determine the 1097 * The k8 documentation is unclear about how to determine the
1098 * channel number when using non-chipkill memory. This method 1098 * channel number when using non-chipkill memory. This method
1099 * was obtained from email communication with someone at AMD. 1099 * was obtained from email communication with someone at AMD.
1100 * (Wish the email was placed in this comment - norsk) 1100 * (Wish the email was placed in this comment - norsk)
1101 */ 1101 */
1102 channel = ((sys_addr & BIT(3)) != 0); 1102 channel = ((sys_addr & BIT(3)) != 0);
1103 } 1103 }
1104 1104
1105 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1105 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
1106 page, offset, syndrome, 1106 page, offset, syndrome,
1107 csrow, channel, -1, 1107 csrow, channel, -1,
1108 EDAC_MOD_STR, "", NULL); 1108 EDAC_MOD_STR, "", NULL);
1109 } 1109 }
1110 1110
1111 static int ddr2_cs_size(unsigned i, bool dct_width) 1111 static int ddr2_cs_size(unsigned i, bool dct_width)
1112 { 1112 {
1113 unsigned shift = 0; 1113 unsigned shift = 0;
1114 1114
1115 if (i <= 2) 1115 if (i <= 2)
1116 shift = i; 1116 shift = i;
1117 else if (!(i & 0x1)) 1117 else if (!(i & 0x1))
1118 shift = i >> 1; 1118 shift = i >> 1;
1119 else 1119 else
1120 shift = (i + 1) >> 1; 1120 shift = (i + 1) >> 1;
1121 1121
1122 return 128 << (shift + !!dct_width); 1122 return 128 << (shift + !!dct_width);
1123 } 1123 }
1124 1124
1125 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1125 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1126 unsigned cs_mode) 1126 unsigned cs_mode)
1127 { 1127 {
1128 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; 1128 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1129 1129
1130 if (pvt->ext_model >= K8_REV_F) { 1130 if (pvt->ext_model >= K8_REV_F) {
1131 WARN_ON(cs_mode > 11); 1131 WARN_ON(cs_mode > 11);
1132 return ddr2_cs_size(cs_mode, dclr & WIDTH_128); 1132 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1133 } 1133 }
1134 else if (pvt->ext_model >= K8_REV_D) { 1134 else if (pvt->ext_model >= K8_REV_D) {
1135 unsigned diff; 1135 unsigned diff;
1136 WARN_ON(cs_mode > 10); 1136 WARN_ON(cs_mode > 10);
1137 1137
1138 /* 1138 /*
1139 * the below calculation, besides trying to win an obfuscated C 1139 * the below calculation, besides trying to win an obfuscated C
1140 * contest, maps cs_mode values to DIMM chip select sizes. The 1140 * contest, maps cs_mode values to DIMM chip select sizes. The
1141 * mappings are: 1141 * mappings are:
1142 * 1142 *
1143 * cs_mode CS size (MB) 1143 * cs_mode CS size (MB)
1144 * ======= ============ 1144 * ======= ============
1145 * 0 32 1145 * 0 32
1146 * 1 64 1146 * 1 64
1147 * 2 128 1147 * 2 128
1148 * 3 128 1148 * 3 128
1149 * 4 256 1149 * 4 256
1150 * 5 512 1150 * 5 512
1151 * 6 256 1151 * 6 256
1152 * 7 512 1152 * 7 512
1153 * 8 1024 1153 * 8 1024
1154 * 9 1024 1154 * 9 1024
1155 * 10 2048 1155 * 10 2048
1156 * 1156 *
1157 * Basically, it calculates a value with which to shift the 1157 * Basically, it calculates a value with which to shift the
1158 * smallest CS size of 32MB. 1158 * smallest CS size of 32MB.
1159 * 1159 *
1160 * ddr[23]_cs_size have a similar purpose. 1160 * ddr[23]_cs_size have a similar purpose.
1161 */ 1161 */
1162 diff = cs_mode/3 + (unsigned)(cs_mode > 5); 1162 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1163 1163
1164 return 32 << (cs_mode - diff); 1164 return 32 << (cs_mode - diff);
1165 } 1165 }
1166 else { 1166 else {
1167 WARN_ON(cs_mode > 6); 1167 WARN_ON(cs_mode > 6);
1168 return 32 << cs_mode; 1168 return 32 << cs_mode;
1169 } 1169 }
1170 } 1170 }
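/*
 * Editor's note: a self-contained check (illustrative only, not kernel
 * code) that the diff/shift arithmetic above reproduces every entry of
 * the cs_mode -> CS size table from the comment.
 */
#include <assert.h>

static void revde_cs_size_check(void)
{
	static const int size_mb[] = {
		32, 64, 128, 128, 256, 512, 256, 512, 1024, 1024, 2048
	};
	unsigned int cs_mode;

	for (cs_mode = 0; cs_mode <= 10; cs_mode++) {
		unsigned int diff = cs_mode / 3 + (unsigned int)(cs_mode > 5);

		assert((32 << (cs_mode - diff)) == size_mb[cs_mode]);
	}
}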
1171 1171
1172 /* 1172 /*
1173 * Get the number of DCT channels in use. 1173 * Get the number of DCT channels in use.
1174 * 1174 *
1175 * Return: 1175 * Return:
1176 * number of Memory Channels in operation 1176 * number of Memory Channels in operation
1177 * Pass back: 1177 * Pass back:
1178 * contents of the DCL0_LOW register 1178 * contents of the DCL0_LOW register
1179 */ 1179 */
1180 static int f1x_early_channel_count(struct amd64_pvt *pvt) 1180 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1181 { 1181 {
1182 int i, j, channels = 0; 1182 int i, j, channels = 0;
1183 1183
1184 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */ 1184 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1185 if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128)) 1185 if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
1186 return 2; 1186 return 2;
1187 1187
1188 /* 1188 /*
1189 * Need to check whether we are in unganged mode: in that case there 1189 * Need to check whether we are in unganged mode: in that case there
1190 * are 2 channels, but they are not in 128-bit mode, so the 'dclr0' 1190 * are 2 channels, but they are not in 128-bit mode, so the 'dclr0'
1191 * status bit above will be OFF. 1191 * status bit above will be OFF.
1192 * 1192 *
1193 * Need to check DCT0[0] and DCT1[0] to see if only one of them has 1193 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1194 * their CSEnable bit on. If so, then SINGLE DIMM case. 1194 * their CSEnable bit on. If so, then SINGLE DIMM case.
1195 */ 1195 */
1196 debugf0("Data width is not 128 bits - need more decoding\n"); 1196 debugf0("Data width is not 128 bits - need more decoding\n");
1197 1197
1198 /* 1198 /*
1199 * Check DRAM Bank Address Mapping values for each DIMM to see if there 1199 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1200 * is more than just one DIMM present in unganged mode. Need to check 1200 * is more than just one DIMM present in unganged mode. Need to check
1201 * both controllers since DIMMs can be placed in either one. 1201 * both controllers since DIMMs can be placed in either one.
1202 */ 1202 */
1203 for (i = 0; i < 2; i++) { 1203 for (i = 0; i < 2; i++) {
1204 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0); 1204 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1205 1205
1206 for (j = 0; j < 4; j++) { 1206 for (j = 0; j < 4; j++) {
1207 if (DBAM_DIMM(j, dbam) > 0) { 1207 if (DBAM_DIMM(j, dbam) > 0) {
1208 channels++; 1208 channels++;
1209 break; 1209 break;
1210 } 1210 }
1211 } 1211 }
1212 } 1212 }
1213 1213
1214 if (channels > 2) 1214 if (channels > 2)
1215 channels = 2; 1215 channels = 2;
1216 1216
1217 amd64_info("MCT channel count: %d\n", channels); 1217 amd64_info("MCT channel count: %d\n", channels);
1218 1218
1219 return channels; 1219 return channels;
1220 } 1220 }
1221 1221
1222 static int ddr3_cs_size(unsigned i, bool dct_width) 1222 static int ddr3_cs_size(unsigned i, bool dct_width)
1223 { 1223 {
1224 unsigned shift = 0; 1224 unsigned shift = 0;
1225 int cs_size = 0; 1225 int cs_size = 0;
1226 1226
1227 if (i == 0 || i == 3 || i == 4) 1227 if (i == 0 || i == 3 || i == 4)
1228 cs_size = -1; 1228 cs_size = -1;
1229 else if (i <= 2) 1229 else if (i <= 2)
1230 shift = i; 1230 shift = i;
1231 else if (i == 12) 1231 else if (i == 12)
1232 shift = 7; 1232 shift = 7;
1233 else if (!(i & 0x1)) 1233 else if (!(i & 0x1))
1234 shift = i >> 1; 1234 shift = i >> 1;
1235 else 1235 else
1236 shift = (i + 1) >> 1; 1236 shift = (i + 1) >> 1;
1237 1237
1238 if (cs_size != -1) 1238 if (cs_size != -1)
1239 cs_size = (128 * (1 << !!dct_width)) << shift; 1239 cs_size = (128 * (1 << !!dct_width)) << shift;
1240 1240
1241 return cs_size; 1241 return cs_size;
1242 } 1242 }
1243 1243
1244 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1244 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1245 unsigned cs_mode) 1245 unsigned cs_mode)
1246 { 1246 {
1247 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; 1247 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1248 1248
1249 WARN_ON(cs_mode > 11); 1249 WARN_ON(cs_mode > 11);
1250 1250
1251 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) 1251 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1252 return ddr3_cs_size(cs_mode, dclr & WIDTH_128); 1252 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1253 else 1253 else
1254 return ddr2_cs_size(cs_mode, dclr & WIDTH_128); 1254 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1255 } 1255 }
1256 1256
1257 /* 1257 /*
1258 * F15h supports only 64-bit DCT interfaces 1258 * F15h supports only 64-bit DCT interfaces
1259 */ 1259 */
1260 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1260 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1261 unsigned cs_mode) 1261 unsigned cs_mode)
1262 { 1262 {
1263 WARN_ON(cs_mode > 12); 1263 WARN_ON(cs_mode > 12);
1264 1264
1265 return ddr3_cs_size(cs_mode, false); 1265 return ddr3_cs_size(cs_mode, false);
1266 } 1266 }
1267 1267
1268 static void read_dram_ctl_register(struct amd64_pvt *pvt) 1268 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1269 { 1269 {
1270 1270
1271 if (boot_cpu_data.x86 == 0xf) 1271 if (boot_cpu_data.x86 == 0xf)
1272 return; 1272 return;
1273 1273
1274 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { 1274 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1275 debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", 1275 debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1276 pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); 1276 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1277 1277
1278 debugf0(" DCTs operate in %s mode.\n", 1278 debugf0(" DCTs operate in %s mode.\n",
1279 (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); 1279 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1280 1280
1281 if (!dct_ganging_enabled(pvt)) 1281 if (!dct_ganging_enabled(pvt))
1282 debugf0(" Address range split per DCT: %s\n", 1282 debugf0(" Address range split per DCT: %s\n",
1283 (dct_high_range_enabled(pvt) ? "yes" : "no")); 1283 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1284 1284
1285 debugf0(" data interleave for ECC: %s, " 1285 debugf0(" data interleave for ECC: %s, "
1286 "DRAM cleared since last warm reset: %s\n", 1286 "DRAM cleared since last warm reset: %s\n",
1287 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), 1287 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1288 (dct_memory_cleared(pvt) ? "yes" : "no")); 1288 (dct_memory_cleared(pvt) ? "yes" : "no"));
1289 1289
1290 debugf0(" channel interleave: %s, " 1290 debugf0(" channel interleave: %s, "
1291 "interleave bits selector: 0x%x\n", 1291 "interleave bits selector: 0x%x\n",
1292 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), 1292 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1293 dct_sel_interleave_addr(pvt)); 1293 dct_sel_interleave_addr(pvt));
1294 } 1294 }
1295 1295
1296 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); 1296 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1297 } 1297 }
1298 1298
1299 /* 1299 /*
1300 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory 1300 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1301 * Interleaving Modes. 1301 * Interleaving Modes.
1302 */ 1302 */
1303 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, 1303 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1304 bool hi_range_sel, u8 intlv_en) 1304 bool hi_range_sel, u8 intlv_en)
1305 { 1305 {
1306 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1; 1306 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1307 1307
1308 if (dct_ganging_enabled(pvt)) 1308 if (dct_ganging_enabled(pvt))
1309 return 0; 1309 return 0;
1310 1310
1311 if (hi_range_sel) 1311 if (hi_range_sel)
1312 return dct_sel_high; 1312 return dct_sel_high;
1313 1313
1314 /* 1314 /*
1315 * see F2x110[DctSelIntLvAddr] - channel interleave mode 1315 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1316 */ 1316 */
1317 if (dct_interleave_enabled(pvt)) { 1317 if (dct_interleave_enabled(pvt)) {
1318 u8 intlv_addr = dct_sel_interleave_addr(pvt); 1318 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1319 1319
1320 /* return DCT select function: 0=DCT0, 1=DCT1 */ 1320 /* return DCT select function: 0=DCT0, 1=DCT1 */
1321 if (!intlv_addr) 1321 if (!intlv_addr)
1322 return sys_addr >> 6 & 1; 1322 return sys_addr >> 6 & 1;
1323 1323
1324 if (intlv_addr & 0x2) { 1324 if (intlv_addr & 0x2) {
1325 u8 shift = intlv_addr & 0x1 ? 9 : 6; 1325 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1326 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; 1326 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1327 1327
1328 return ((sys_addr >> shift) & 1) ^ temp; 1328 return ((sys_addr >> shift) & 1) ^ temp;
1329 } 1329 }
1330 1330
1331 return (sys_addr >> (12 + hweight8(intlv_en))) & 1; 1331 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1332 } 1332 }
1333 1333
1334 if (dct_high_range_enabled(pvt)) 1334 if (dct_high_range_enabled(pvt))
1335 return ~dct_sel_high & 1; 1335 return ~dct_sel_high & 1;
1336 1336
1337 return 0; 1337 return 0;
1338 } 1338 }
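/*
 * Editor's note: illustrative values only. With channel interleaving on
 * and DctSelIntLvAddr == 0, the DCT is picked by sys_addr bit 6, i.e.
 * the two channels alternate every 64 bytes.
 */
#include <assert.h>
#include <stdint.h>

static void intlv_addr0_demo(void)
{
	assert(((0x0000ULL >> 6) & 1) == 0);	/* DCT0 */
	assert(((0x0040ULL >> 6) & 1) == 1);	/* DCT1 */
	assert(((0x0080ULL >> 6) & 1) == 0);	/* DCT0 again */
}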
1339 1339
1340 /* Convert the sys_addr to the normalized DCT address */ 1340 /* Convert the sys_addr to the normalized DCT address */
1341 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range, 1341 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
1342 u64 sys_addr, bool hi_rng, 1342 u64 sys_addr, bool hi_rng,
1343 u32 dct_sel_base_addr) 1343 u32 dct_sel_base_addr)
1344 { 1344 {
1345 u64 chan_off; 1345 u64 chan_off;
1346 u64 dram_base = get_dram_base(pvt, range); 1346 u64 dram_base = get_dram_base(pvt, range);
1347 u64 hole_off = f10_dhar_offset(pvt); 1347 u64 hole_off = f10_dhar_offset(pvt);
1348 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16; 1348 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1349 1349
1350 if (hi_rng) { 1350 if (hi_rng) {
1351 /* 1351 /*
1352 * if 1352 * if
1353 * base address of high range is below 4Gb 1353 * base address of high range is below 4Gb
1354 * (bits [47:27] at [31:11]) 1354 * (bits [47:27] at [31:11])
1355 * DRAM address space on this DCT is hoisted above 4Gb && 1355 * DRAM address space on this DCT is hoisted above 4Gb &&
1356 * sys_addr > 4Gb 1356 * sys_addr > 4Gb
1357 * 1357 *
1358 * remove hole offset from sys_addr 1358 * remove hole offset from sys_addr
1359 * else 1359 * else
1360 * remove high range offset from sys_addr 1360 * remove high range offset from sys_addr
1361 */ 1361 */
1362 if ((!(dct_sel_base_addr >> 16) || 1362 if ((!(dct_sel_base_addr >> 16) ||
1363 dct_sel_base_addr < dhar_base(pvt)) && 1363 dct_sel_base_addr < dhar_base(pvt)) &&
1364 dhar_valid(pvt) && 1364 dhar_valid(pvt) &&
1365 (sys_addr >= BIT_64(32))) 1365 (sys_addr >= BIT_64(32)))
1366 chan_off = hole_off; 1366 chan_off = hole_off;
1367 else 1367 else
1368 chan_off = dct_sel_base_off; 1368 chan_off = dct_sel_base_off;
1369 } else { 1369 } else {
1370 /* 1370 /*
1371 * if 1371 * if
1372 * we have a valid hole && 1372 * we have a valid hole &&
1373 * sys_addr > 4Gb 1373 * sys_addr > 4Gb
1374 * 1374 *
1375 * remove hole 1375 * remove hole
1376 * else 1376 * else
1377 * remove dram base to normalize to DCT address 1377 * remove dram base to normalize to DCT address
1378 */ 1378 */
1379 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32))) 1379 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1380 chan_off = hole_off; 1380 chan_off = hole_off;
1381 else 1381 else
1382 chan_off = dram_base; 1382 chan_off = dram_base;
1383 } 1383 }
1384 1384
1385 return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47)); 1385 return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
1386 } 1386 }
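/*
 * Editor's note: a worked example of the final subtraction, using
 * hypothetical values and the low-bit-first GENMASK semantics assumed
 * earlier. For a non-hoisted range based at 4 GB, chan_off is the DRAM
 * base, so sys_addr 0x1_0000_1040 normalizes to DCT address 0x1040.
 */
#include <assert.h>
#include <stdint.h>

#define EDAC_GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

static void norm_dct_addr_demo(void)
{
	uint64_t sys_addr = 0x100001040ULL;
	uint64_t chan_off = 0x100000000ULL;	/* dram_base, no hole */

	assert(((sys_addr & EDAC_GENMASK(6, 47)) -
		(chan_off & EDAC_GENMASK(23, 47))) == 0x1040ULL);
}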
1387 1387
1388 /* 1388 /*
1389 * checks if the csrow passed in is marked as SPARED, if so returns the new 1389 * checks if the csrow passed in is marked as SPARED, if so returns the new
1390 * spare row 1390 * spare row
1391 */ 1391 */
1392 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow) 1392 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1393 { 1393 {
1394 int tmp_cs; 1394 int tmp_cs;
1395 1395
1396 if (online_spare_swap_done(pvt, dct) && 1396 if (online_spare_swap_done(pvt, dct) &&
1397 csrow == online_spare_bad_dramcs(pvt, dct)) { 1397 csrow == online_spare_bad_dramcs(pvt, dct)) {
1398 1398
1399 for_each_chip_select(tmp_cs, dct, pvt) { 1399 for_each_chip_select(tmp_cs, dct, pvt) {
1400 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) { 1400 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1401 csrow = tmp_cs; 1401 csrow = tmp_cs;
1402 break; 1402 break;
1403 } 1403 }
1404 } 1404 }
1405 } 1405 }
1406 return csrow; 1406 return csrow;
1407 } 1407 }
1408 1408
1409 /* 1409 /*
1410 * Iterate over the DRAM DCT "base" and "mask" registers looking for a 1410 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1411 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID' 1411 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1412 * 1412 *
1413 * Return: 1413 * Return:
1414 * -EINVAL: NOT FOUND 1414 * -EINVAL: NOT FOUND
1415 * 0..csrow = Chip-Select Row 1415 * 0..csrow = Chip-Select Row
1416 */ 1416 */
1417 static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) 1417 static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1418 { 1418 {
1419 struct mem_ctl_info *mci; 1419 struct mem_ctl_info *mci;
1420 struct amd64_pvt *pvt; 1420 struct amd64_pvt *pvt;
1421 u64 cs_base, cs_mask; 1421 u64 cs_base, cs_mask;
1422 int cs_found = -EINVAL; 1422 int cs_found = -EINVAL;
1423 int csrow; 1423 int csrow;
1424 1424
1425 mci = mcis[nid]; 1425 mci = mcis[nid];
1426 if (!mci) 1426 if (!mci)
1427 return cs_found; 1427 return cs_found;
1428 1428
1429 pvt = mci->pvt_info; 1429 pvt = mci->pvt_info;
1430 1430
1431 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); 1431 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1432 1432
1433 for_each_chip_select(csrow, dct, pvt) { 1433 for_each_chip_select(csrow, dct, pvt) {
1434 if (!csrow_enabled(csrow, dct, pvt)) 1434 if (!csrow_enabled(csrow, dct, pvt))
1435 continue; 1435 continue;
1436 1436
1437 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); 1437 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1438 1438
1439 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", 1439 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1440 csrow, cs_base, cs_mask); 1440 csrow, cs_base, cs_mask);
1441 1441
1442 cs_mask = ~cs_mask; 1442 cs_mask = ~cs_mask;
1443 1443
1444 debugf1(" (InputAddr & ~CSMask)=0x%llx " 1444 debugf1(" (InputAddr & ~CSMask)=0x%llx "
1445 "(CSBase & ~CSMask)=0x%llx\n", 1445 "(CSBase & ~CSMask)=0x%llx\n",
1446 (in_addr & cs_mask), (cs_base & cs_mask)); 1446 (in_addr & cs_mask), (cs_base & cs_mask));
1447 1447
1448 if ((in_addr & cs_mask) == (cs_base & cs_mask)) { 1448 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1449 cs_found = f10_process_possible_spare(pvt, dct, csrow); 1449 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1450 1450
1451 debugf1(" MATCH csrow=%d\n", cs_found); 1451 debugf1(" MATCH csrow=%d\n", cs_found);
1452 break; 1452 break;
1453 } 1453 }
1454 } 1454 }
1455 return cs_found; 1455 return cs_found;
1456 } 1456 }
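/*
 * Editor's note: the match test above is the classic base/mask compare;
 * address bits NOT covered by the mask must agree with the base. The
 * helper below is a sketch with made-up numbers (a row decoding 1 MB at
 * base 0x100000), folding in the mask inversion the loop does in place.
 */
#include <assert.h>
#include <stdint.h>

static int cs_matches(uint64_t addr, uint64_t base, uint64_t cs_mask)
{
	return (addr & ~cs_mask) == (base & ~cs_mask);
}

static void cs_match_demo(void)
{
	assert(cs_matches(0x123456ULL, 0x100000ULL, 0xfffffULL));	/* in row */
	assert(!cs_matches(0x223456ULL, 0x100000ULL, 0xfffffULL));	/* outside */
}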
1457 1457
1458 /* 1458 /*
1459 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is 1459 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1460 * swapped with a region located at the bottom of memory so that the GPU can use 1460 * swapped with a region located at the bottom of memory so that the GPU can use
1461 * the interleaved region and thus two channels. 1461 * the interleaved region and thus two channels.
1462 */ 1462 */
1463 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) 1463 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1464 { 1464 {
1465 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr; 1465 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1466 1466
1467 if (boot_cpu_data.x86 == 0x10) { 1467 if (boot_cpu_data.x86 == 0x10) {
1468 /* only revC3 and revE have that feature */ 1468 /* only revC3 and revE have that feature */
1469 if (boot_cpu_data.x86_model < 4 || 1469 if (boot_cpu_data.x86_model < 4 ||
1470 (boot_cpu_data.x86_model < 0xa && 1470 (boot_cpu_data.x86_model < 0xa &&
1471 boot_cpu_data.x86_mask < 3)) 1471 boot_cpu_data.x86_mask < 3))
1472 return sys_addr; 1472 return sys_addr;
1473 } 1473 }
1474 1474
1475 amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg); 1475 amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1476 1476
1477 if (!(swap_reg & 0x1)) 1477 if (!(swap_reg & 0x1))
1478 return sys_addr; 1478 return sys_addr;
1479 1479
1480 swap_base = (swap_reg >> 3) & 0x7f; 1480 swap_base = (swap_reg >> 3) & 0x7f;
1481 swap_limit = (swap_reg >> 11) & 0x7f; 1481 swap_limit = (swap_reg >> 11) & 0x7f;
1482 rgn_size = (swap_reg >> 20) & 0x7f; 1482 rgn_size = (swap_reg >> 20) & 0x7f;
1483 tmp_addr = sys_addr >> 27; 1483 tmp_addr = sys_addr >> 27;
1484 1484
1485 if (!(sys_addr >> 34) && 1485 if (!(sys_addr >> 34) &&
1486 (((tmp_addr >= swap_base) && 1486 (((tmp_addr >= swap_base) &&
1487 (tmp_addr <= swap_limit)) || 1487 (tmp_addr <= swap_limit)) ||
1488 (tmp_addr < rgn_size))) 1488 (tmp_addr < rgn_size)))
1489 return sys_addr ^ (u64)swap_base << 27; 1489 return sys_addr ^ (u64)swap_base << 27;
1490 1490
1491 return sys_addr; 1491 return sys_addr;
1492 } 1492 }
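/*
 * Editor's note: the XOR above swaps the two regions symmetrically,
 * assuming a suitably aligned swap_base. Hypothetical values: with
 * swap_base = 0x10 (2 GB at 128 MB granularity), an address in the high
 * region maps to the bottom of memory and vice versa.
 */
#include <assert.h>
#include <stdint.h>

static void swap_region_demo(void)
{
	uint64_t swapped = 0x10ULL << 27;	/* 0x80000000 */

	assert((0x80001000ULL ^ swapped) == 0x00001000ULL);
	assert((0x00001000ULL ^ swapped) == 0x80001000ULL);
}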
1493 1493
1494 /* For a given @dram_range, check if @sys_addr falls within it. */ 1494 /* For a given @dram_range, check if @sys_addr falls within it. */
1495 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, 1495 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1496 u64 sys_addr, int *nid, int *chan_sel) 1496 u64 sys_addr, int *nid, int *chan_sel)
1497 { 1497 {
1498 int cs_found = -EINVAL; 1498 int cs_found = -EINVAL;
1499 u64 chan_addr; 1499 u64 chan_addr;
1500 u32 dct_sel_base; 1500 u32 dct_sel_base;
1501 u8 channel; 1501 u8 channel;
1502 bool high_range = false; 1502 bool high_range = false;
1503 1503
1504 u8 node_id = dram_dst_node(pvt, range); 1504 u8 node_id = dram_dst_node(pvt, range);
1505 u8 intlv_en = dram_intlv_en(pvt, range); 1505 u8 intlv_en = dram_intlv_en(pvt, range);
1506 u32 intlv_sel = dram_intlv_sel(pvt, range); 1506 u32 intlv_sel = dram_intlv_sel(pvt, range);
1507 1507
1508 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", 1508 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1509 range, sys_addr, get_dram_limit(pvt, range)); 1509 range, sys_addr, get_dram_limit(pvt, range));
1510 1510
1511 if (dhar_valid(pvt) && 1511 if (dhar_valid(pvt) &&
1512 dhar_base(pvt) <= sys_addr && 1512 dhar_base(pvt) <= sys_addr &&
1513 sys_addr < BIT_64(32)) { 1513 sys_addr < BIT_64(32)) {
1514 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n", 1514 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1515 sys_addr); 1515 sys_addr);
1516 return -EINVAL; 1516 return -EINVAL;
1517 } 1517 }
1518 1518
1519 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en))) 1519 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1520 return -EINVAL; 1520 return -EINVAL;
1521 1521
1522 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); 1522 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1523 1523
1524 dct_sel_base = dct_sel_baseaddr(pvt); 1524 dct_sel_base = dct_sel_baseaddr(pvt);
1525 1525
1526 /* 1526 /*
1527 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to 1527 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1528 * select between DCT0 and DCT1. 1528 * select between DCT0 and DCT1.
1529 */ 1529 */
1530 if (dct_high_range_enabled(pvt) && 1530 if (dct_high_range_enabled(pvt) &&
1531 !dct_ganging_enabled(pvt) && 1531 !dct_ganging_enabled(pvt) &&
1532 ((sys_addr >> 27) >= (dct_sel_base >> 11))) 1532 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1533 high_range = true; 1533 high_range = true;
1534 1534
1535 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en); 1535 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1536 1536
1537 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr, 1537 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1538 high_range, dct_sel_base); 1538 high_range, dct_sel_base);
1539 1539
1540 /* Remove node interleaving, see F1x120 */ 1540 /* Remove node interleaving, see F1x120 */
1541 if (intlv_en) 1541 if (intlv_en)
1542 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) | 1542 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1543 (chan_addr & 0xfff); 1543 (chan_addr & 0xfff);
1544 1544
1545 /* remove channel interleave */ 1545 /* remove channel interleave */
1546 if (dct_interleave_enabled(pvt) && 1546 if (dct_interleave_enabled(pvt) &&
1547 !dct_high_range_enabled(pvt) && 1547 !dct_high_range_enabled(pvt) &&
1548 !dct_ganging_enabled(pvt)) { 1548 !dct_ganging_enabled(pvt)) {
1549 1549
1550 if (dct_sel_interleave_addr(pvt) != 1) { 1550 if (dct_sel_interleave_addr(pvt) != 1) {
1551 if (dct_sel_interleave_addr(pvt) == 0x3) 1551 if (dct_sel_interleave_addr(pvt) == 0x3)
1552 /* hash 9 */ 1552 /* hash 9 */
1553 chan_addr = ((chan_addr >> 10) << 9) | 1553 chan_addr = ((chan_addr >> 10) << 9) |
1554 (chan_addr & 0x1ff); 1554 (chan_addr & 0x1ff);
1555 else 1555 else
1556 /* A[6] or hash 6 */ 1556 /* A[6] or hash 6 */
1557 chan_addr = ((chan_addr >> 7) << 6) | 1557 chan_addr = ((chan_addr >> 7) << 6) |
1558 (chan_addr & 0x3f); 1558 (chan_addr & 0x3f);
1559 } else 1559 } else
1560 /* A[12] */ 1560 /* A[12] */
1561 chan_addr = ((chan_addr >> 13) << 12) | 1561 chan_addr = ((chan_addr >> 13) << 12) |
1562 (chan_addr & 0xfff); 1562 (chan_addr & 0xfff);
1563 } 1563 }
1564 1564
1565 debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); 1565 debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
1566 1566
1567 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); 1567 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1568 1568
1569 if (cs_found >= 0) { 1569 if (cs_found >= 0) {
1570 *nid = node_id; 1570 *nid = node_id;
1571 *chan_sel = channel; 1571 *chan_sel = channel;
1572 } 1572 }
1573 return cs_found; 1573 return cs_found;
1574 } 1574 }
1575 1575
1576 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, 1576 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1577 int *node, int *chan_sel) 1577 int *node, int *chan_sel)
1578 { 1578 {
1579 int cs_found = -EINVAL; 1579 int cs_found = -EINVAL;
1580 unsigned range; 1580 unsigned range;
1581 1581
1582 for (range = 0; range < DRAM_RANGES; range++) { 1582 for (range = 0; range < DRAM_RANGES; range++) {
1583 1583
1584 if (!dram_rw(pvt, range)) 1584 if (!dram_rw(pvt, range))
1585 continue; 1585 continue;
1586 1586
1587 if ((get_dram_base(pvt, range) <= sys_addr) && 1587 if ((get_dram_base(pvt, range) <= sys_addr) &&
1588 (get_dram_limit(pvt, range) >= sys_addr)) { 1588 (get_dram_limit(pvt, range) >= sys_addr)) {
1589 1589
1590 cs_found = f1x_match_to_this_node(pvt, range, 1590 cs_found = f1x_match_to_this_node(pvt, range,
1591 sys_addr, node, 1591 sys_addr, node,
1592 chan_sel); 1592 chan_sel);
1593 if (cs_found >= 0) 1593 if (cs_found >= 0)
1594 break; 1594 break;
1595 } 1595 }
1596 } 1596 }
1597 return cs_found; 1597 return cs_found;
1598 } 1598 }
1599 1599
1600 /* 1600 /*
1601 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps 1601 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1602 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW). 1602 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1603 * 1603 *
1604 * The @sys_addr is usually an error address received from the hardware 1604 * The @sys_addr is usually an error address received from the hardware
1605 * (MCX_ADDR). 1605 * (MCX_ADDR).
1606 */ 1606 */
1607 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, 1607 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1608 u16 syndrome) 1608 u16 syndrome)
1609 { 1609 {
1610 struct amd64_pvt *pvt = mci->pvt_info; 1610 struct amd64_pvt *pvt = mci->pvt_info;
1611 u32 page, offset; 1611 u32 page, offset;
1612 int nid, csrow, chan = 0; 1612 int nid, csrow, chan = 0;
1613 1613
1614 error_address_to_page_and_offset(sys_addr, &page, &offset); 1614 error_address_to_page_and_offset(sys_addr, &page, &offset);
1615 1615
1616 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); 1616 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1617 1617
1618 if (csrow < 0) { 1618 if (csrow < 0) {
1619 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1619 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1620 page, offset, syndrome, 1620 page, offset, syndrome,
1621 -1, -1, -1, 1621 -1, -1, -1,
1622 EDAC_MOD_STR, 1622 EDAC_MOD_STR,
1623 "failed to map error addr to a csrow", 1623 "failed to map error addr to a csrow",
1624 NULL); 1624 NULL);
1625 return; 1625 return;
1626 } 1626 }
1627 1627
1628 /* 1628 /*
1629 * We need the syndromes for channel detection only when we're 1629 * We need the syndromes for channel detection only when we're
1630 * ganged. Otherwise @chan should already contain the channel at 1630 * ganged. Otherwise @chan should already contain the channel at
1631 * this point. 1631 * this point.
1632 */ 1632 */
1633 if (dct_ganging_enabled(pvt)) 1633 if (dct_ganging_enabled(pvt))
1634 chan = get_channel_from_ecc_syndrome(mci, syndrome); 1634 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1635 1635
1636 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1636 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1637 page, offset, syndrome, 1637 page, offset, syndrome,
1638 csrow, chan, -1, 1638 csrow, chan, -1,
1639 EDAC_MOD_STR, "", NULL); 1639 EDAC_MOD_STR, "", NULL);
1640 } 1640 }
1641 1641
1642 /* 1642 /*
1643 * debug routine to display the memory sizes of all logical DIMMs and their 1643 * debug routine to display the memory sizes of all logical DIMMs and their
1644 * CSROWs 1644 * CSROWs
1645 */ 1645 */
1646 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) 1646 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1647 { 1647 {
1648 int dimm, size0, size1, factor = 0; 1648 int dimm, size0, size1, factor = 0;
1649 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; 1649 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1650 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; 1650 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1651 1651
1652 if (boot_cpu_data.x86 == 0xf) { 1652 if (boot_cpu_data.x86 == 0xf) {
1653 if (pvt->dclr0 & WIDTH_128) 1653 if (pvt->dclr0 & WIDTH_128)
1654 factor = 1; 1654 factor = 1;
1655 1655
1656 /* K8 families < revF not supported yet */ 1656 /* K8 families < revF not supported yet */
1657 if (pvt->ext_model < K8_REV_F) 1657 if (pvt->ext_model < K8_REV_F)
1658 return; 1658 return;
1659 else 1659 else
1660 WARN_ON(ctrl != 0); 1660 WARN_ON(ctrl != 0);
1661 } 1661 }
1662 1662
1663 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; 1663 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1664 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases 1664 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1665 : pvt->csels[0].csbases; 1665 : pvt->csels[0].csbases;
1666 1666
1667 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); 1667 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1668 1668
1669 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); 1669 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1670 1670
1671 /* Dump memory sizes for DIMM and its CSROWs */ 1671 /* Dump memory sizes for DIMM and its CSROWs */
1672 for (dimm = 0; dimm < 4; dimm++) { 1672 for (dimm = 0; dimm < 4; dimm++) {
1673 1673
1674 size0 = 0; 1674 size0 = 0;
1675 if (dcsb[dimm*2] & DCSB_CS_ENABLE) 1675 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1676 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 1676 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1677 DBAM_DIMM(dimm, dbam)); 1677 DBAM_DIMM(dimm, dbam));
1678 1678
1679 size1 = 0; 1679 size1 = 0;
1680 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) 1680 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1681 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 1681 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1682 DBAM_DIMM(dimm, dbam)); 1682 DBAM_DIMM(dimm, dbam));
1683 1683
1684 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 1684 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1685 dimm * 2, size0 << factor, 1685 dimm * 2, size0 << factor,
1686 dimm * 2 + 1, size1 << factor); 1686 dimm * 2 + 1, size1 << factor);
1687 } 1687 }
1688 } 1688 }
1689 1689
1690 static struct amd64_family_type amd64_family_types[] = { 1690 static struct amd64_family_type amd64_family_types[] = {
1691 [K8_CPUS] = { 1691 [K8_CPUS] = {
1692 .ctl_name = "K8", 1692 .ctl_name = "K8",
1693 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, 1693 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1694 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, 1694 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1695 .ops = { 1695 .ops = {
1696 .early_channel_count = k8_early_channel_count, 1696 .early_channel_count = k8_early_channel_count,
1697 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, 1697 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1698 .dbam_to_cs = k8_dbam_to_chip_select, 1698 .dbam_to_cs = k8_dbam_to_chip_select,
1699 .read_dct_pci_cfg = k8_read_dct_pci_cfg, 1699 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1700 } 1700 }
1701 }, 1701 },
1702 [F10_CPUS] = { 1702 [F10_CPUS] = {
1703 .ctl_name = "F10h", 1703 .ctl_name = "F10h",
1704 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, 1704 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1705 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, 1705 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1706 .ops = { 1706 .ops = {
1707 .early_channel_count = f1x_early_channel_count, 1707 .early_channel_count = f1x_early_channel_count,
1708 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, 1708 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1709 .dbam_to_cs = f10_dbam_to_chip_select, 1709 .dbam_to_cs = f10_dbam_to_chip_select,
1710 .read_dct_pci_cfg = f10_read_dct_pci_cfg, 1710 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1711 } 1711 }
1712 }, 1712 },
1713 [F15_CPUS] = { 1713 [F15_CPUS] = {
1714 .ctl_name = "F15h", 1714 .ctl_name = "F15h",
1715 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, 1715 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1716 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3, 1716 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1717 .ops = { 1717 .ops = {
1718 .early_channel_count = f1x_early_channel_count, 1718 .early_channel_count = f1x_early_channel_count,
1719 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, 1719 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1720 .dbam_to_cs = f15_dbam_to_chip_select, 1720 .dbam_to_cs = f15_dbam_to_chip_select,
1721 .read_dct_pci_cfg = f15_read_dct_pci_cfg, 1721 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1722 } 1722 }
1723 }, 1723 },
1724 }; 1724 };
1725 1725
1726 static struct pci_dev *pci_get_related_function(unsigned int vendor, 1726 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1727 unsigned int device, 1727 unsigned int device,
1728 struct pci_dev *related) 1728 struct pci_dev *related)
1729 { 1729 {
1730 struct pci_dev *dev = NULL; 1730 struct pci_dev *dev = NULL;
1731 1731
1732 dev = pci_get_device(vendor, device, dev); 1732 dev = pci_get_device(vendor, device, dev);
1733 while (dev) { 1733 while (dev) {
1734 if ((dev->bus->number == related->bus->number) && 1734 if ((dev->bus->number == related->bus->number) &&
1735 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) 1735 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1736 break; 1736 break;
1737 dev = pci_get_device(vendor, device, dev); 1737 dev = pci_get_device(vendor, device, dev);
1738 } 1738 }
1739 1739
1740 return dev; 1740 return dev;
1741 } 1741 }
1742 1742
1743 /* 1743 /*
1744 * These are tables of eigenvectors (one per line) which can be used for the 1744 * These are tables of eigenvectors (one per line) which can be used for the
1745 * construction of the syndrome tables. The modified syndrome search algorithm 1745 * construction of the syndrome tables. The modified syndrome search algorithm
1746 * uses those to find the symbol in error and thus the DIMM. 1746 * uses those to find the symbol in error and thus the DIMM.
1747 * 1747 *
1748 * Algorithm courtesy of Ross LaFetra from AMD. 1748 * Algorithm courtesy of Ross LaFetra from AMD.
1749 */ 1749 */
1750 static u16 x4_vectors[] = { 1750 static u16 x4_vectors[] = {
1751 0x2f57, 0x1afe, 0x66cc, 0xdd88, 1751 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1752 0x11eb, 0x3396, 0x7f4c, 0xeac8, 1752 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1753 0x0001, 0x0002, 0x0004, 0x0008, 1753 0x0001, 0x0002, 0x0004, 0x0008,
1754 0x1013, 0x3032, 0x4044, 0x8088, 1754 0x1013, 0x3032, 0x4044, 0x8088,
1755 0x106b, 0x30d6, 0x70fc, 0xe0a8, 1755 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1756 0x4857, 0xc4fe, 0x13cc, 0x3288, 1756 0x4857, 0xc4fe, 0x13cc, 0x3288,
1757 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, 1757 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1758 0x1f39, 0x251e, 0xbd6c, 0x6bd8, 1758 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1759 0x15c1, 0x2a42, 0x89ac, 0x4758, 1759 0x15c1, 0x2a42, 0x89ac, 0x4758,
1760 0x2b03, 0x1602, 0x4f0c, 0xca08, 1760 0x2b03, 0x1602, 0x4f0c, 0xca08,
1761 0x1f07, 0x3a0e, 0x6b04, 0xbd08, 1761 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1762 0x8ba7, 0x465e, 0x244c, 0x1cc8, 1762 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1763 0x2b87, 0x164e, 0x642c, 0xdc18, 1763 0x2b87, 0x164e, 0x642c, 0xdc18,
1764 0x40b9, 0x80de, 0x1094, 0x20e8, 1764 0x40b9, 0x80de, 0x1094, 0x20e8,
1765 0x27db, 0x1eb6, 0x9dac, 0x7b58, 1765 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1766 0x11c1, 0x2242, 0x84ac, 0x4c58, 1766 0x11c1, 0x2242, 0x84ac, 0x4c58,
1767 0x1be5, 0x2d7a, 0x5e34, 0xa718, 1767 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1768 0x4b39, 0x8d1e, 0x14b4, 0x28d8, 1768 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1769 0x4c97, 0xc87e, 0x11fc, 0x33a8, 1769 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1770 0x8e97, 0x497e, 0x2ffc, 0x1aa8, 1770 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1771 0x16b3, 0x3d62, 0x4f34, 0x8518, 1771 0x16b3, 0x3d62, 0x4f34, 0x8518,
1772 0x1e2f, 0x391a, 0x5cac, 0xf858, 1772 0x1e2f, 0x391a, 0x5cac, 0xf858,
1773 0x1d9f, 0x3b7a, 0x572c, 0xfe18, 1773 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1774 0x15f5, 0x2a5a, 0x5264, 0xa3b8, 1774 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1775 0x1dbb, 0x3b66, 0x715c, 0xe3f8, 1775 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1776 0x4397, 0xc27e, 0x17fc, 0x3ea8, 1776 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1777 0x1617, 0x3d3e, 0x6464, 0xb8b8, 1777 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1778 0x23ff, 0x12aa, 0xab6c, 0x56d8, 1778 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1779 0x2dfb, 0x1ba6, 0x913c, 0x7328, 1779 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1780 0x185d, 0x2ca6, 0x7914, 0x9e28, 1780 0x185d, 0x2ca6, 0x7914, 0x9e28,
1781 0x171b, 0x3e36, 0x7d7c, 0xebe8, 1781 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1782 0x4199, 0x82ee, 0x19f4, 0x2e58, 1782 0x4199, 0x82ee, 0x19f4, 0x2e58,
1783 0x4807, 0xc40e, 0x130c, 0x3208, 1783 0x4807, 0xc40e, 0x130c, 0x3208,
1784 0x1905, 0x2e0a, 0x5804, 0xac08, 1784 0x1905, 0x2e0a, 0x5804, 0xac08,
1785 0x213f, 0x132a, 0xadfc, 0x5ba8, 1785 0x213f, 0x132a, 0xadfc, 0x5ba8,
1786 0x19a9, 0x2efe, 0xb5cc, 0x6f88, 1786 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1787 }; 1787 };
1788 1788
1789 static u16 x8_vectors[] = { 1789 static u16 x8_vectors[] = {
1790 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, 1790 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1791 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, 1791 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1792 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, 1792 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1793 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80, 1793 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1794 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, 1794 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1795 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, 1795 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1796 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, 1796 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1797 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, 1797 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1798 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, 1798 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1799 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, 1799 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1800 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, 1800 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1801 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, 1801 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1802 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, 1802 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1803 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, 1803 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1804 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, 1804 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1805 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, 1805 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1806 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, 1806 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1807 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 1807 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1808 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, 1808 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1809 }; 1809 };
1810 1810
1811 static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, 1811 static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
1812 unsigned v_dim) 1812 unsigned v_dim)
1813 { 1813 {
1814 unsigned int i, err_sym; 1814 unsigned int i, err_sym;
1815 1815
1816 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { 1816 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1817 u16 s = syndrome; 1817 u16 s = syndrome;
1818 unsigned v_idx = err_sym * v_dim; 1818 unsigned v_idx = err_sym * v_dim;
1819 unsigned v_end = (err_sym + 1) * v_dim; 1819 unsigned v_end = (err_sym + 1) * v_dim;
1820 1820
1821 /* walk over all 16 bits of the syndrome */ 1821 /* walk over all 16 bits of the syndrome */
1822 for (i = 1; i < (1U << 16); i <<= 1) { 1822 for (i = 1; i < (1U << 16); i <<= 1) {
1823 1823
1824 /* if bit is set in that eigenvector... */ 1824 /* if bit is set in that eigenvector... */
1825 if (v_idx < v_end && vectors[v_idx] & i) { 1825 if (v_idx < v_end && vectors[v_idx] & i) {
1826 u16 ev_comp = vectors[v_idx++]; 1826 u16 ev_comp = vectors[v_idx++];
1827 1827
1828 /* ... and bit set in the modified syndrome, */ 1828 /* ... and bit set in the modified syndrome, */
1829 if (s & i) { 1829 if (s & i) {
1830 /* remove it. */ 1830 /* remove it. */
1831 s ^= ev_comp; 1831 s ^= ev_comp;
1832 1832
1833 if (!s) 1833 if (!s)
1834 return err_sym; 1834 return err_sym;
1835 } 1835 }
1836 1836
1837 } else if (s & i) 1837 } else if (s & i)
1838 /* can't get to zero, move to next symbol */ 1838 /* can't get to zero, move to next symbol */
1839 break; 1839 break;
1840 } 1840 }
1841 } 1841 }
1842 1842
1843 debugf0("syndrome(%x) not found\n", syndrome); 1843 debugf0("syndrome(%x) not found\n", syndrome);
1844 return -1; 1844 return -1;
1845 } 1845 }
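/*
 * Editor's note: a toy 2-symbol, 2-vector-per-symbol set (not taken from
 * the tables above) showing the elimination walk. A symbol wins when
 * XOR-ing its eigenvectors, lowest set bit first, cancels the syndrome
 * to zero.
 *
 * decode_syndrome(0x0007, toy_vectors, 4, 2) returns 0:
 *   s = 0x7; bit 0 is set in 0x0003 and in s -> s ^= 0x0003 -> 0x4;
 *   bit 2 is set in 0x0004 and in s -> s ^= 0x0004 -> 0x0 -> symbol 0.
 */
static u16 toy_vectors[] = {
	0x0003, 0x0004,		/* symbol 0 */
	0x0005, 0x0002,		/* symbol 1 */
};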
1846 1846
1847 static int map_err_sym_to_channel(int err_sym, int sym_size) 1847 static int map_err_sym_to_channel(int err_sym, int sym_size)
1848 { 1848 {
1849 if (sym_size == 4) 1849 if (sym_size == 4)
1850 switch (err_sym) { 1850 switch (err_sym) {
1851 case 0x20: 1851 case 0x20:
1852 case 0x21: 1852 case 0x21:
1853 return 0; 1853 return 0;
1854 break; 1854 break;
1855 case 0x22: 1855 case 0x22:
1856 case 0x23: 1856 case 0x23:
1857 return 1; 1857 return 1;
1858 break; 1858 break;
1859 default: 1859 default:
1860 return err_sym >> 4; 1860 return err_sym >> 4;
1861 break; 1861 break;
1862 } 1862 }
1863 /* x8 symbols */ 1863 /* x8 symbols */
1864 else 1864 else
1865 switch (err_sym) { 1865 switch (err_sym) {
1866 /* imaginary bits not in a DIMM */ 1866 /* imaginary bits not in a DIMM */
1867 case 0x10: 1867 case 0x10:
1868 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", 1868 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1869 err_sym); 1869 err_sym);
1870 return -1; 1870 return -1;
1871 break; 1871 break;
1872 1872
1873 case 0x11: 1873 case 0x11:
1874 return 0; 1874 return 0;
1875 break; 1875 break;
1876 case 0x12: 1876 case 0x12:
1877 return 1; 1877 return 1;
1878 break; 1878 break;
1879 default: 1879 default:
1880 return err_sym >> 3; 1880 return err_sym >> 3;
1881 break; 1881 break;
1882 } 1882 }
1883 return -1; 1883 return -1;
1884 } 1884 }
1885 1885
1886 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) 1886 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1887 { 1887 {
1888 struct amd64_pvt *pvt = mci->pvt_info; 1888 struct amd64_pvt *pvt = mci->pvt_info;
1889 int err_sym = -1; 1889 int err_sym = -1;
1890 1890
1891 if (pvt->ecc_sym_sz == 8) 1891 if (pvt->ecc_sym_sz == 8)
1892 err_sym = decode_syndrome(syndrome, x8_vectors, 1892 err_sym = decode_syndrome(syndrome, x8_vectors,
1893 ARRAY_SIZE(x8_vectors), 1893 ARRAY_SIZE(x8_vectors),
1894 pvt->ecc_sym_sz); 1894 pvt->ecc_sym_sz);
1895 else if (pvt->ecc_sym_sz == 4) 1895 else if (pvt->ecc_sym_sz == 4)
1896 err_sym = decode_syndrome(syndrome, x4_vectors, 1896 err_sym = decode_syndrome(syndrome, x4_vectors,
1897 ARRAY_SIZE(x4_vectors), 1897 ARRAY_SIZE(x4_vectors),
1898 pvt->ecc_sym_sz); 1898 pvt->ecc_sym_sz);
1899 else { 1899 else {
1900 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); 1900 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1901 return err_sym; 1901 return err_sym;
1902 } 1902 }
1903 1903
1904 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); 1904 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1905 } 1905 }
1906 1906
1907 /* 1907 /*
1908 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR 1908 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1909 * ADDRESS and process. 1909 * ADDRESS and process.
1910 */ 1910 */
1911 static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m) 1911 static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1912 { 1912 {
1913 struct amd64_pvt *pvt = mci->pvt_info; 1913 struct amd64_pvt *pvt = mci->pvt_info;
1914 u64 sys_addr; 1914 u64 sys_addr;
1915 u16 syndrome; 1915 u16 syndrome;
1916 1916
1917 /* Ensure that the Error Address is VALID */ 1917 /* Ensure that the Error Address is VALID */
1918 if (!(m->status & MCI_STATUS_ADDRV)) { 1918 if (!(m->status & MCI_STATUS_ADDRV)) {
1919 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1919 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1920 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1920 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1921 0, 0, 0, 1921 0, 0, 0,
1922 -1, -1, -1, 1922 -1, -1, -1,
1923 EDAC_MOD_STR, 1923 EDAC_MOD_STR,
1924 "HW has no ERROR_ADDRESS available", 1924 "HW has no ERROR_ADDRESS available",
1925 NULL); 1925 NULL);
1926 return; 1926 return;
1927 } 1927 }
1928 1928
1929 sys_addr = get_error_address(m); 1929 sys_addr = get_error_address(m);
1930 syndrome = extract_syndrome(m->status); 1930 syndrome = extract_syndrome(m->status);
1931 1931
1932 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); 1932 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1933 1933
1934 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome); 1934 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
1935 } 1935 }
1936 1936
1937 /* Handle any Uncorrectable Errors (UEs) */ 1937 /* Handle any Uncorrectable Errors (UEs) */
1938 static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) 1938 static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1939 { 1939 {
1940 struct mem_ctl_info *log_mci, *src_mci = NULL; 1940 struct mem_ctl_info *log_mci, *src_mci = NULL;
1941 int csrow; 1941 int csrow;
1942 u64 sys_addr; 1942 u64 sys_addr;
1943 u32 page, offset; 1943 u32 page, offset;
1944 1944
1945 log_mci = mci; 1945 log_mci = mci;
1946 1946
1947 if (!(m->status & MCI_STATUS_ADDRV)) { 1947 if (!(m->status & MCI_STATUS_ADDRV)) {
1948 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1948 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1949 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1949 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1950 0, 0, 0, 1950 0, 0, 0,
1951 -1, -1, -1, 1951 -1, -1, -1,
1952 EDAC_MOD_STR, 1952 EDAC_MOD_STR,
1953 "HW has no ERROR_ADDRESS available", 1953 "HW has no ERROR_ADDRESS available",
1954 NULL); 1954 NULL);
1955 return; 1955 return;
1956 } 1956 }
1957 1957
1958 sys_addr = get_error_address(m); 1958 sys_addr = get_error_address(m);
1959 error_address_to_page_and_offset(sys_addr, &page, &offset); 1959 error_address_to_page_and_offset(sys_addr, &page, &offset);
1960 1960
1961 /* 1961 /*
1962 * Find out which node the error address belongs to. This may be 1962 * Find out which node the error address belongs to. This may be
1963 * different from the node that detected the error. 1963 * different from the node that detected the error.
1964 */ 1964 */
1965 src_mci = find_mc_by_sys_addr(mci, sys_addr); 1965 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1966 if (!src_mci) { 1966 if (!src_mci) {
1967 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", 1967 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1968 (unsigned long)sys_addr); 1968 (unsigned long)sys_addr);
1969 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1969 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1970 page, offset, 0, 1970 page, offset, 0,
1971 -1, -1, -1, 1971 -1, -1, -1,
1972 EDAC_MOD_STR, 1972 EDAC_MOD_STR,
1973 "ERROR ADDRESS NOT mapped to a MC", NULL); 1973 "ERROR ADDRESS NOT mapped to a MC", NULL);
1974 return; 1974 return;
1975 } 1975 }
1976 1976
1977 log_mci = src_mci; 1977 log_mci = src_mci;
1978 1978
1979 csrow = sys_addr_to_csrow(log_mci, sys_addr); 1979 csrow = sys_addr_to_csrow(log_mci, sys_addr);
1980 if (csrow < 0) { 1980 if (csrow < 0) {
1981 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", 1981 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1982 (unsigned long)sys_addr); 1982 (unsigned long)sys_addr);
1983 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1983 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1984 page, offset, 0, 1984 page, offset, 0,
1985 -1, -1, -1, 1985 -1, -1, -1,
1986 EDAC_MOD_STR, 1986 EDAC_MOD_STR,
1987 "ERROR ADDRESS NOT mapped to CS", 1987 "ERROR ADDRESS NOT mapped to CS",
1988 NULL); 1988 NULL);
1989 } else { 1989 } else {
1990 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1990 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1991 page, offset, 0, 1991 page, offset, 0,
1992 csrow, -1, -1, 1992 csrow, -1, -1,
1993 EDAC_MOD_STR, "", NULL); 1993 EDAC_MOD_STR, "", NULL);
1994 } 1994 }
1995 } 1995 }
1996 1996
1997 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, 1997 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1998 struct mce *m) 1998 struct mce *m)
1999 { 1999 {
2000 u16 ec = EC(m->status); 2000 u16 ec = EC(m->status);
2001 u8 xec = XEC(m->status, 0x1f); 2001 u8 xec = XEC(m->status, 0x1f);
2002 u8 ecc_type = (m->status >> 45) & 0x3; 2002 u8 ecc_type = (m->status >> 45) & 0x3;
2003 2003
2004 /* Bail out early if this was an 'observed' error */ 2004 /* Bail out early if this was an 'observed' error */
2005 if (PP(ec) == NBSL_PP_OBS) 2005 if (PP(ec) == NBSL_PP_OBS)
2006 return; 2006 return;
2007 2007
2008 /* Handle only ECC errors */ 2008 /* Handle only ECC errors */
2009 if (xec && xec != F10_NBSL_EXT_ERR_ECC) 2009 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2010 return; 2010 return;
2011 2011
2012 if (ecc_type == 2) 2012 if (ecc_type == 2)
2013 amd64_handle_ce(mci, m); 2013 amd64_handle_ce(mci, m);
2014 else if (ecc_type == 1) 2014 else if (ecc_type == 1)
2015 amd64_handle_ue(mci, m); 2015 amd64_handle_ue(mci, m);
2016 } 2016 }
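The ecc_type value dispatched on above is simply bits 46:45 of MCi_STATUS. A small hedged sketch of the extraction, using a made-up status value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical MCi_STATUS with bits 46:45 = 0b10 (correctable) */
	uint64_t status = (uint64_t)0x2 << 45;
	int ecc_type = (status >> 45) & 0x3;

	/* per the dispatch above: 2 -> amd64_handle_ce, 1 -> amd64_handle_ue */
	printf("ecc_type = %d\n", ecc_type);
	return 0;
}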
2017 2017
2018 void amd64_decode_bus_error(int node_id, struct mce *m) 2018 void amd64_decode_bus_error(int node_id, struct mce *m)
2019 { 2019 {
2020 __amd64_decode_bus_error(mcis[node_id], m); 2020 __amd64_decode_bus_error(mcis[node_id], m);
2021 } 2021 }
2022 2022
2023 /* 2023 /*
2024 * Use pvt->F2, which contains the F2 CPU PCI device, to get the related 2024 * Use pvt->F2, which contains the F2 CPU PCI device, to get the related
2025 * F1 (AddrMap) and F3 (Misc) devices. Return a negative value on error. 2025 * F1 (AddrMap) and F3 (Misc) devices. Return a negative value on error.
2026 */ 2026 */
2027 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id) 2027 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2028 { 2028 {
2029 /* Reserve the ADDRESS MAP Device */ 2029 /* Reserve the ADDRESS MAP Device */
2030 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2); 2030 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2031 if (!pvt->F1) { 2031 if (!pvt->F1) {
2032 amd64_err("error address map device not found: " 2032 amd64_err("error address map device not found: "
2033 "vendor %x device 0x%x (broken BIOS?)\n", 2033 "vendor %x device 0x%x (broken BIOS?)\n",
2034 PCI_VENDOR_ID_AMD, f1_id); 2034 PCI_VENDOR_ID_AMD, f1_id);
2035 return -ENODEV; 2035 return -ENODEV;
2036 } 2036 }
2037 2037
2038 /* Reserve the MISC Device */ 2038 /* Reserve the MISC Device */
2039 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2); 2039 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2040 if (!pvt->F3) { 2040 if (!pvt->F3) {
2041 pci_dev_put(pvt->F1); 2041 pci_dev_put(pvt->F1);
2042 pvt->F1 = NULL; 2042 pvt->F1 = NULL;
2043 2043
2044 amd64_err("error F3 device not found: " 2044 amd64_err("error F3 device not found: "
2045 "vendor %x device 0x%x (broken BIOS?)\n", 2045 "vendor %x device 0x%x (broken BIOS?)\n",
2046 PCI_VENDOR_ID_AMD, f3_id); 2046 PCI_VENDOR_ID_AMD, f3_id);
2047 2047
2048 return -ENODEV; 2048 return -ENODEV;
2049 } 2049 }
2050 debugf1("F1: %s\n", pci_name(pvt->F1)); 2050 debugf1("F1: %s\n", pci_name(pvt->F1));
2051 debugf1("F2: %s\n", pci_name(pvt->F2)); 2051 debugf1("F2: %s\n", pci_name(pvt->F2));
2052 debugf1("F3: %s\n", pci_name(pvt->F3)); 2052 debugf1("F3: %s\n", pci_name(pvt->F3));
2053 2053
2054 return 0; 2054 return 0;
2055 } 2055 }
2056 2056
2057 static void free_mc_sibling_devs(struct amd64_pvt *pvt) 2057 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2058 { 2058 {
2059 pci_dev_put(pvt->F1); 2059 pci_dev_put(pvt->F1);
2060 pci_dev_put(pvt->F3); 2060 pci_dev_put(pvt->F3);
2061 } 2061 }
2062 2062
2063 /* 2063 /*
2064 * Retrieve the hardware registers of the memory controller (this includes the 2064 * Retrieve the hardware registers of the memory controller (this includes the
2065 * 'Address Map' and 'Misc' device regs) 2065 * 'Address Map' and 'Misc' device regs)
2066 */ 2066 */
2067 static void read_mc_regs(struct amd64_pvt *pvt) 2067 static void read_mc_regs(struct amd64_pvt *pvt)
2068 { 2068 {
2069 struct cpuinfo_x86 *c = &boot_cpu_data; 2069 struct cpuinfo_x86 *c = &boot_cpu_data;
2070 u64 msr_val; 2070 u64 msr_val;
2071 u32 tmp; 2071 u32 tmp;
2072 unsigned range; 2072 unsigned range;
2073 2073
2074 /* 2074 /*
2075 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since 2075 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2076 * those are Read-As-Zero 2076 * those are Read-As-Zero
2077 */ 2077 */
2078 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); 2078 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2079 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); 2079 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
2080 2080
2081 /* check first whether TOP_MEM2 is enabled */ 2081 /* check first whether TOP_MEM2 is enabled */
2082 rdmsrl(MSR_K8_SYSCFG, msr_val); 2082 rdmsrl(MSR_K8_SYSCFG, msr_val);
2083 if (msr_val & (1U << 21)) { 2083 if (msr_val & (1U << 21)) {
2084 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); 2084 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2085 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); 2085 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2086 } else 2086 } else
2087 debugf0(" TOP_MEM2 disabled.\n"); 2087 debugf0(" TOP_MEM2 disabled.\n");
2088 2088
2089 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); 2089 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2090 2090
2091 read_dram_ctl_register(pvt); 2091 read_dram_ctl_register(pvt);
2092 2092
2093 for (range = 0; range < DRAM_RANGES; range++) { 2093 for (range = 0; range < DRAM_RANGES; range++) {
2094 u8 rw; 2094 u8 rw;
2095 2095
2096 /* read settings for this DRAM range */ 2096 /* read settings for this DRAM range */
2097 read_dram_base_limit_regs(pvt, range); 2097 read_dram_base_limit_regs(pvt, range);
2098 2098
2099 rw = dram_rw(pvt, range); 2099 rw = dram_rw(pvt, range);
2100 if (!rw) 2100 if (!rw)
2101 continue; 2101 continue;
2102 2102
2103 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", 2103 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2104 range, 2104 range,
2105 get_dram_base(pvt, range), 2105 get_dram_base(pvt, range),
2106 get_dram_limit(pvt, range)); 2106 get_dram_limit(pvt, range));
2107 2107
2108 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", 2108 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2109 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", 2109 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2110 (rw & 0x1) ? "R" : "-", 2110 (rw & 0x1) ? "R" : "-",
2111 (rw & 0x2) ? "W" : "-", 2111 (rw & 0x2) ? "W" : "-",
2112 dram_intlv_sel(pvt, range), 2112 dram_intlv_sel(pvt, range),
2113 dram_dst_node(pvt, range)); 2113 dram_dst_node(pvt, range));
2114 } 2114 }
2115 2115
2116 read_dct_base_mask(pvt); 2116 read_dct_base_mask(pvt);
2117 2117
2118 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); 2118 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2119 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0); 2119 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2120 2120
2121 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); 2121 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2122 2122
2123 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0); 2123 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2124 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0); 2124 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2125 2125
2126 if (!dct_ganging_enabled(pvt)) { 2126 if (!dct_ganging_enabled(pvt)) {
2127 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1); 2127 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2128 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1); 2128 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2129 } 2129 }
2130 2130
2131 pvt->ecc_sym_sz = 4; 2131 pvt->ecc_sym_sz = 4;
2132 2132
2133 if (c->x86 >= 0x10) { 2133 if (c->x86 >= 0x10) {
2134 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); 2134 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2135 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); 2135 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2136 2136
2137 /* F10h, revD and later can do x8 ECC too */ 2137 /* F10h, revD and later can do x8 ECC too */
2138 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) 2138 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
2139 pvt->ecc_sym_sz = 8; 2139 pvt->ecc_sym_sz = 8;
2140 } 2140 }
2141 dump_misc_regs(pvt); 2141 dump_misc_regs(pvt);
2142 } 2142 }
2143 2143
2144 /* 2144 /*
2145 * NOTE: CPU Revision Dependent code 2145 * NOTE: CPU Revision Dependent code
2146 * 2146 *
2147 * Input: 2147 * Input:
2148 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1) 2148 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2149 * k8 private pointer to --> 2149 * k8 private pointer to -->
2150 * DRAM Bank Address mapping register 2150 * DRAM Bank Address mapping register
2151 * node_id 2151 * node_id
2152 * DCL register where dual_channel_active is 2152 * DCL register where dual_channel_active is
2153 * 2153 *
2154 * The DBAM register consists of 4 sets of 4 bits each, defined as follows: 2154 * The DBAM register consists of 4 sets of 4 bits each, defined as follows:
2155 * 2155 *
2156 * Bits: CSROWs 2156 * Bits: CSROWs
2157 * 0-3 CSROWs 0 and 1 2157 * 0-3 CSROWs 0 and 1
2158 * 4-7 CSROWs 2 and 3 2158 * 4-7 CSROWs 2 and 3
2159 * 8-11 CSROWs 4 and 5 2159 * 8-11 CSROWs 4 and 5
2160 * 12-15 CSROWs 6 and 7 2160 * 12-15 CSROWs 6 and 7
2161 * 2161 *
2162 * Values range from 0 to 15. 2162 * Values range from 0 to 15.
2163 * The meaning of the values depends on CPU revision and dual-channel state; 2163 * The meaning of the values depends on CPU revision and dual-channel state;
2164 * see the relevant BKDG for more info. 2164 * see the relevant BKDG for more info.
2165 * 2165 *
2166 * The memory controller provides for a total of only 8 CSROWs in its current 2166 * The memory controller provides for a total of only 8 CSROWs in its current
2167 * architecture. Each "pair" of CSROWs normally represents just one DIMM in 2167 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2168 * single-channel mode or two DIMMs in dual-channel mode. 2168 * single-channel mode or two DIMMs in dual-channel mode.
2169 * 2169 *
2170 * The following code logic collapses the various tables for CSROW based on CPU 2170 * The following code logic collapses the various tables for CSROW based on CPU
2171 * revision. 2171 * revision.
2172 * 2172 *
2173 * Returns: 2173 * Returns:
2174 * The number of PAGE_SIZE pages that the specified CSROW 2174 * The number of PAGE_SIZE pages that the specified CSROW
2175 * encompasses. 2175 * encompasses.
2176 * 2176 *
2177 */ 2177 */
2178 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) 2178 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2179 { 2179 {
2180 u32 cs_mode, nr_pages; 2180 u32 cs_mode, nr_pages;
2181 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; 2181 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2182 2182
2183 /* 2183 /*
2184 * The math here doesn't look right at first because x/2*4 could be 2184 * The math here doesn't look right at first because x/2*4 could be
2185 * simplified to x*2, but the expression relies on integer math, where 2185 * simplified to x*2, but the expression relies on integer math, where
2186 * 1/2 = 0. The intermediate value is the number of bits by which to 2186 * 1/2 = 0. The intermediate value is the number of bits by which to
2187 * shift the DBAM register to extract the proper CSROW 2187 * shift the DBAM register to extract the proper CSROW
2188 * field. 2188 * field.
2189 */ 2189 */
2190 cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF; 2190 cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
2191 2191
2192 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); 2192 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2193 2193
2194 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); 2194 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2195 debugf0(" nr_pages/channel= %u channel-count = %d\n", 2195 debugf0(" nr_pages/channel= %u channel-count = %d\n",
2196 nr_pages, pvt->channel_count); 2196 nr_pages, pvt->channel_count);
2197 2197
2198 return nr_pages; 2198 return nr_pages;
2199 } 2199 }
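To see the shift arithmetic from amd64_csrow_nr_pages() in isolation, here is a hedged userspace sketch with a made-up DBAM value; each pair of csrows shares one 4-bit mode field:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dbam = 0x00005830;	/* hypothetical register value */
	int csrow_nr;

	for (csrow_nr = 0; csrow_nr < 8; csrow_nr++) {
		uint32_t cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;

		/* csrows 0/1 -> 0x0, 2/3 -> 0x3, 4/5 -> 0x8, 6/7 -> 0x5 */
		printf("csrow %d: cs_mode %u\n", csrow_nr, (unsigned)cs_mode);
	}
	return 0;
}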
2200 2200
2201 /* 2201 /*
2202 * Initialize the array of csrow attribute instances, based on the values 2202 * Initialize the array of csrow attribute instances, based on the values
2203 * read from the PCI config hardware registers. 2203 * read from the PCI config hardware registers.
2204 */ 2204 */
2205 static int init_csrows(struct mem_ctl_info *mci) 2205 static int init_csrows(struct mem_ctl_info *mci)
2206 { 2206 {
2207 struct csrow_info *csrow; 2207 struct csrow_info *csrow;
2208 struct dimm_info *dimm;
2208 struct amd64_pvt *pvt = mci->pvt_info; 2209 struct amd64_pvt *pvt = mci->pvt_info;
2209 u64 base, mask; 2210 u64 base, mask;
2210 u32 val; 2211 u32 val;
2211 int i, j, empty = 1; 2212 int i, j, empty = 1;
2212 enum mem_type mtype; 2213 enum mem_type mtype;
2213 enum edac_type edac_mode; 2214 enum edac_type edac_mode;
2214 int nr_pages = 0; 2215 int nr_pages = 0;
2215 2216
2216 amd64_read_pci_cfg(pvt->F3, NBCFG, &val); 2217 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2217 2218
2218 pvt->nbcfg = val; 2219 pvt->nbcfg = val;
2219 2220
2220 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2221 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2221 pvt->mc_node_id, val, 2222 pvt->mc_node_id, val,
2222 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); 2223 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2223 2224
2224 for_each_chip_select(i, 0, pvt) { 2225 for_each_chip_select(i, 0, pvt) {
2225 csrow = &mci->csrows[i]; 2226 csrow = mci->csrows[i];
2226 2227
2227 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) { 2228 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
2228 debugf1("----CSROW %d EMPTY for node %d\n", i, 2229 debugf1("----CSROW %d EMPTY for node %d\n", i,
2229 pvt->mc_node_id); 2230 pvt->mc_node_id);
2230 continue; 2231 continue;
2231 } 2232 }
2232 2233
2233 debugf1("----CSROW %d VALID for MC node %d\n", 2234 debugf1("----CSROW %d VALID for MC node %d\n",
2234 i, pvt->mc_node_id); 2235 i, pvt->mc_node_id);
2235 2236
2236 empty = 0; 2237 empty = 0;
2237 if (csrow_enabled(i, 0, pvt)) 2238 if (csrow_enabled(i, 0, pvt))
2238 nr_pages = amd64_csrow_nr_pages(pvt, 0, i); 2239 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2239 if (csrow_enabled(i, 1, pvt)) 2240 if (csrow_enabled(i, 1, pvt))
2240 nr_pages += amd64_csrow_nr_pages(pvt, 1, i); 2241 nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
2241 2242
2242 get_cs_base_and_mask(pvt, i, 0, &base, &mask); 2243 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2243 /* 8 bytes of resolution */ 2244 /* 8 bytes of resolution */
2244 2245
2245 mtype = amd64_determine_memory_type(pvt, i); 2246 mtype = amd64_determine_memory_type(pvt, i);
2246 2247
2247 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); 2248 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2248 debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count); 2249 debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
2249 2250
2250 /* 2251 /*
2251 * determine whether CHIPKILL or JUST ECC or NO ECC is operating 2252 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2252 */ 2253 */
2253 if (pvt->nbcfg & NBCFG_ECC_ENABLE) 2254 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2254 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ? 2255 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2255 EDAC_S4ECD4ED : EDAC_SECDED; 2256 EDAC_S4ECD4ED : EDAC_SECDED;
2256 else 2257 else
2257 edac_mode = EDAC_NONE; 2258 edac_mode = EDAC_NONE;
2258 2259
2259 for (j = 0; j < pvt->channel_count; j++) { 2260 for (j = 0; j < pvt->channel_count; j++) {
2260 csrow->channels[j].dimm->mtype = mtype; 2261 dimm = csrow->channels[j]->dimm;
2261 csrow->channels[j].dimm->edac_mode = edac_mode; 2262 dimm->mtype = mtype;
2262 csrow->channels[j].dimm->nr_pages = nr_pages; 2263 dimm->edac_mode = edac_mode;
2264 dimm->nr_pages = nr_pages;
2263 } 2265 }
2264 } 2266 }
2265 2267
2266 return empty; 2268 return empty;
2267 } 2269 }
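The changed lines above reflect the point of this commit: csrows and channels are now reached through arrays of pointers to individually allocated objects rather than through members embedded in one flat allocation. A hedged userspace sketch of the resulting layout, using made-up toy_* struct names (error handling and frees omitted for brevity):

#include <stdio.h>
#include <stdlib.h>

struct toy_dimm    { unsigned nr_pages; };
struct toy_channel { struct toy_dimm *dimm; };
struct toy_csrow   { struct toy_channel **channels; };

int main(void)
{
	int nchan = 2, j;
	struct toy_csrow *csrow = calloc(1, sizeof(*csrow));

	/* new scheme: every kobject-backed struct is its own allocation */
	csrow->channels = calloc(nchan, sizeof(*csrow->channels));
	for (j = 0; j < nchan; j++) {
		csrow->channels[j] = calloc(1, sizeof(**csrow->channels));
		csrow->channels[j]->dimm = calloc(1, sizeof(struct toy_dimm));
		csrow->channels[j]->dimm->nr_pages = 1024;	/* arbitrary */
	}

	/* the access pattern now matches the driver: channels[j]->dimm */
	printf("%u\n", csrow->channels[0]->dimm->nr_pages);
	return 0;
}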
2268 2270
2269 /* get all cores on this DCT */ 2271 /* get all cores on this DCT */
2270 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid) 2272 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2271 { 2273 {
2272 int cpu; 2274 int cpu;
2273 2275
2274 for_each_online_cpu(cpu) 2276 for_each_online_cpu(cpu)
2275 if (amd_get_nb_id(cpu) == nid) 2277 if (amd_get_nb_id(cpu) == nid)
2276 cpumask_set_cpu(cpu, mask); 2278 cpumask_set_cpu(cpu, mask);
2277 } 2279 }
2278 2280
2279 /* check MCG_CTL on all the cpus on this node */ 2281 /* check MCG_CTL on all the cpus on this node */
2280 static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) 2282 static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2281 { 2283 {
2282 cpumask_var_t mask; 2284 cpumask_var_t mask;
2283 int cpu, nbe; 2285 int cpu, nbe;
2284 bool ret = false; 2286 bool ret = false;
2285 2287
2286 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2288 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2287 amd64_warn("%s: Error allocating mask\n", __func__); 2289 amd64_warn("%s: Error allocating mask\n", __func__);
2288 return false; 2290 return false;
2289 } 2291 }
2290 2292
2291 get_cpus_on_this_dct_cpumask(mask, nid); 2293 get_cpus_on_this_dct_cpumask(mask, nid);
2292 2294
2293 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); 2295 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2294 2296
2295 for_each_cpu(cpu, mask) { 2297 for_each_cpu(cpu, mask) {
2296 struct msr *reg = per_cpu_ptr(msrs, cpu); 2298 struct msr *reg = per_cpu_ptr(msrs, cpu);
2297 nbe = reg->l & MSR_MCGCTL_NBE; 2299 nbe = reg->l & MSR_MCGCTL_NBE;
2298 2300
2299 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 2301 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2300 cpu, reg->q, 2302 cpu, reg->q,
2301 (nbe ? "enabled" : "disabled")); 2303 (nbe ? "enabled" : "disabled"));
2302 2304
2303 if (!nbe) 2305 if (!nbe)
2304 goto out; 2306 goto out;
2305 } 2307 }
2306 ret = true; 2308 ret = true;
2307 2309
2308 out: 2310 out:
2309 free_cpumask_var(mask); 2311 free_cpumask_var(mask);
2310 return ret; 2312 return ret;
2311 } 2313 }
2312 2314
2313 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on) 2315 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2314 { 2316 {
2315 cpumask_var_t cmask; 2317 cpumask_var_t cmask;
2316 int cpu; 2318 int cpu;
2317 2319
2318 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { 2320 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2319 amd64_warn("%s: error allocating mask\n", __func__); 2321 amd64_warn("%s: error allocating mask\n", __func__);
2320 return false; 2322 return false;
2321 } 2323 }
2322 2324
2323 get_cpus_on_this_dct_cpumask(cmask, nid); 2325 get_cpus_on_this_dct_cpumask(cmask, nid);
2324 2326
2325 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2327 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2326 2328
2327 for_each_cpu(cpu, cmask) { 2329 for_each_cpu(cpu, cmask) {
2328 2330
2329 struct msr *reg = per_cpu_ptr(msrs, cpu); 2331 struct msr *reg = per_cpu_ptr(msrs, cpu);
2330 2332
2331 if (on) { 2333 if (on) {
2332 if (reg->l & MSR_MCGCTL_NBE) 2334 if (reg->l & MSR_MCGCTL_NBE)
2333 s->flags.nb_mce_enable = 1; 2335 s->flags.nb_mce_enable = 1;
2334 2336
2335 reg->l |= MSR_MCGCTL_NBE; 2337 reg->l |= MSR_MCGCTL_NBE;
2336 } else { 2338 } else {
2337 /* 2339 /*
2338 * Turn off NB MCE reporting only when it was off before 2340 * Turn off NB MCE reporting only when it was off before
2339 */ 2341 */
2340 if (!s->flags.nb_mce_enable) 2342 if (!s->flags.nb_mce_enable)
2341 reg->l &= ~MSR_MCGCTL_NBE; 2343 reg->l &= ~MSR_MCGCTL_NBE;
2342 } 2344 }
2343 } 2345 }
2344 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2346 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2345 2347
2346 free_cpumask_var(cmask); 2348 free_cpumask_var(cmask);
2347 2349
2348 return 0; 2350 return 0;
2349 } 2351 }
2350 2352
2351 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, 2353 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2352 struct pci_dev *F3) 2354 struct pci_dev *F3)
2353 { 2355 {
2354 bool ret = true; 2356 bool ret = true;
2355 u32 value, mask = 0x3; /* UECC/CECC enable */ 2357 u32 value, mask = 0x3; /* UECC/CECC enable */
2356 2358
2357 if (toggle_ecc_err_reporting(s, nid, ON)) { 2359 if (toggle_ecc_err_reporting(s, nid, ON)) {
2358 amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); 2360 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2359 return false; 2361 return false;
2360 } 2362 }
2361 2363
2362 amd64_read_pci_cfg(F3, NBCTL, &value); 2364 amd64_read_pci_cfg(F3, NBCTL, &value);
2363 2365
2364 s->old_nbctl = value & mask; 2366 s->old_nbctl = value & mask;
2365 s->nbctl_valid = true; 2367 s->nbctl_valid = true;
2366 2368
2367 value |= mask; 2369 value |= mask;
2368 amd64_write_pci_cfg(F3, NBCTL, value); 2370 amd64_write_pci_cfg(F3, NBCTL, value);
2369 2371
2370 amd64_read_pci_cfg(F3, NBCFG, &value); 2372 amd64_read_pci_cfg(F3, NBCFG, &value);
2371 2373
2372 debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", 2374 debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2373 nid, value, !!(value & NBCFG_ECC_ENABLE)); 2375 nid, value, !!(value & NBCFG_ECC_ENABLE));
2374 2376
2375 if (!(value & NBCFG_ECC_ENABLE)) { 2377 if (!(value & NBCFG_ECC_ENABLE)) {
2376 amd64_warn("DRAM ECC disabled on this node, enabling...\n"); 2378 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2377 2379
2378 s->flags.nb_ecc_prev = 0; 2380 s->flags.nb_ecc_prev = 0;
2379 2381
2380 /* Attempt to turn on DRAM ECC Enable */ 2382 /* Attempt to turn on DRAM ECC Enable */
2381 value |= NBCFG_ECC_ENABLE; 2383 value |= NBCFG_ECC_ENABLE;
2382 amd64_write_pci_cfg(F3, NBCFG, value); 2384 amd64_write_pci_cfg(F3, NBCFG, value);
2383 2385
2384 amd64_read_pci_cfg(F3, NBCFG, &value); 2386 amd64_read_pci_cfg(F3, NBCFG, &value);
2385 2387
2386 if (!(value & NBCFG_ECC_ENABLE)) { 2388 if (!(value & NBCFG_ECC_ENABLE)) {
2387 amd64_warn("Hardware rejected DRAM ECC enable, " 2389 amd64_warn("Hardware rejected DRAM ECC enable, "
2388 "check memory DIMM configuration.\n"); 2390 "check memory DIMM configuration.\n");
2389 ret = false; 2391 ret = false;
2390 } else { 2392 } else {
2391 amd64_info("Hardware accepted DRAM ECC Enable\n"); 2393 amd64_info("Hardware accepted DRAM ECC Enable\n");
2392 } 2394 }
2393 } else { 2395 } else {
2394 s->flags.nb_ecc_prev = 1; 2396 s->flags.nb_ecc_prev = 1;
2395 } 2397 }
2396 2398
2397 debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", 2399 debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2398 nid, value, !!(value & NBCFG_ECC_ENABLE)); 2400 nid, value, !!(value & NBCFG_ECC_ENABLE));
2399 2401
2400 return ret; 2402 return ret;
2401 } 2403 }
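The NBCTL handling above is a plain read-modify-write, with the original UECC/CECC bits stashed in s->old_nbctl so that restore_ecc_error_reporting() can undo the change. A minimal sketch, assuming a made-up starting register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nbctl = 0x1;		/* pretend BIOS left only CECC on */
	const uint32_t mask = 0x3;	/* UECC/CECC enable bits */

	uint32_t old = nbctl & mask;	/* models s->old_nbctl */
	nbctl |= mask;			/* force both on */
	printf("forced: 0x%x\n", (unsigned)nbctl);

	nbctl = (nbctl & ~mask) | old;	/* models the restore path */
	printf("restored: 0x%x\n", (unsigned)nbctl);
	return 0;
}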
2402 2404
2403 static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, 2405 static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2404 struct pci_dev *F3) 2406 struct pci_dev *F3)
2405 { 2407 {
2406 u32 value, mask = 0x3; /* UECC/CECC enable */ 2408 u32 value, mask = 0x3; /* UECC/CECC enable */
2407 2409
2408 2410
2409 if (!s->nbctl_valid) 2411 if (!s->nbctl_valid)
2410 return; 2412 return;
2411 2413
2412 amd64_read_pci_cfg(F3, NBCTL, &value); 2414 amd64_read_pci_cfg(F3, NBCTL, &value);
2413 value &= ~mask; 2415 value &= ~mask;
2414 value |= s->old_nbctl; 2416 value |= s->old_nbctl;
2415 2417
2416 amd64_write_pci_cfg(F3, NBCTL, value); 2418 amd64_write_pci_cfg(F3, NBCTL, value);
2417 2419
2418 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ 2420 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2419 if (!s->flags.nb_ecc_prev) { 2421 if (!s->flags.nb_ecc_prev) {
2420 amd64_read_pci_cfg(F3, NBCFG, &value); 2422 amd64_read_pci_cfg(F3, NBCFG, &value);
2421 value &= ~NBCFG_ECC_ENABLE; 2423 value &= ~NBCFG_ECC_ENABLE;
2422 amd64_write_pci_cfg(F3, NBCFG, value); 2424 amd64_write_pci_cfg(F3, NBCFG, value);
2423 } 2425 }
2424 2426
2425 /* restore the NB Enable MCGCTL bit */ 2427 /* restore the NB Enable MCGCTL bit */
2426 if (toggle_ecc_err_reporting(s, nid, OFF)) 2428 if (toggle_ecc_err_reporting(s, nid, OFF))
2427 amd64_warn("Error restoring NB MCGCTL settings!\n"); 2429 amd64_warn("Error restoring NB MCGCTL settings!\n");
2428 } 2430 }
2429 2431
2430 /* 2432 /*
2431 * EDAC requires that the BIOS have ECC enabled before 2433 * EDAC requires that the BIOS have ECC enabled before
2432 * taking over the processing of ECC errors. A command line 2434 * taking over the processing of ECC errors. A command line
2435 * option allows one to force-enable hardware ECC later in 2437 * option allows one to force-enable hardware ECC later in
2434 * enable_ecc_error_reporting(). 2436 * enable_ecc_error_reporting().
2435 */ 2437 */
2436 static const char *ecc_msg = 2438 static const char *ecc_msg =
2437 "ECC disabled in the BIOS or no ECC capability, module will not load.\n" 2439 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2438 " Either enable ECC checking or force module loading by setting " 2440 " Either enable ECC checking or force module loading by setting "
2439 "'ecc_enable_override'.\n" 2441 "'ecc_enable_override'.\n"
2440 " (Note that use of the override may cause unknown side effects.)\n"; 2442 " (Note that use of the override may cause unknown side effects.)\n";
2441 2443
2442 static bool ecc_enabled(struct pci_dev *F3, u8 nid) 2444 static bool ecc_enabled(struct pci_dev *F3, u8 nid)
2443 { 2445 {
2444 u32 value; 2446 u32 value;
2445 u8 ecc_en = 0; 2447 u8 ecc_en = 0;
2446 bool nb_mce_en = false; 2448 bool nb_mce_en = false;
2447 2449
2448 amd64_read_pci_cfg(F3, NBCFG, &value); 2450 amd64_read_pci_cfg(F3, NBCFG, &value);
2449 2451
2450 ecc_en = !!(value & NBCFG_ECC_ENABLE); 2452 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2451 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); 2453 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2452 2454
2453 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); 2455 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
2454 if (!nb_mce_en) 2456 if (!nb_mce_en)
2455 amd64_notice("NB MCE bank disabled, set MSR " 2457 amd64_notice("NB MCE bank disabled, set MSR "
2456 "0x%08x[4] on node %d to enable.\n", 2458 "0x%08x[4] on node %d to enable.\n",
2457 MSR_IA32_MCG_CTL, nid); 2459 MSR_IA32_MCG_CTL, nid);
2458 2460
2459 if (!ecc_en || !nb_mce_en) { 2461 if (!ecc_en || !nb_mce_en) {
2460 amd64_notice("%s", ecc_msg); 2462 amd64_notice("%s", ecc_msg);
2461 return false; 2463 return false;
2462 } 2464 }
2463 return true; 2465 return true;
2464 } 2466 }
2465 2467
2466 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci) 2468 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2467 { 2469 {
2468 int rc; 2470 int rc;
2469 2471
2470 rc = amd64_create_sysfs_dbg_files(mci); 2472 rc = amd64_create_sysfs_dbg_files(mci);
2471 if (rc < 0) 2473 if (rc < 0)
2472 return rc; 2474 return rc;
2473 2475
2474 if (boot_cpu_data.x86 >= 0x10) { 2476 if (boot_cpu_data.x86 >= 0x10) {
2475 rc = amd64_create_sysfs_inject_files(mci); 2477 rc = amd64_create_sysfs_inject_files(mci);
2476 if (rc < 0) 2478 if (rc < 0)
2477 return rc; 2479 return rc;
2478 } 2480 }
2479 2481
2480 return 0; 2482 return 0;
2481 } 2483 }
2482 2484
2483 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci) 2485 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2484 { 2486 {
2485 amd64_remove_sysfs_dbg_files(mci); 2487 amd64_remove_sysfs_dbg_files(mci);
2486 2488
2487 if (boot_cpu_data.x86 >= 0x10) 2489 if (boot_cpu_data.x86 >= 0x10)
2488 amd64_remove_sysfs_inject_files(mci); 2490 amd64_remove_sysfs_inject_files(mci);
2489 } 2491 }
2490 2492
2491 static void setup_mci_misc_attrs(struct mem_ctl_info *mci, 2493 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2492 struct amd64_family_type *fam) 2494 struct amd64_family_type *fam)
2493 { 2495 {
2494 struct amd64_pvt *pvt = mci->pvt_info; 2496 struct amd64_pvt *pvt = mci->pvt_info;
2495 2497
2496 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; 2498 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2497 mci->edac_ctl_cap = EDAC_FLAG_NONE; 2499 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2498 2500
2499 if (pvt->nbcap & NBCAP_SECDED) 2501 if (pvt->nbcap & NBCAP_SECDED)
2500 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; 2502 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2501 2503
2502 if (pvt->nbcap & NBCAP_CHIPKILL) 2504 if (pvt->nbcap & NBCAP_CHIPKILL)
2503 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; 2505 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2504 2506
2505 mci->edac_cap = amd64_determine_edac_cap(pvt); 2507 mci->edac_cap = amd64_determine_edac_cap(pvt);
2506 mci->mod_name = EDAC_MOD_STR; 2508 mci->mod_name = EDAC_MOD_STR;
2507 mci->mod_ver = EDAC_AMD64_VERSION; 2509 mci->mod_ver = EDAC_AMD64_VERSION;
2508 mci->ctl_name = fam->ctl_name; 2510 mci->ctl_name = fam->ctl_name;
2509 mci->dev_name = pci_name(pvt->F2); 2511 mci->dev_name = pci_name(pvt->F2);
2510 mci->ctl_page_to_phys = NULL; 2512 mci->ctl_page_to_phys = NULL;
2511 2513
2512 /* memory scrubber interface */ 2514 /* memory scrubber interface */
2513 mci->set_sdram_scrub_rate = amd64_set_scrub_rate; 2515 mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2514 mci->get_sdram_scrub_rate = amd64_get_scrub_rate; 2516 mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
2515 } 2517 }
2516 2518
2517 /* 2519 /*
2518 * Returns a pointer to the family descriptor on success, NULL otherwise. 2520 * Returns a pointer to the family descriptor on success, NULL otherwise.
2519 */ 2521 */
2520 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) 2522 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2521 { 2523 {
2522 u8 fam = boot_cpu_data.x86; 2524 u8 fam = boot_cpu_data.x86;
2523 struct amd64_family_type *fam_type = NULL; 2525 struct amd64_family_type *fam_type = NULL;
2524 2526
2525 switch (fam) { 2527 switch (fam) {
2526 case 0xf: 2528 case 0xf:
2527 fam_type = &amd64_family_types[K8_CPUS]; 2529 fam_type = &amd64_family_types[K8_CPUS];
2528 pvt->ops = &amd64_family_types[K8_CPUS].ops; 2530 pvt->ops = &amd64_family_types[K8_CPUS].ops;
2529 break; 2531 break;
2530 2532
2531 case 0x10: 2533 case 0x10:
2532 fam_type = &amd64_family_types[F10_CPUS]; 2534 fam_type = &amd64_family_types[F10_CPUS];
2533 pvt->ops = &amd64_family_types[F10_CPUS].ops; 2535 pvt->ops = &amd64_family_types[F10_CPUS].ops;
2534 break; 2536 break;
2535 2537
2536 case 0x15: 2538 case 0x15:
2537 fam_type = &amd64_family_types[F15_CPUS]; 2539 fam_type = &amd64_family_types[F15_CPUS];
2538 pvt->ops = &amd64_family_types[F15_CPUS].ops; 2540 pvt->ops = &amd64_family_types[F15_CPUS].ops;
2539 break; 2541 break;
2540 2542
2541 default: 2543 default:
2542 amd64_err("Unsupported family!\n"); 2544 amd64_err("Unsupported family!\n");
2543 return NULL; 2545 return NULL;
2544 } 2546 }
2545 2547
2546 pvt->ext_model = boot_cpu_data.x86_model >> 4; 2548 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2547 2549
2548 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, 2550 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2549 (fam == 0xf ? 2551 (fam == 0xf ?
2550 (pvt->ext_model >= K8_REV_F ? "revF or later " 2552 (pvt->ext_model >= K8_REV_F ? "revF or later "
2551 : "revE or earlier ") 2553 : "revE or earlier ")
2552 : ""), pvt->mc_node_id); 2554 : ""), pvt->mc_node_id);
2553 return fam_type; 2555 return fam_type;
2554 } 2556 }
2555 2557
2556 static int amd64_init_one_instance(struct pci_dev *F2) 2558 static int amd64_init_one_instance(struct pci_dev *F2)
2557 { 2559 {
2558 struct amd64_pvt *pvt = NULL; 2560 struct amd64_pvt *pvt = NULL;
2559 struct amd64_family_type *fam_type = NULL; 2561 struct amd64_family_type *fam_type = NULL;
2560 struct mem_ctl_info *mci = NULL; 2562 struct mem_ctl_info *mci = NULL;
2561 struct edac_mc_layer layers[2]; 2563 struct edac_mc_layer layers[2];
2562 int err = 0, ret; 2564 int err = 0, ret;
2563 u8 nid = get_node_id(F2); 2565 u8 nid = get_node_id(F2);
2564 2566
2565 ret = -ENOMEM; 2567 ret = -ENOMEM;
2566 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); 2568 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2567 if (!pvt) 2569 if (!pvt)
2568 goto err_ret; 2570 goto err_ret;
2569 2571
2570 pvt->mc_node_id = nid; 2572 pvt->mc_node_id = nid;
2571 pvt->F2 = F2; 2573 pvt->F2 = F2;
2572 2574
2573 ret = -EINVAL; 2575 ret = -EINVAL;
2574 fam_type = amd64_per_family_init(pvt); 2576 fam_type = amd64_per_family_init(pvt);
2575 if (!fam_type) 2577 if (!fam_type)
2576 goto err_free; 2578 goto err_free;
2577 2579
2578 ret = -ENODEV; 2580 ret = -ENODEV;
2579 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id); 2581 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2580 if (err) 2582 if (err)
2581 goto err_free; 2583 goto err_free;
2582 2584
2583 read_mc_regs(pvt); 2585 read_mc_regs(pvt);
2584 2586
2585 /* 2587 /*
2586 * We need to determine how many memory channels there are, then use 2588 * We need to determine how many memory channels there are, then use
2587 * that information to calculate the size of the dynamic instance 2589 * that information to calculate the size of the dynamic instance
2588 * tables in the 'mci' structure. 2590 * tables in the 'mci' structure.
2589 */ 2591 */
2590 ret = -EINVAL; 2592 ret = -EINVAL;
2591 pvt->channel_count = pvt->ops->early_channel_count(pvt); 2593 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2592 if (pvt->channel_count < 0) 2594 if (pvt->channel_count < 0)
2593 goto err_siblings; 2595 goto err_siblings;
2594 2596
2595 ret = -ENOMEM; 2597 ret = -ENOMEM;
2596 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 2598 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2597 layers[0].size = pvt->csels[0].b_cnt; 2599 layers[0].size = pvt->csels[0].b_cnt;
2598 layers[0].is_virt_csrow = true; 2600 layers[0].is_virt_csrow = true;
2599 layers[1].type = EDAC_MC_LAYER_CHANNEL; 2601 layers[1].type = EDAC_MC_LAYER_CHANNEL;
2600 layers[1].size = pvt->channel_count; 2602 layers[1].size = pvt->channel_count;
2601 layers[1].is_virt_csrow = false; 2603 layers[1].is_virt_csrow = false;
2602 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0); 2604 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2603 if (!mci) 2605 if (!mci)
2604 goto err_siblings; 2606 goto err_siblings;
2605 2607
2606 mci->pvt_info = pvt; 2608 mci->pvt_info = pvt;
2607 mci->pdev = &pvt->F2->dev; 2609 mci->pdev = &pvt->F2->dev;
2608 2610
2609 setup_mci_misc_attrs(mci, fam_type); 2611 setup_mci_misc_attrs(mci, fam_type);
2610 2612
2611 if (init_csrows(mci)) 2613 if (init_csrows(mci))
2612 mci->edac_cap = EDAC_FLAG_NONE; 2614 mci->edac_cap = EDAC_FLAG_NONE;
2613 2615
2614 ret = -ENODEV; 2616 ret = -ENODEV;
2615 if (edac_mc_add_mc(mci)) { 2617 if (edac_mc_add_mc(mci)) {
2616 debugf1("failed edac_mc_add_mc()\n"); 2618 debugf1("failed edac_mc_add_mc()\n");
2617 goto err_add_mc; 2619 goto err_add_mc;
2618 } 2620 }
2619 if (set_mc_sysfs_attrs(mci)) { 2621 if (set_mc_sysfs_attrs(mci)) {
2620 debugf1("failed set_mc_sysfs_attrs()\n"); 2622 debugf1("failed set_mc_sysfs_attrs()\n");
2621 goto err_add_sysfs; 2623 goto err_add_sysfs;
2622 } 2624 }
2623 2625
2624 /* register stuff with EDAC MCE */ 2626 /* register stuff with EDAC MCE */
2625 if (report_gart_errors) 2627 if (report_gart_errors)
2626 amd_report_gart_errors(true); 2628 amd_report_gart_errors(true);
2627 2629
2628 amd_register_ecc_decoder(amd64_decode_bus_error); 2630 amd_register_ecc_decoder(amd64_decode_bus_error);
2629 2631
2630 mcis[nid] = mci; 2632 mcis[nid] = mci;
2631 2633
2632 atomic_inc(&drv_instances); 2634 atomic_inc(&drv_instances);
2633 2635
2634 return 0; 2636 return 0;
2635 2637
2636 err_add_sysfs: 2638 err_add_sysfs:
2637 edac_mc_del_mc(mci->pdev); 2639 edac_mc_del_mc(mci->pdev);
2638 err_add_mc: 2640 err_add_mc:
2639 edac_mc_free(mci); 2641 edac_mc_free(mci);
2640 2642
2641 err_siblings: 2643 err_siblings:
2642 free_mc_sibling_devs(pvt); 2644 free_mc_sibling_devs(pvt);
2643 2645
2644 err_free: 2646 err_free:
2645 kfree(pvt); 2647 kfree(pvt);
2646 2648
2647 err_ret: 2649 err_ret:
2648 return ret; 2650 return ret;
2649 } 2651 }
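For reference, the two edac_mc_layer entries above describe how many objects the EDAC core should create per layer; to my understanding it sizes its dimm table as the product of the layer sizes. A hedged sketch of that arithmetic, with hypothetical counts:

#include <stdio.h>

struct toy_layer { const char *type; int size; };

int main(void)
{
	/* mirrors the two-layer setup above with hypothetical counts */
	struct toy_layer layers[2] = {
		{ "chip-select", 8 },	/* pvt->csels[0].b_cnt */
		{ "channel",     2 },	/* pvt->channel_count */
	};

	/* assumed: the core allocates one dimm_info per layer combination */
	printf("%d dimm_info objects\n", layers[0].size * layers[1].size);
	return 0;
}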
2650 2652
2651 static int __devinit amd64_probe_one_instance(struct pci_dev *pdev, 2653 static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
2652 const struct pci_device_id *mc_type) 2654 const struct pci_device_id *mc_type)
2653 { 2655 {
2654 u8 nid = get_node_id(pdev); 2656 u8 nid = get_node_id(pdev);
2655 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2657 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2656 struct ecc_settings *s; 2658 struct ecc_settings *s;
2657 int ret = 0; 2659 int ret = 0;
2658 2660
2659 ret = pci_enable_device(pdev); 2661 ret = pci_enable_device(pdev);
2660 if (ret < 0) { 2662 if (ret < 0) {
2661 debugf0("ret=%d\n", ret); 2663 debugf0("ret=%d\n", ret);
2662 return -EIO; 2664 return -EIO;
2663 } 2665 }
2664 2666
2665 ret = -ENOMEM; 2667 ret = -ENOMEM;
2666 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL); 2668 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2667 if (!s) 2669 if (!s)
2668 goto err_out; 2670 goto err_out;
2669 2671
2670 ecc_stngs[nid] = s; 2672 ecc_stngs[nid] = s;
2671 2673
2672 if (!ecc_enabled(F3, nid)) { 2674 if (!ecc_enabled(F3, nid)) {
2673 ret = -ENODEV; 2675 ret = -ENODEV;
2674 2676
2675 if (!ecc_enable_override) 2677 if (!ecc_enable_override)
2676 goto err_enable; 2678 goto err_enable;
2677 2679
2678 amd64_warn("Forcing ECC on!\n"); 2680 amd64_warn("Forcing ECC on!\n");
2679 2681
2680 if (!enable_ecc_error_reporting(s, nid, F3)) 2682 if (!enable_ecc_error_reporting(s, nid, F3))
2681 goto err_enable; 2683 goto err_enable;
2682 } 2684 }
2683 2685
2684 ret = amd64_init_one_instance(pdev); 2686 ret = amd64_init_one_instance(pdev);
2685 if (ret < 0) { 2687 if (ret < 0) {
2686 amd64_err("Error probing instance: %d\n", nid); 2688 amd64_err("Error probing instance: %d\n", nid);
2687 restore_ecc_error_reporting(s, nid, F3); 2689 restore_ecc_error_reporting(s, nid, F3);
2688 } 2690 }
2689 2691
2690 return ret; 2692 return ret;
2691 2693
2692 err_enable: 2694 err_enable:
2693 kfree(s); 2695 kfree(s);
2694 ecc_stngs[nid] = NULL; 2696 ecc_stngs[nid] = NULL;
2695 2697
2696 err_out: 2698 err_out:
2697 return ret; 2699 return ret;
2698 } 2700 }
2699 2701
2700 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) 2702 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
2701 { 2703 {
2702 struct mem_ctl_info *mci; 2704 struct mem_ctl_info *mci;
2703 struct amd64_pvt *pvt; 2705 struct amd64_pvt *pvt;
2704 u8 nid = get_node_id(pdev); 2706 u8 nid = get_node_id(pdev);
2705 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2707 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2706 struct ecc_settings *s = ecc_stngs[nid]; 2708 struct ecc_settings *s = ecc_stngs[nid];
2707 2709
2708 mci = find_mci_by_dev(&pdev->dev); 2710 mci = find_mci_by_dev(&pdev->dev);
2709 del_mc_sysfs_attrs(mci); 2711 del_mc_sysfs_attrs(mci);
2710 /* Remove from EDAC CORE tracking list */ 2712 /* Remove from EDAC CORE tracking list */
2711 mci = edac_mc_del_mc(&pdev->dev); 2713 mci = edac_mc_del_mc(&pdev->dev);
2712 if (!mci) 2714 if (!mci)
2713 return; 2715 return;
2714 2716
2715 pvt = mci->pvt_info; 2717 pvt = mci->pvt_info;
2716 2718
2717 restore_ecc_error_reporting(s, nid, F3); 2719 restore_ecc_error_reporting(s, nid, F3);
2718 2720
2719 free_mc_sibling_devs(pvt); 2721 free_mc_sibling_devs(pvt);
2720 2722
2721 /* unregister from EDAC MCE */ 2723 /* unregister from EDAC MCE */
2722 amd_report_gart_errors(false); 2724 amd_report_gart_errors(false);
2723 amd_unregister_ecc_decoder(amd64_decode_bus_error); 2725 amd_unregister_ecc_decoder(amd64_decode_bus_error);
2724 2726
2725 kfree(ecc_stngs[nid]); 2727 kfree(ecc_stngs[nid]);
2726 ecc_stngs[nid] = NULL; 2728 ecc_stngs[nid] = NULL;
2727 2729
2728 /* Free the EDAC CORE resources */ 2730 /* Free the EDAC CORE resources */
2729 mci->pvt_info = NULL; 2731 mci->pvt_info = NULL;
2730 mcis[nid] = NULL; 2732 mcis[nid] = NULL;
2731 2733
2732 kfree(pvt); 2734 kfree(pvt);
2733 edac_mc_free(mci); 2735 edac_mc_free(mci);
2734 } 2736 }
2735 2737
2736 /* 2738 /*
2737 * This table is part of the interface for loading drivers for PCI devices. The 2739 * This table is part of the interface for loading drivers for PCI devices. The
2738 * PCI core identifies what devices are on a system during boot, and then 2740 * PCI core identifies what devices are on a system during boot, and then
2739 * queries this table to see whether this driver handles a device it found. 2741 * queries this table to see whether this driver handles a device it found.
2740 */ 2742 */
2741 static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = { 2743 static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
2742 { 2744 {
2743 .vendor = PCI_VENDOR_ID_AMD, 2745 .vendor = PCI_VENDOR_ID_AMD,
2744 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, 2746 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2745 .subvendor = PCI_ANY_ID, 2747 .subvendor = PCI_ANY_ID,
2746 .subdevice = PCI_ANY_ID, 2748 .subdevice = PCI_ANY_ID,
2747 .class = 0, 2749 .class = 0,
2748 .class_mask = 0, 2750 .class_mask = 0,
2749 }, 2751 },
2750 { 2752 {
2751 .vendor = PCI_VENDOR_ID_AMD, 2753 .vendor = PCI_VENDOR_ID_AMD,
2752 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM, 2754 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2753 .subvendor = PCI_ANY_ID, 2755 .subvendor = PCI_ANY_ID,
2754 .subdevice = PCI_ANY_ID, 2756 .subdevice = PCI_ANY_ID,
2755 .class = 0, 2757 .class = 0,
2756 .class_mask = 0, 2758 .class_mask = 0,
2757 }, 2759 },
2758 { 2760 {
2759 .vendor = PCI_VENDOR_ID_AMD, 2761 .vendor = PCI_VENDOR_ID_AMD,
2760 .device = PCI_DEVICE_ID_AMD_15H_NB_F2, 2762 .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2761 .subvendor = PCI_ANY_ID, 2763 .subvendor = PCI_ANY_ID,
2762 .subdevice = PCI_ANY_ID, 2764 .subdevice = PCI_ANY_ID,
2763 .class = 0, 2765 .class = 0,
2764 .class_mask = 0, 2766 .class_mask = 0,
2765 }, 2767 },
2766 2768
2767 {0, } 2769 {0, }
2768 }; 2770 };
2769 MODULE_DEVICE_TABLE(pci, amd64_pci_table); 2771 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2770 2772
2771 static struct pci_driver amd64_pci_driver = { 2773 static struct pci_driver amd64_pci_driver = {
2772 .name = EDAC_MOD_STR, 2774 .name = EDAC_MOD_STR,
2773 .probe = amd64_probe_one_instance, 2775 .probe = amd64_probe_one_instance,
2774 .remove = __devexit_p(amd64_remove_one_instance), 2776 .remove = __devexit_p(amd64_remove_one_instance),
2775 .id_table = amd64_pci_table, 2777 .id_table = amd64_pci_table,
2776 }; 2778 };
2777 2779
2778 static void setup_pci_device(void) 2780 static void setup_pci_device(void)
2779 { 2781 {
2780 struct mem_ctl_info *mci; 2782 struct mem_ctl_info *mci;
2781 struct amd64_pvt *pvt; 2783 struct amd64_pvt *pvt;
2782 2784
2783 if (amd64_ctl_pci) 2785 if (amd64_ctl_pci)
2784 return; 2786 return;
2785 2787
2786 mci = mcis[0]; 2788 mci = mcis[0];
2787 if (mci) { 2789 if (mci) {
2788 2790
2789 pvt = mci->pvt_info; 2791 pvt = mci->pvt_info;
2790 amd64_ctl_pci = 2792 amd64_ctl_pci =
2791 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); 2793 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2792 2794
2793 if (!amd64_ctl_pci) { 2795 if (!amd64_ctl_pci) {
2794 pr_warning("%s(): Unable to create PCI control\n", 2796 pr_warning("%s(): Unable to create PCI control\n",
2795 __func__); 2797 __func__);
2796 2798
2797 pr_warning("%s(): PCI error report via EDAC not set\n", 2799 pr_warning("%s(): PCI error report via EDAC not set\n",
2798 __func__); 2800 __func__);
2799 } 2801 }
2800 } 2802 }
2801 } 2803 }
2802 2804
2803 static int __init amd64_edac_init(void) 2805 static int __init amd64_edac_init(void)
2804 { 2806 {
2805 int err = -ENODEV; 2807 int err = -ENODEV;
2806 2808
2807 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION); 2809 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2808 2810
2809 opstate_init(); 2811 opstate_init();
2810 2812
2811 if (amd_cache_northbridges() < 0) 2813 if (amd_cache_northbridges() < 0)
2812 goto err_ret; 2814 goto err_ret;
2813 2815
2814 err = -ENOMEM; 2816 err = -ENOMEM;
2815 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); 2817 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2816 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); 2818 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2817 if (!(mcis && ecc_stngs)) 2819 if (!(mcis && ecc_stngs))
2818 goto err_free; 2820 goto err_free;
2819 2821
2820 msrs = msrs_alloc(); 2822 msrs = msrs_alloc();
2821 if (!msrs) 2823 if (!msrs)
2822 goto err_free; 2824 goto err_free;
2823 2825
2824 err = pci_register_driver(&amd64_pci_driver); 2826 err = pci_register_driver(&amd64_pci_driver);
2825 if (err) 2827 if (err)
2826 goto err_pci; 2828 goto err_pci;
2827 2829
2828 err = -ENODEV; 2830 err = -ENODEV;
2829 if (!atomic_read(&drv_instances)) 2831 if (!atomic_read(&drv_instances))
2830 goto err_no_instances; 2832 goto err_no_instances;
2831 2833
2832 setup_pci_device(); 2834 setup_pci_device();
2833 return 0; 2835 return 0;
2834 2836
2835 err_no_instances: 2837 err_no_instances:
2836 pci_unregister_driver(&amd64_pci_driver); 2838 pci_unregister_driver(&amd64_pci_driver);
2837 2839
2838 err_pci: 2840 err_pci:
2839 msrs_free(msrs); 2841 msrs_free(msrs);
2840 msrs = NULL; 2842 msrs = NULL;
2841 2843
2842 err_free: 2844 err_free:
2843 kfree(mcis); 2845 kfree(mcis);
2844 mcis = NULL; 2846 mcis = NULL;
2845 2847
2846 kfree(ecc_stngs); 2848 kfree(ecc_stngs);
2847 ecc_stngs = NULL; 2849 ecc_stngs = NULL;
2848 2850
2849 err_ret: 2851 err_ret:
2850 return err; 2852 return err;
2851 } 2853 }
2852 2854
2853 static void __exit amd64_edac_exit(void) 2855 static void __exit amd64_edac_exit(void)
2854 { 2856 {
2855 if (amd64_ctl_pci) 2857 if (amd64_ctl_pci)
2856 edac_pci_release_generic_ctl(amd64_ctl_pci); 2858 edac_pci_release_generic_ctl(amd64_ctl_pci);
2857 2859
2858 pci_unregister_driver(&amd64_pci_driver); 2860 pci_unregister_driver(&amd64_pci_driver);
2859 2861
2860 kfree(ecc_stngs); 2862 kfree(ecc_stngs);
2861 ecc_stngs = NULL; 2863 ecc_stngs = NULL;
2862 2864
2863 kfree(mcis); 2865 kfree(mcis);
2864 mcis = NULL; 2866 mcis = NULL;
2865 2867
2866 msrs_free(msrs); 2868 msrs_free(msrs);
2867 msrs = NULL; 2869 msrs = NULL;
2868 } 2870 }
2869 2871
2870 module_init(amd64_edac_init); 2872 module_init(amd64_edac_init);
2871 module_exit(amd64_edac_exit); 2873 module_exit(amd64_edac_exit);
2872 2874
2873 MODULE_LICENSE("GPL"); 2875 MODULE_LICENSE("GPL");
2874 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, " 2876 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2875 "Dave Peterson, Thayne Harbaugh"); 2877 "Dave Peterson, Thayne Harbaugh");
2876 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " 2878 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2877 EDAC_AMD64_VERSION); 2879 EDAC_AMD64_VERSION);
2878 2880
2879 module_param(edac_op_state, int, 0444); 2881 module_param(edac_op_state, int, 0444);
2880 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 2882 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
2881 2883
drivers/edac/amd76x_edac.c
1 /* 1 /*
2 * AMD 76x Memory Controller kernel module 2 * AMD 76x Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com) 3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * Written by Thayne Harbaugh 7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others. 8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/ 9 * http://www.anime.net/~goemon/linux-ecc/
10 * 10 *
11 * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $ 11 * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
12 * 12 *
13 */ 13 */
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/pci.h> 17 #include <linux/pci.h>
18 #include <linux/pci_ids.h> 18 #include <linux/pci_ids.h>
19 #include <linux/edac.h> 19 #include <linux/edac.h>
20 #include "edac_core.h" 20 #include "edac_core.h"
21 21
22 #define AMD76X_REVISION " Ver: 2.0.2" 22 #define AMD76X_REVISION " Ver: 2.0.2"
23 #define EDAC_MOD_STR "amd76x_edac" 23 #define EDAC_MOD_STR "amd76x_edac"
24 24
25 #define amd76x_printk(level, fmt, arg...) \ 25 #define amd76x_printk(level, fmt, arg...) \
26 edac_printk(level, "amd76x", fmt, ##arg) 26 edac_printk(level, "amd76x", fmt, ##arg)
27 27
28 #define amd76x_mc_printk(mci, level, fmt, arg...) \ 28 #define amd76x_mc_printk(mci, level, fmt, arg...) \
29 edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg) 29 edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
30 30
31 #define AMD76X_NR_CSROWS 8 31 #define AMD76X_NR_CSROWS 8
32 #define AMD76X_NR_DIMMS 4 32 #define AMD76X_NR_DIMMS 4
33 33
34 /* AMD 76x register addresses - device 0 function 0 - PCI bridge */ 34 /* AMD 76x register addresses - device 0 function 0 - PCI bridge */
35 35
36 #define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b) 36 #define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
37 * 37 *
38 * 31:16 reserved 38 * 31:16 reserved
39 * 15:14 SERR enabled: x1=ue 1x=ce 39 * 15:14 SERR enabled: x1=ue 1x=ce
40 * 13 reserved 40 * 13 reserved
41 * 12 diag: disabled, enabled 41 * 12 diag: disabled, enabled
42 * 11:10 mode: dis, EC, ECC, ECC+scrub 42 * 11:10 mode: dis, EC, ECC, ECC+scrub
43 * 9:8 status: x1=ue 1x=ce 43 * 9:8 status: x1=ue 1x=ce
44 * 7:4 UE cs row 44 * 7:4 UE cs row
45 * 3:0 CE cs row 45 * 3:0 CE cs row
46 */ 46 */
47 47
48 #define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b) 48 #define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
49 * 49 *
50 * 31:26 clock disable 5 - 0 50 * 31:26 clock disable 5 - 0
51 * 25 SDRAM init 51 * 25 SDRAM init
52 * 24 reserved 52 * 24 reserved
53 * 23 mode register service 53 * 23 mode register service
54 * 22:21 suspend to RAM 54 * 22:21 suspend to RAM
55 * 20 burst refresh enable 55 * 20 burst refresh enable
56 * 19 refresh disable 56 * 19 refresh disable
57 * 18 reserved 57 * 18 reserved
58 * 17:16 cycles-per-refresh 58 * 17:16 cycles-per-refresh
59 * 15:8 reserved 59 * 15:8 reserved
60 * 7:0 x4 mode enable 7 - 0 60 * 7:0 x4 mode enable 7 - 0
61 */ 61 */
62 62
63 #define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b) 63 #define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
64 * 64 *
65 * 31:23 chip-select base 65 * 31:23 chip-select base
66 * 22:16 reserved 66 * 22:16 reserved
67 * 15:7 chip-select mask 67 * 15:7 chip-select mask
68 * 6:3 reserved 68 * 6:3 reserved
69 * 2:1 address mode 69 * 2:1 address mode
70 * 0 chip-select enable 70 * 0 chip-select enable
71 */ 71 */
72 72
73 struct amd76x_error_info { 73 struct amd76x_error_info {
74 u32 ecc_mode_status; 74 u32 ecc_mode_status;
75 }; 75 };
76 76
77 enum amd76x_chips { 77 enum amd76x_chips {
78 AMD761 = 0, 78 AMD761 = 0,
79 AMD762 79 AMD762
80 }; 80 };
81 81
82 struct amd76x_dev_info { 82 struct amd76x_dev_info {
83 const char *ctl_name; 83 const char *ctl_name;
84 }; 84 };
85 85
86 static const struct amd76x_dev_info amd76x_devs[] = { 86 static const struct amd76x_dev_info amd76x_devs[] = {
87 [AMD761] = { 87 [AMD761] = {
88 .ctl_name = "AMD761"}, 88 .ctl_name = "AMD761"},
89 [AMD762] = { 89 [AMD762] = {
90 .ctl_name = "AMD762"}, 90 .ctl_name = "AMD762"},
91 }; 91 };
92 92
93 static struct edac_pci_ctl_info *amd76x_pci; 93 static struct edac_pci_ctl_info *amd76x_pci;
94 94
95 /** 95 /**
96 * amd76x_get_error_info - fetch error information 96 * amd76x_get_error_info - fetch error information
97 * @mci: Memory controller 97 * @mci: Memory controller
98 * @info: Info to fill in 98 * @info: Info to fill in
99 * 99 *
100 * Fetch and store the AMD76x ECC status. Clear pending status 100 * Fetch and store the AMD76x ECC status. Clear pending status
101 * on the chip so that further errors will be reported. 101 * on the chip so that further errors will be reported.
102 */ 102 */
103 static void amd76x_get_error_info(struct mem_ctl_info *mci, 103 static void amd76x_get_error_info(struct mem_ctl_info *mci,
104 struct amd76x_error_info *info) 104 struct amd76x_error_info *info)
105 { 105 {
106 struct pci_dev *pdev; 106 struct pci_dev *pdev;
107 107
108 pdev = to_pci_dev(mci->pdev); 108 pdev = to_pci_dev(mci->pdev);
109 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, 109 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
110 &info->ecc_mode_status); 110 &info->ecc_mode_status);
111 111
112 if (info->ecc_mode_status & BIT(8)) 112 if (info->ecc_mode_status & BIT(8))
113 pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS, 113 pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
114 (u32) BIT(8), (u32) BIT(8)); 114 (u32) BIT(8), (u32) BIT(8));
115 115
116 if (info->ecc_mode_status & BIT(9)) 116 if (info->ecc_mode_status & BIT(9))
117 pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS, 117 pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
118 (u32) BIT(9), (u32) BIT(9)); 118 (u32) BIT(9), (u32) BIT(9));
119 } 119 }
120 120
121 /** 121 /**
122 * amd76x_process_error_info - Error check 122 * amd76x_process_error_info - Error check
123 * @mci: Memory controller 123 * @mci: Memory controller
124 * @info: Previously fetched information from chip 124 * @info: Previously fetched information from chip
125 * @handle_errors: 1 if we should do recovery 125 * @handle_errors: 1 if we should do recovery
126 * 126 *
127 * Process the chip state and decide if an error has occurred. 127 * Process the chip state and decide if an error has occurred.
128 * A return of 1 indicates an error. Also, if handle_errors is true, 128 * A return of 1 indicates an error. Also, if handle_errors is true,
129 * attempt to handle and clean up after the error. 129 * attempt to handle and clean up after the error.
130 */ 130 */
131 static int amd76x_process_error_info(struct mem_ctl_info *mci, 131 static int amd76x_process_error_info(struct mem_ctl_info *mci,
132 struct amd76x_error_info *info, 132 struct amd76x_error_info *info,
133 int handle_errors) 133 int handle_errors)
134 { 134 {
135 int error_found; 135 int error_found;
136 u32 row; 136 u32 row;
137 137
138 error_found = 0; 138 error_found = 0;
139 139
140 /* 140 /*
141 * Check for an uncorrectable error 141 * Check for an uncorrectable error
142 */ 142 */
143 if (info->ecc_mode_status & BIT(8)) { 143 if (info->ecc_mode_status & BIT(8)) {
144 error_found = 1; 144 error_found = 1;
145 145
146 if (handle_errors) { 146 if (handle_errors) {
147 row = (info->ecc_mode_status >> 4) & 0xf; 147 row = (info->ecc_mode_status >> 4) & 0xf;
148 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 148 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
149 mci->csrows[row].first_page, 0, 0, 149 mci->csrows[row]->first_page, 0, 0,
150 row, 0, -1, 150 row, 0, -1,
151 mci->ctl_name, "", NULL); 151 mci->ctl_name, "", NULL);
152 } 152 }
153 } 153 }
154 154
155 /* 155 /*
156 * Check for a correctable error 156 * Check for a correctable error
157 */ 157 */
158 if (info->ecc_mode_status & BIT(9)) { 158 if (info->ecc_mode_status & BIT(9)) {
159 error_found = 1; 159 error_found = 1;
160 160
161 if (handle_errors) { 161 if (handle_errors) {
162 row = info->ecc_mode_status & 0xf; 162 row = info->ecc_mode_status & 0xf;
163 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 163 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
164 mci->csrows[row].first_page, 0, 0, 164 mci->csrows[row]->first_page, 0, 0,
165 row, 0, -1, 165 row, 0, -1,
166 mci->ctl_name, "", NULL); 166 mci->ctl_name, "", NULL);
167 } 167 }
168 } 168 }
169 169
170 return error_found; 170 return error_found;
171 } 171 }
172 172
173 /** 173 /**
174 * amd76x_check - Poll the controller 174 * amd76x_check - Poll the controller
175 * @mci: Memory controller 175 * @mci: Memory controller
176 * 176 *
177 * Called by the poll handlers, this function reads the status 177 * Called by the poll handlers, this function reads the status
178 * from the controller and checks for errors. 178 * from the controller and checks for errors.
179 */ 179 */
180 static void amd76x_check(struct mem_ctl_info *mci) 180 static void amd76x_check(struct mem_ctl_info *mci)
181 { 181 {
182 struct amd76x_error_info info; 182 struct amd76x_error_info info;
183 debugf3("%s()\n", __func__); 183 debugf3("%s()\n", __func__);
184 amd76x_get_error_info(mci, &info); 184 amd76x_get_error_info(mci, &info);
185 amd76x_process_error_info(mci, &info, 1); 185 amd76x_process_error_info(mci, &info, 1);
186 } 186 }
187 187
188 static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, 188 static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
189 enum edac_type edac_mode) 189 enum edac_type edac_mode)
190 { 190 {
191 struct csrow_info *csrow; 191 struct csrow_info *csrow;
192 struct dimm_info *dimm; 192 struct dimm_info *dimm;
193 u32 mba, mba_base, mba_mask, dms; 193 u32 mba, mba_base, mba_mask, dms;
194 int index; 194 int index;
195 195
196 for (index = 0; index < mci->nr_csrows; index++) { 196 for (index = 0; index < mci->nr_csrows; index++) {
197 csrow = &mci->csrows[index]; 197 csrow = mci->csrows[index];
198 dimm = csrow->channels[0].dimm; 198 dimm = csrow->channels[0]->dimm;
199 199
200 /* find the DRAM Chip Select Base address and mask */ 200 /* find the DRAM Chip Select Base address and mask */
201 pci_read_config_dword(pdev, 201 pci_read_config_dword(pdev,
202 AMD76X_MEM_BASE_ADDR + (index * 4), &mba); 202 AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
203 203
204 if (!(mba & BIT(0))) 204 if (!(mba & BIT(0)))
205 continue; 205 continue;
206 206
207 mba_base = mba & 0xff800000UL; 207 mba_base = mba & 0xff800000UL;
208 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; 208 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
209 pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); 209 pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
210 csrow->first_page = mba_base >> PAGE_SHIFT; 210 csrow->first_page = mba_base >> PAGE_SHIFT;
211 dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; 211 dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
212 csrow->last_page = csrow->first_page + dimm->nr_pages - 1; 212 csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
213 csrow->page_mask = mba_mask >> PAGE_SHIFT; 213 csrow->page_mask = mba_mask >> PAGE_SHIFT;
214 dimm->grain = dimm->nr_pages << PAGE_SHIFT; 214 dimm->grain = dimm->nr_pages << PAGE_SHIFT;
215 dimm->mtype = MEM_RDDR; 215 dimm->mtype = MEM_RDDR;
216 dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; 216 dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
217 dimm->edac_mode = edac_mode; 217 dimm->edac_mode = edac_mode;
218 } 218 }
219 } 219 }
220 220
221 /** 221 /**
222 * amd76x_probe1 - Perform setup for detected device 222 * amd76x_probe1 - Perform setup for detected device
223 * @pdev: PCI device detected 223 * @pdev: PCI device detected
224 * @dev_idx: Device type index 224 * @dev_idx: Device type index
225 * 225 *
226 * We have found an AMD76x and now need to set up the memory 226 * We have found an AMD76x and now need to set up the memory
227 * controller status reporting. We configure and set up the 227 * controller status reporting. We configure and set up the
228 * memory controller reporting and claim the device. 228 * memory controller reporting and claim the device.
229 */ 229 */
230 static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) 230 static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
231 { 231 {
232 static const enum edac_type ems_modes[] = { 232 static const enum edac_type ems_modes[] = {
233 EDAC_NONE, 233 EDAC_NONE,
234 EDAC_EC, 234 EDAC_EC,
235 EDAC_SECDED, 235 EDAC_SECDED,
236 EDAC_SECDED 236 EDAC_SECDED
237 }; 237 };
238 struct mem_ctl_info *mci; 238 struct mem_ctl_info *mci;
239 struct edac_mc_layer layers[2]; 239 struct edac_mc_layer layers[2];
240 u32 ems; 240 u32 ems;
241 u32 ems_mode; 241 u32 ems_mode;
242 struct amd76x_error_info discard; 242 struct amd76x_error_info discard;
243 243
244 debugf0("%s()\n", __func__); 244 debugf0("%s()\n", __func__);
245 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); 245 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
246 ems_mode = (ems >> 10) & 0x3; 246 ems_mode = (ems >> 10) & 0x3;
247 247
248 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 248 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
249 layers[0].size = AMD76X_NR_CSROWS; 249 layers[0].size = AMD76X_NR_CSROWS;
250 layers[0].is_virt_csrow = true; 250 layers[0].is_virt_csrow = true;
251 layers[1].type = EDAC_MC_LAYER_CHANNEL; 251 layers[1].type = EDAC_MC_LAYER_CHANNEL;
252 layers[1].size = 1; 252 layers[1].size = 1;
253 layers[1].is_virt_csrow = false; 253 layers[1].is_virt_csrow = false;
254 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); 254 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
255 255
256 if (mci == NULL) 256 if (mci == NULL)
257 return -ENOMEM; 257 return -ENOMEM;
258 258
259 debugf0("%s(): mci = %p\n", __func__, mci); 259 debugf0("%s(): mci = %p\n", __func__, mci);
260 mci->pdev = &pdev->dev; 260 mci->pdev = &pdev->dev;
261 mci->mtype_cap = MEM_FLAG_RDDR; 261 mci->mtype_cap = MEM_FLAG_RDDR;
262 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 262 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
263 mci->edac_cap = ems_mode ? 263 mci->edac_cap = ems_mode ?
264 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; 264 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
265 mci->mod_name = EDAC_MOD_STR; 265 mci->mod_name = EDAC_MOD_STR;
266 mci->mod_ver = AMD76X_REVISION; 266 mci->mod_ver = AMD76X_REVISION;
267 mci->ctl_name = amd76x_devs[dev_idx].ctl_name; 267 mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
268 mci->dev_name = pci_name(pdev); 268 mci->dev_name = pci_name(pdev);
269 mci->edac_check = amd76x_check; 269 mci->edac_check = amd76x_check;
270 mci->ctl_page_to_phys = NULL; 270 mci->ctl_page_to_phys = NULL;
271 271
272 amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]); 272 amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
273 amd76x_get_error_info(mci, &discard); /* clear counters */ 273 amd76x_get_error_info(mci, &discard); /* clear counters */
274 274
275 /* Here we assume that we will never see multiple instances of this 275 /* Here we assume that we will never see multiple instances of this
276 * type of memory controller. The ID is therefore hardcoded to 0. 276 * type of memory controller. The ID is therefore hardcoded to 0.
277 */ 277 */
278 if (edac_mc_add_mc(mci)) { 278 if (edac_mc_add_mc(mci)) {
279 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 279 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
280 goto fail; 280 goto fail;
281 } 281 }
282 282
283 /* allocating generic PCI control info */ 283 /* allocating generic PCI control info */
284 amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 284 amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
285 if (!amd76x_pci) { 285 if (!amd76x_pci) {
286 printk(KERN_WARNING 286 printk(KERN_WARNING
287 "%s(): Unable to create PCI control\n", 287 "%s(): Unable to create PCI control\n",
288 __func__); 288 __func__);
289 printk(KERN_WARNING 289 printk(KERN_WARNING
290 "%s(): PCI error report via EDAC not setup\n", 290 "%s(): PCI error report via EDAC not setup\n",
291 __func__); 291 __func__);
292 } 292 }
293 293
294 /* get this far and it's successful */ 294 /* get this far and it's successful */
295 debugf3("%s(): success\n", __func__); 295 debugf3("%s(): success\n", __func__);
296 return 0; 296 return 0;
297 297
298 fail: 298 fail:
299 edac_mc_free(mci); 299 edac_mc_free(mci);
300 return -ENODEV; 300 return -ENODEV;
301 } 301 }
302 302
303 /* returns count (>= 0), or negative on error */ 303 /* returns count (>= 0), or negative on error */
304 static int __devinit amd76x_init_one(struct pci_dev *pdev, 304 static int __devinit amd76x_init_one(struct pci_dev *pdev,
305 const struct pci_device_id *ent) 305 const struct pci_device_id *ent)
306 { 306 {
307 debugf0("%s()\n", __func__); 307 debugf0("%s()\n", __func__);
308 308
309 /* don't need to call pci_enable_device() */ 309 /* don't need to call pci_enable_device() */
310 return amd76x_probe1(pdev, ent->driver_data); 310 return amd76x_probe1(pdev, ent->driver_data);
311 } 311 }
312 312
313 /** 313 /**
314 * amd76x_remove_one - driver shutdown 314 * amd76x_remove_one - driver shutdown
315 * @pdev: PCI device being handed back 315 * @pdev: PCI device being handed back
316 * 316 *
317 * Called when the driver is unloaded. Find the matching mci 317 * Called when the driver is unloaded. Find the matching mci
318 * structure for the device, then delete the mci and free the 318 * structure for the device, then delete the mci and free the
319 * resources. 319 * resources.
320 */ 320 */
321 static void __devexit amd76x_remove_one(struct pci_dev *pdev) 321 static void __devexit amd76x_remove_one(struct pci_dev *pdev)
322 { 322 {
323 struct mem_ctl_info *mci; 323 struct mem_ctl_info *mci;
324 324
325 debugf0("%s()\n", __func__); 325 debugf0("%s()\n", __func__);
326 326
327 if (amd76x_pci) 327 if (amd76x_pci)
328 edac_pci_release_generic_ctl(amd76x_pci); 328 edac_pci_release_generic_ctl(amd76x_pci);
329 329
330 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) 330 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
331 return; 331 return;
332 332
333 edac_mc_free(mci); 333 edac_mc_free(mci);
334 } 334 }
335 335
336 static DEFINE_PCI_DEVICE_TABLE(amd76x_pci_tbl) = { 336 static DEFINE_PCI_DEVICE_TABLE(amd76x_pci_tbl) = {
337 { 337 {
338 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 338 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
339 AMD762}, 339 AMD762},
340 { 340 {
341 PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 341 PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
342 AMD761}, 342 AMD761},
343 { 343 {
344 0, 344 0,
345 } /* 0 terminated list. */ 345 } /* 0 terminated list. */
346 }; 346 };
347 347
348 MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl); 348 MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
349 349
350 static struct pci_driver amd76x_driver = { 350 static struct pci_driver amd76x_driver = {
351 .name = EDAC_MOD_STR, 351 .name = EDAC_MOD_STR,
352 .probe = amd76x_init_one, 352 .probe = amd76x_init_one,
353 .remove = __devexit_p(amd76x_remove_one), 353 .remove = __devexit_p(amd76x_remove_one),
354 .id_table = amd76x_pci_tbl, 354 .id_table = amd76x_pci_tbl,
355 }; 355 };
356 356
357 static int __init amd76x_init(void) 357 static int __init amd76x_init(void)
358 { 358 {
359 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 359 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
360 opstate_init(); 360 opstate_init();
361 361
362 return pci_register_driver(&amd76x_driver); 362 return pci_register_driver(&amd76x_driver);
363 } 363 }
364 364
365 static void __exit amd76x_exit(void) 365 static void __exit amd76x_exit(void)
366 { 366 {
367 pci_unregister_driver(&amd76x_driver); 367 pci_unregister_driver(&amd76x_driver);
368 } 368 }
369 369
370 module_init(amd76x_init); 370 module_init(amd76x_init);
371 module_exit(amd76x_exit); 371 module_exit(amd76x_exit);
372 372
373 MODULE_LICENSE("GPL"); 373 MODULE_LICENSE("GPL");
374 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); 374 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
375 MODULE_DESCRIPTION("MC support for AMD 76x memory controllers"); 375 MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
376 376
377 module_param(edac_op_state, int, 0444); 377 module_param(edac_op_state, int, 0444);
378 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 378 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
379 379
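Note: edac_mc_alloc() itself (in edac_mc.c, not part of these hunks) is where the single-kzalloc scheme is actually replaced. A hedged sketch of the per-object allocation loop such a scheme needs; the helper name alloc_csrows and the error unwinding are illustrative, only the one-kzalloc-per-container rule comes from the commit message:

    /* Illustrative only: each csrow and channel gets its own
     * allocation so its kobject can be released independently,
     * as Documentation/kobject.txt requires. */
    static int alloc_csrows(struct mem_ctl_info *mci,
                            unsigned nr_csrows, unsigned nr_chans)
    {
            unsigned row, chn;

            mci->csrows = kcalloc(nr_csrows, sizeof(*mci->csrows),
                                  GFP_KERNEL);
            if (!mci->csrows)
                    return -ENOMEM;

            for (row = 0; row < nr_csrows; row++) {
                    struct csrow_info *csr;

                    csr = kzalloc(sizeof(*csr), GFP_KERNEL);
                    if (!csr)
                            return -ENOMEM; /* caller unwinds */
                    csr->csrow_idx = row;
                    csr->mci = mci;
                    mci->csrows[row] = csr;

                    csr->channels = kcalloc(nr_chans,
                                            sizeof(*csr->channels),
                                            GFP_KERNEL);
                    if (!csr->channels)
                            return -ENOMEM;

                    for (chn = 0; chn < nr_chans; chn++) {
                            struct rank_info *chan;

                            chan = kzalloc(sizeof(*chan), GFP_KERNEL);
                            if (!chan)
                                    return -ENOMEM;
                            chan->chan_idx = chn;
                            chan->csrow = csr;
                            csr->channels[chn] = chan;
                    }
            }
            return 0;
    }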
drivers/edac/cell_edac.c
1 /* 1 /*
2 * Cell MIC driver for ECC counting 2 * Cell MIC driver for ECC counting
3 * 3 *
4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. 4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org> 5 * <benh@kernel.crashing.org>
6 * 6 *
7 * This file may be distributed under the terms of the 7 * This file may be distributed under the terms of the
8 * GNU General Public License. 8 * GNU General Public License.
9 */ 9 */
10 #undef DEBUG 10 #undef DEBUG
11 11
12 #include <linux/edac.h> 12 #include <linux/edac.h>
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/platform_device.h> 15 #include <linux/platform_device.h>
16 #include <linux/stop_machine.h> 16 #include <linux/stop_machine.h>
17 #include <linux/io.h> 17 #include <linux/io.h>
18 #include <asm/machdep.h> 18 #include <asm/machdep.h>
19 #include <asm/cell-regs.h> 19 #include <asm/cell-regs.h>
20 20
21 #include "edac_core.h" 21 #include "edac_core.h"
22 22
23 struct cell_edac_priv 23 struct cell_edac_priv
24 { 24 {
25 struct cbe_mic_tm_regs __iomem *regs; 25 struct cbe_mic_tm_regs __iomem *regs;
26 int node; 26 int node;
27 int chanmask; 27 int chanmask;
28 #ifdef DEBUG 28 #ifdef DEBUG
29 u64 prev_fir; 29 u64 prev_fir;
30 #endif 30 #endif
31 }; 31 };
32 32
33 static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) 33 static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
34 { 34 {
35 struct cell_edac_priv *priv = mci->pvt_info; 35 struct cell_edac_priv *priv = mci->pvt_info;
36 struct csrow_info *csrow = &mci->csrows[0]; 36 struct csrow_info *csrow = mci->csrows[0];
37 unsigned long address, pfn, offset, syndrome; 37 unsigned long address, pfn, offset, syndrome;
38 38
39 dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", 39 dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
40 priv->node, chan, ar); 40 priv->node, chan, ar);
41 41
42 /* Address decoding is likely a bit bogus, needs double-checking */ 42 /* Address decoding is likely a bit bogus, needs double-checking */
43 address = (ar & 0xffffffffe0000000ul) >> 29; 43 address = (ar & 0xffffffffe0000000ul) >> 29;
44 if (priv->chanmask == 0x3) 44 if (priv->chanmask == 0x3)
45 address = (address << 1) | chan; 45 address = (address << 1) | chan;
46 pfn = address >> PAGE_SHIFT; 46 pfn = address >> PAGE_SHIFT;
47 offset = address & ~PAGE_MASK; 47 offset = address & ~PAGE_MASK;
48 syndrome = (ar & 0x000000001fe00000ul) >> 21; 48 syndrome = (ar & 0x000000001fe00000ul) >> 21;
49 49
50 /* TODO: Decoding of the error address */ 50 /* TODO: Decoding of the error address */
51 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 51 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
52 csrow->first_page + pfn, offset, syndrome, 52 csrow->first_page + pfn, offset, syndrome,
53 0, chan, -1, "", "", NULL); 53 0, chan, -1, "", "", NULL);
54 } 54 }
55 55
56 static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) 56 static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
57 { 57 {
58 struct cell_edac_priv *priv = mci->pvt_info; 58 struct cell_edac_priv *priv = mci->pvt_info;
59 struct csrow_info *csrow = &mci->csrows[0]; 59 struct csrow_info *csrow = mci->csrows[0];
60 unsigned long address, pfn, offset; 60 unsigned long address, pfn, offset;
61 61
62 dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", 62 dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
63 priv->node, chan, ar); 63 priv->node, chan, ar);
64 64
65 /* Address decoding is likely a bit bogus, needs double-checking */ 65 /* Address decoding is likely a bit bogus, needs double-checking */
66 address = (ar & 0xffffffffe0000000ul) >> 29; 66 address = (ar & 0xffffffffe0000000ul) >> 29;
67 if (priv->chanmask == 0x3) 67 if (priv->chanmask == 0x3)
68 address = (address << 1) | chan; 68 address = (address << 1) | chan;
69 pfn = address >> PAGE_SHIFT; 69 pfn = address >> PAGE_SHIFT;
70 offset = address & ~PAGE_MASK; 70 offset = address & ~PAGE_MASK;
71 71
72 /* TODO: Decoding of the error address */ 72 /* TODO: Decoding of the error address */
73 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 73 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
74 csrow->first_page + pfn, offset, 0, 74 csrow->first_page + pfn, offset, 0,
75 0, chan, -1, "", "", NULL); 75 0, chan, -1, "", "", NULL);
76 } 76 }
77 77
78 static void cell_edac_check(struct mem_ctl_info *mci) 78 static void cell_edac_check(struct mem_ctl_info *mci)
79 { 79 {
80 struct cell_edac_priv *priv = mci->pvt_info; 80 struct cell_edac_priv *priv = mci->pvt_info;
81 u64 fir, addreg, clear = 0; 81 u64 fir, addreg, clear = 0;
82 82
83 fir = in_be64(&priv->regs->mic_fir); 83 fir = in_be64(&priv->regs->mic_fir);
84 #ifdef DEBUG 84 #ifdef DEBUG
85 if (fir != priv->prev_fir) { 85 if (fir != priv->prev_fir) {
86 dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir); 86 dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
87 priv->prev_fir = fir; 87 priv->prev_fir = fir;
88 } 88 }
89 #endif 89 #endif
90 if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) { 90 if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
91 addreg = in_be64(&priv->regs->mic_df_ecc_address_0); 91 addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
92 clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET; 92 clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
93 cell_edac_count_ce(mci, 0, addreg); 93 cell_edac_count_ce(mci, 0, addreg);
94 } 94 }
95 if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) { 95 if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
96 addreg = in_be64(&priv->regs->mic_df_ecc_address_1); 96 addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
97 clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET; 97 clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
98 cell_edac_count_ce(mci, 1, addreg); 98 cell_edac_count_ce(mci, 1, addreg);
99 } 99 }
100 if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) { 100 if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
101 addreg = in_be64(&priv->regs->mic_df_ecc_address_0); 101 addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
102 clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET; 102 clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
103 cell_edac_count_ue(mci, 0, addreg); 103 cell_edac_count_ue(mci, 0, addreg);
104 } 104 }
105 if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) { 105 if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
106 addreg = in_be64(&priv->regs->mic_df_ecc_address_1); 106 addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
107 clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET; 107 clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
108 cell_edac_count_ue(mci, 1, addreg); 108 cell_edac_count_ue(mci, 1, addreg);
109 } 109 }
110 110
111 /* The procedure for clearing FIR bits is a bit ... weird */ 111 /* The procedure for clearing FIR bits is a bit ... weird */
112 if (clear) { 112 if (clear) {
113 fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK); 113 fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
114 fir |= CBE_MIC_FIR_ECC_RESET_MASK; 114 fir |= CBE_MIC_FIR_ECC_RESET_MASK;
115 fir &= ~clear; 115 fir &= ~clear;
116 out_be64(&priv->regs->mic_fir, fir); 116 out_be64(&priv->regs->mic_fir, fir);
117 (void)in_be64(&priv->regs->mic_fir); 117 (void)in_be64(&priv->regs->mic_fir);
118 118
119 mb(); /* sync up */ 119 mb(); /* sync up */
120 #ifdef DEBUG 120 #ifdef DEBUG
121 fir = in_be64(&priv->regs->mic_fir); 121 fir = in_be64(&priv->regs->mic_fir);
122 dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir); 122 dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
123 #endif 123 #endif
124 } 124 }
125 } 125 }
126 126
127 static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) 127 static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
128 { 128 {
129 struct csrow_info *csrow = &mci->csrows[0]; 129 struct csrow_info *csrow = mci->csrows[0];
130 struct dimm_info *dimm; 130 struct dimm_info *dimm;
131 struct cell_edac_priv *priv = mci->pvt_info; 131 struct cell_edac_priv *priv = mci->pvt_info;
132 struct device_node *np; 132 struct device_node *np;
133 int j; 133 int j;
134 u32 nr_pages; 134 u32 nr_pages;
135 135
136 for (np = NULL; 136 for (np = NULL;
137 (np = of_find_node_by_name(np, "memory")) != NULL;) { 137 (np = of_find_node_by_name(np, "memory")) != NULL;) {
138 struct resource r; 138 struct resource r;
139 139
140 /* We "know" that the Cell firmware only creates one entry 140 /* We "know" that the Cell firmware only creates one entry
141 * in the "memory" nodes. If that changes, this code will 141 * in the "memory" nodes. If that changes, this code will
142 * need to be adapted. 142 * need to be adapted.
143 */ 143 */
144 if (of_address_to_resource(np, 0, &r)) 144 if (of_address_to_resource(np, 0, &r))
145 continue; 145 continue;
146 if (of_node_to_nid(np) != priv->node) 146 if (of_node_to_nid(np) != priv->node)
147 continue; 147 continue;
148 csrow->first_page = r.start >> PAGE_SHIFT; 148 csrow->first_page = r.start >> PAGE_SHIFT;
149 nr_pages = resource_size(&r) >> PAGE_SHIFT; 149 nr_pages = resource_size(&r) >> PAGE_SHIFT;
150 csrow->last_page = csrow->first_page + nr_pages - 1; 150 csrow->last_page = csrow->first_page + nr_pages - 1;
151 151
152 for (j = 0; j < csrow->nr_channels; j++) { 152 for (j = 0; j < csrow->nr_channels; j++) {
153 dimm = csrow->channels[j].dimm; 153 dimm = csrow->channels[j]->dimm;
154 dimm->mtype = MEM_XDR; 154 dimm->mtype = MEM_XDR;
155 dimm->edac_mode = EDAC_SECDED; 155 dimm->edac_mode = EDAC_SECDED;
156 dimm->nr_pages = nr_pages / csrow->nr_channels; 156 dimm->nr_pages = nr_pages / csrow->nr_channels;
157 } 157 }
158 dev_dbg(mci->pdev, 158 dev_dbg(mci->pdev,
159 "Initialized on node %d, chanmask=0x%x," 159 "Initialized on node %d, chanmask=0x%x,"
160 " first_page=0x%lx, nr_pages=0x%x\n", 160 " first_page=0x%lx, nr_pages=0x%x\n",
161 priv->node, priv->chanmask, 161 priv->node, priv->chanmask,
162 csrow->first_page, nr_pages); 162 csrow->first_page, nr_pages);
163 break; 163 break;
164 } 164 }
165 } 165 }
166 166
167 static int __devinit cell_edac_probe(struct platform_device *pdev) 167 static int __devinit cell_edac_probe(struct platform_device *pdev)
168 { 168 {
169 struct cbe_mic_tm_regs __iomem *regs; 169 struct cbe_mic_tm_regs __iomem *regs;
170 struct mem_ctl_info *mci; 170 struct mem_ctl_info *mci;
171 struct edac_mc_layer layers[2]; 171 struct edac_mc_layer layers[2];
172 struct cell_edac_priv *priv; 172 struct cell_edac_priv *priv;
173 u64 reg; 173 u64 reg;
174 int rc, chanmask, num_chans; 174 int rc, chanmask, num_chans;
175 175
176 regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id)); 176 regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
177 if (regs == NULL) 177 if (regs == NULL)
178 return -ENODEV; 178 return -ENODEV;
179 179
180 edac_op_state = EDAC_OPSTATE_POLL; 180 edac_op_state = EDAC_OPSTATE_POLL;
181 181
182 /* Get channel population */ 182 /* Get channel population */
183 reg = in_be64(&regs->mic_mnt_cfg); 183 reg = in_be64(&regs->mic_mnt_cfg);
184 dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg); 184 dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
185 chanmask = 0; 185 chanmask = 0;
186 if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP) 186 if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
187 chanmask |= 0x1; 187 chanmask |= 0x1;
188 if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP) 188 if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
189 chanmask |= 0x2; 189 chanmask |= 0x2;
190 if (chanmask == 0) { 190 if (chanmask == 0) {
191 dev_warn(&pdev->dev, 191 dev_warn(&pdev->dev,
192 "Yuck ! No channel populated ? Aborting !\n"); 192 "Yuck ! No channel populated ? Aborting !\n");
193 return -ENODEV; 193 return -ENODEV;
194 } 194 }
195 dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n", 195 dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
196 in_be64(&regs->mic_fir)); 196 in_be64(&regs->mic_fir));
197 197
198 /* Allocate & init EDAC MC data structure */ 198 /* Allocate & init EDAC MC data structure */
199 num_chans = chanmask == 3 ? 2 : 1; 199 num_chans = chanmask == 3 ? 2 : 1;
200 200
201 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 201 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
202 layers[0].size = 1; 202 layers[0].size = 1;
203 layers[0].is_virt_csrow = true; 203 layers[0].is_virt_csrow = true;
204 layers[1].type = EDAC_MC_LAYER_CHANNEL; 204 layers[1].type = EDAC_MC_LAYER_CHANNEL;
205 layers[1].size = num_chans; 205 layers[1].size = num_chans;
206 layers[1].is_virt_csrow = false; 206 layers[1].is_virt_csrow = false;
207 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers, 207 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
208 sizeof(struct cell_edac_priv)); 208 sizeof(struct cell_edac_priv));
209 if (mci == NULL) 209 if (mci == NULL)
210 return -ENOMEM; 210 return -ENOMEM;
211 priv = mci->pvt_info; 211 priv = mci->pvt_info;
212 priv->regs = regs; 212 priv->regs = regs;
213 priv->node = pdev->id; 213 priv->node = pdev->id;
214 priv->chanmask = chanmask; 214 priv->chanmask = chanmask;
215 mci->pdev = &pdev->dev; 215 mci->pdev = &pdev->dev;
216 mci->mtype_cap = MEM_FLAG_XDR; 216 mci->mtype_cap = MEM_FLAG_XDR;
217 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 217 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
218 mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED; 218 mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
219 mci->mod_name = "cell_edac"; 219 mci->mod_name = "cell_edac";
220 mci->ctl_name = "MIC"; 220 mci->ctl_name = "MIC";
221 mci->dev_name = dev_name(&pdev->dev); 221 mci->dev_name = dev_name(&pdev->dev);
222 mci->edac_check = cell_edac_check; 222 mci->edac_check = cell_edac_check;
223 cell_edac_init_csrows(mci); 223 cell_edac_init_csrows(mci);
224 224
225 /* Register with EDAC core */ 225 /* Register with EDAC core */
226 rc = edac_mc_add_mc(mci); 226 rc = edac_mc_add_mc(mci);
227 if (rc) { 227 if (rc) {
228 dev_err(&pdev->dev, "failed to register with EDAC core\n"); 228 dev_err(&pdev->dev, "failed to register with EDAC core\n");
229 edac_mc_free(mci); 229 edac_mc_free(mci);
230 return rc; 230 return rc;
231 } 231 }
232 232
233 return 0; 233 return 0;
234 } 234 }
235 235
236 static int __devexit cell_edac_remove(struct platform_device *pdev) 236 static int __devexit cell_edac_remove(struct platform_device *pdev)
237 { 237 {
238 struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); 238 struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
239 if (mci) 239 if (mci)
240 edac_mc_free(mci); 240 edac_mc_free(mci);
241 return 0; 241 return 0;
242 } 242 }
243 243
244 static struct platform_driver cell_edac_driver = { 244 static struct platform_driver cell_edac_driver = {
245 .driver = { 245 .driver = {
246 .name = "cbe-mic", 246 .name = "cbe-mic",
247 .owner = THIS_MODULE, 247 .owner = THIS_MODULE,
248 }, 248 },
249 .probe = cell_edac_probe, 249 .probe = cell_edac_probe,
250 .remove = __devexit_p(cell_edac_remove), 250 .remove = __devexit_p(cell_edac_remove),
251 }; 251 };
252 252
253 static int __init cell_edac_init(void) 253 static int __init cell_edac_init(void)
254 { 254 {
255 /* Sanity check registers data structure */ 255 /* Sanity check registers data structure */
256 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, 256 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
257 mic_df_ecc_address_0) != 0xf8); 257 mic_df_ecc_address_0) != 0xf8);
258 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, 258 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
259 mic_df_ecc_address_1) != 0x1b8); 259 mic_df_ecc_address_1) != 0x1b8);
260 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, 260 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
261 mic_df_config) != 0x218); 261 mic_df_config) != 0x218);
262 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, 262 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
263 mic_fir) != 0x230); 263 mic_fir) != 0x230);
264 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, 264 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
265 mic_mnt_cfg) != 0x210); 265 mic_mnt_cfg) != 0x210);
266 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs, 266 BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
267 mic_exc) != 0x208); 267 mic_exc) != 0x208);
268 268
269 return platform_driver_register(&cell_edac_driver); 269 return platform_driver_register(&cell_edac_driver);
270 } 270 }
271 271
272 static void __exit cell_edac_exit(void) 272 static void __exit cell_edac_exit(void)
273 { 273 {
274 platform_driver_unregister(&cell_edac_driver); 274 platform_driver_unregister(&cell_edac_driver);
275 } 275 }
276 276
277 module_init(cell_edac_init); 277 module_init(cell_edac_init);
278 module_exit(cell_edac_exit); 278 module_exit(cell_edac_exit);
279 279
280 MODULE_LICENSE("GPL"); 280 MODULE_LICENSE("GPL");
281 MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); 281 MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
282 MODULE_DESCRIPTION("ECC counting for Cell MIC"); 282 MODULE_DESCRIPTION("ECC counting for Cell MIC");
283 283
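Note: both drivers above keep the same probe skeleton, and the new allocation scheme does not change it: describe the layers, allocate, fill in the rows, register, and free on failure. Condensed from the two probes above (nr_csrows, nr_chans, mc_idx, and struct my_priv stand in for the per-driver values):

    struct edac_mc_layer layers[2];
    struct mem_ctl_info *mci;

    layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
    layers[0].size = nr_csrows;             /* e.g. AMD76X_NR_CSROWS */
    layers[0].is_virt_csrow = true;
    layers[1].type = EDAC_MC_LAYER_CHANNEL;
    layers[1].size = nr_chans;
    layers[1].is_virt_csrow = false;

    mci = edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers,
                        sizeof(struct my_priv)); /* 0 if no pvt data */
    if (!mci)
            return -ENOMEM;

    /* ... fill mci fields and initialize the csrows ... */

    if (edac_mc_add_mc(mci)) {
            edac_mc_free(mci); /* releases every per-object allocation */
            return -ENODEV;
    }
    return 0;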
drivers/edac/cpc925_edac.c
1 /* 1 /*
2 * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller. 2 * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
3 * 3 *
4 * Copyright (c) 2008 Wind River Systems, Inc. 4 * Copyright (c) 2008 Wind River Systems, Inc.
5 * 5 *
6 * Authors: Cao Qingtao <qingtao.cao@windriver.com> 6 * Authors: Cao Qingtao <qingtao.cao@windriver.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, 12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 * See the GNU General Public License for more details. 15 * See the GNU General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/init.h> 23 #include <linux/init.h>
24 #include <linux/io.h> 24 #include <linux/io.h>
25 #include <linux/edac.h> 25 #include <linux/edac.h>
26 #include <linux/of.h> 26 #include <linux/of.h>
27 #include <linux/platform_device.h> 27 #include <linux/platform_device.h>
28 #include <linux/gfp.h> 28 #include <linux/gfp.h>
29 29
30 #include "edac_core.h" 30 #include "edac_core.h"
31 #include "edac_module.h" 31 #include "edac_module.h"
32 32
33 #define CPC925_EDAC_REVISION " Ver: 1.0.0" 33 #define CPC925_EDAC_REVISION " Ver: 1.0.0"
34 #define CPC925_EDAC_MOD_STR "cpc925_edac" 34 #define CPC925_EDAC_MOD_STR "cpc925_edac"
35 35
36 #define cpc925_printk(level, fmt, arg...) \ 36 #define cpc925_printk(level, fmt, arg...) \
37 edac_printk(level, "CPC925", fmt, ##arg) 37 edac_printk(level, "CPC925", fmt, ##arg)
38 38
39 #define cpc925_mc_printk(mci, level, fmt, arg...) \ 39 #define cpc925_mc_printk(mci, level, fmt, arg...) \
40 edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg) 40 edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
41 41
42 /* 42 /*
43 * CPC925 registers are 32 bits wide, with bit0 defined as the 43 * CPC925 registers are 32 bits wide, with bit0 defined as the
44 * most significant bit and bit31 as the least significant. 44 * most significant bit and bit31 as the least significant.
45 */ 45 */
46 #define CPC925_BITS_PER_REG 32 46 #define CPC925_BITS_PER_REG 32
47 #define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr)) 47 #define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr))
48 48
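Note: CPC925_BIT() translates the manual's IBM-style numbering (bit0 is the most significant bit) into ordinary mask values, so for example:

    CPC925_BIT(0)  == 1UL << 31   /* 0x80000000, the MSB */
    CPC925_BIT(31) == 1UL << 0    /* 0x00000001, the LSB */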
49 /* 49 /*
50 * EDAC device names for the error detections of 50 * EDAC device names for the error detections of
51 * CPU Interface and Hypertransport Link. 51 * CPU Interface and Hypertransport Link.
52 */ 52 */
53 #define CPC925_CPU_ERR_DEV "cpu" 53 #define CPC925_CPU_ERR_DEV "cpu"
54 #define CPC925_HT_LINK_DEV "htlink" 54 #define CPC925_HT_LINK_DEV "htlink"
55 55
56 /* Assume the DDR refresh cycle is 15.6 microseconds */ 56 /* Assume the DDR refresh cycle is 15.6 microseconds */
57 #define CPC925_REF_FREQ 0xFA69 57 #define CPC925_REF_FREQ 0xFA69
58 #define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */ 58 #define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */
59 #define CPC925_NR_CSROWS 8 59 #define CPC925_NR_CSROWS 8
60 60
61 /* 61 /*
62 * All registers and bits definitions are taken from 62 * All registers and bits definitions are taken from
63 * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02". 63 * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
64 */ 64 */
65 65
66 /* 66 /*
67 * CPU and Memory Controller Registers 67 * CPU and Memory Controller Registers
68 */ 68 */
69 /************************************************************ 69 /************************************************************
70 * Processor Interface Exception Mask Register (APIMASK) 70 * Processor Interface Exception Mask Register (APIMASK)
71 ************************************************************/ 71 ************************************************************/
72 #define REG_APIMASK_OFFSET 0x30070 72 #define REG_APIMASK_OFFSET 0x30070
73 enum apimask_bits { 73 enum apimask_bits {
74 APIMASK_DART = CPC925_BIT(0), /* DART Exception */ 74 APIMASK_DART = CPC925_BIT(0), /* DART Exception */
75 APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ 75 APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
76 APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ 76 APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
77 APIMASK_STAT = CPC925_BIT(3), /* Status Exception */ 77 APIMASK_STAT = CPC925_BIT(3), /* Status Exception */
78 APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */ 78 APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */
79 APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ 79 APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
80 APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ 80 APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
81 /* BIT(7) Reserved */ 81 /* BIT(7) Reserved */
82 APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ 82 APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
83 APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ 83 APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
84 APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ 84 APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
85 APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ 85 APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
86 86
87 CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 | 87 CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
88 APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 | 88 APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
89 APIMASK_ADRS1), 89 APIMASK_ADRS1),
90 ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | 90 ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
91 APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), 91 APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
92 }; 92 };
93 #define APIMASK_ADI(n) CPC925_BIT(((n)+1)) 93 #define APIMASK_ADI(n) CPC925_BIT(((n)+1))
94 94
95 /************************************************************ 95 /************************************************************
96 * Processor Interface Exception Register (APIEXCP) 96 * Processor Interface Exception Register (APIEXCP)
97 ************************************************************/ 97 ************************************************************/
98 #define REG_APIEXCP_OFFSET 0x30060 98 #define REG_APIEXCP_OFFSET 0x30060
99 enum apiexcp_bits { 99 enum apiexcp_bits {
100 APIEXCP_DART = CPC925_BIT(0), /* DART Exception */ 100 APIEXCP_DART = CPC925_BIT(0), /* DART Exception */
101 APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ 101 APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
102 APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ 102 APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
103 APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */ 103 APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */
104 APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */ 104 APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */
105 APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ 105 APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
106 APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ 106 APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
107 /* BIT(7) Reserved */ 107 /* BIT(7) Reserved */
108 APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ 108 APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
109 APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ 109 APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
110 APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ 110 APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
111 APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ 111 APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
112 112
113 CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 | 113 CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
114 APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 | 114 APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
115 APIEXCP_ADRS1), 115 APIEXCP_ADRS1),
116 UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L), 116 UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
117 CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L), 117 CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
118 ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED), 118 ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
119 }; 119 };
120 120
121 /************************************************************ 121 /************************************************************
122 * Memory Bus Configuration Register (MBCR) 122 * Memory Bus Configuration Register (MBCR)
123 ************************************************************/ 123 ************************************************************/
124 #define REG_MBCR_OFFSET 0x2190 124 #define REG_MBCR_OFFSET 0x2190
125 #define MBCR_64BITCFG_SHIFT 23 125 #define MBCR_64BITCFG_SHIFT 23
126 #define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT) 126 #define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT)
127 #define MBCR_64BITBUS_SHIFT 22 127 #define MBCR_64BITBUS_SHIFT 22
128 #define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT) 128 #define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT)
129 129
130 /************************************************************ 130 /************************************************************
131 * Memory Bank Mode Register (MBMR) 131 * Memory Bank Mode Register (MBMR)
132 ************************************************************/ 132 ************************************************************/
133 #define REG_MBMR_OFFSET 0x21C0 133 #define REG_MBMR_OFFSET 0x21C0
134 #define MBMR_MODE_MAX_VALUE 0xF 134 #define MBMR_MODE_MAX_VALUE 0xF
135 #define MBMR_MODE_SHIFT 25 135 #define MBMR_MODE_SHIFT 25
136 #define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT) 136 #define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
137 #define MBMR_BBA_SHIFT 24 137 #define MBMR_BBA_SHIFT 24
138 #define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT) 138 #define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT)
139 139
140 /************************************************************ 140 /************************************************************
141 * Memory Bank Boundary Address Register (MBBAR) 141 * Memory Bank Boundary Address Register (MBBAR)
142 ************************************************************/ 142 ************************************************************/
143 #define REG_MBBAR_OFFSET 0x21D0 143 #define REG_MBBAR_OFFSET 0x21D0
144 #define MBBAR_BBA_MAX_VALUE 0xFF 144 #define MBBAR_BBA_MAX_VALUE 0xFF
145 #define MBBAR_BBA_SHIFT 24 145 #define MBBAR_BBA_SHIFT 24
146 #define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT) 146 #define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
147 147
148 /************************************************************ 148 /************************************************************
149 * Memory Scrub Control Register (MSCR) 149 * Memory Scrub Control Register (MSCR)
150 ************************************************************/ 150 ************************************************************/
151 #define REG_MSCR_OFFSET 0x2400 151 #define REG_MSCR_OFFSET 0x2400
152 #define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/ 152 #define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/
153 #define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */ 153 #define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */
154 #define MSCR_SI_SHIFT 16 /* si - bit8:15*/ 154 #define MSCR_SI_SHIFT 16 /* si - bit8:15*/
155 #define MSCR_SI_MAX_VALUE 0xFF 155 #define MSCR_SI_MAX_VALUE 0xFF
156 #define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT) 156 #define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
157 157
158 /************************************************************ 158 /************************************************************
159 * Memory Scrub Range Start Register (MSRSR) 159 * Memory Scrub Range Start Register (MSRSR)
160 ************************************************************/ 160 ************************************************************/
161 #define REG_MSRSR_OFFSET 0x2410 161 #define REG_MSRSR_OFFSET 0x2410
162 162
163 /************************************************************ 163 /************************************************************
164 * Memory Scrub Range End Register (MSRER) 164 * Memory Scrub Range End Register (MSRER)
165 ************************************************************/ 165 ************************************************************/
166 #define REG_MSRER_OFFSET 0x2420 166 #define REG_MSRER_OFFSET 0x2420
167 167
168 /************************************************************ 168 /************************************************************
169 * Memory Scrub Pattern Register (MSPR) 169 * Memory Scrub Pattern Register (MSPR)
170 ************************************************************/ 170 ************************************************************/
171 #define REG_MSPR_OFFSET 0x2430 171 #define REG_MSPR_OFFSET 0x2430
172 172
173 /************************************************************ 173 /************************************************************
174 * Memory Check Control Register (MCCR) 174 * Memory Check Control Register (MCCR)
175 ************************************************************/ 175 ************************************************************/
176 #define REG_MCCR_OFFSET 0x2440 176 #define REG_MCCR_OFFSET 0x2440
177 enum mccr_bits { 177 enum mccr_bits {
178 MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */ 178 MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */
179 }; 179 };
180 180
181 /************************************************************ 181 /************************************************************
182 * Memory Check Range End Register (MCRER) 182 * Memory Check Range End Register (MCRER)
183 ************************************************************/ 183 ************************************************************/
184 #define REG_MCRER_OFFSET 0x2450 184 #define REG_MCRER_OFFSET 0x2450
185 185
186 /************************************************************ 186 /************************************************************
187 * Memory Error Address Register (MEAR) 187 * Memory Error Address Register (MEAR)
188 ************************************************************/ 188 ************************************************************/
189 #define REG_MEAR_OFFSET 0x2460 189 #define REG_MEAR_OFFSET 0x2460
190 #define MEAR_BCNT_MAX_VALUE 0x3 190 #define MEAR_BCNT_MAX_VALUE 0x3
191 #define MEAR_BCNT_SHIFT 30 191 #define MEAR_BCNT_SHIFT 30
192 #define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT) 192 #define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
193 #define MEAR_RANK_MAX_VALUE 0x7 193 #define MEAR_RANK_MAX_VALUE 0x7
194 #define MEAR_RANK_SHIFT 27 194 #define MEAR_RANK_SHIFT 27
195 #define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT) 195 #define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
196 #define MEAR_COL_MAX_VALUE 0x7FF 196 #define MEAR_COL_MAX_VALUE 0x7FF
197 #define MEAR_COL_SHIFT 16 197 #define MEAR_COL_SHIFT 16
198 #define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT) 198 #define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
199 #define MEAR_BANK_MAX_VALUE 0x3 199 #define MEAR_BANK_MAX_VALUE 0x3
200 #define MEAR_BANK_SHIFT 14 200 #define MEAR_BANK_SHIFT 14
201 #define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT) 201 #define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
202 #define MEAR_ROW_MASK 0x00003FFF 202 #define MEAR_ROW_MASK 0x00003FFF
203 203
204 /************************************************************ 204 /************************************************************
205 * Memory Error Syndrome Register (MESR) 205 * Memory Error Syndrome Register (MESR)
206 ************************************************************/ 206 ************************************************************/
207 #define REG_MESR_OFFSET 0x2470 207 #define REG_MESR_OFFSET 0x2470
208 #define MESR_ECC_SYN_H_MASK 0xFF00 208 #define MESR_ECC_SYN_H_MASK 0xFF00
209 #define MESR_ECC_SYN_L_MASK 0x00FF 209 #define MESR_ECC_SYN_L_MASK 0x00FF
210 210
211 /************************************************************ 211 /************************************************************
212 * Memory Mode Control Register (MMCR) 212 * Memory Mode Control Register (MMCR)
213 ************************************************************/ 213 ************************************************************/
214 #define REG_MMCR_OFFSET 0x2500 214 #define REG_MMCR_OFFSET 0x2500
215 enum mmcr_bits { 215 enum mmcr_bits {
216 MMCR_REG_DIMM_MODE = CPC925_BIT(3), 216 MMCR_REG_DIMM_MODE = CPC925_BIT(3),
217 }; 217 };
218 218
219 /* 219 /*
220 * HyperTransport Link Registers 220 * HyperTransport Link Registers
221 */ 221 */
222 /************************************************************ 222 /************************************************************
223 * Error Handling/Enumeration Scratch Pad Register (ERRCTRL) 223 * Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
224 ************************************************************/ 224 ************************************************************/
225 #define REG_ERRCTRL_OFFSET 0x70140 225 #define REG_ERRCTRL_OFFSET 0x70140
226 enum errctrl_bits { /* nonfatal interrupts for */ 226 enum errctrl_bits { /* nonfatal interrupts for */
227 ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */ 227 ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */
228 ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */ 228 ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */
229 ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */ 229 ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */
230 ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */ 230 ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */
231 ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */ 231 ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */
232 ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */ 232 ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */
233 233
234 ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */ 234 ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */
235 ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */ 235 ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
236 236
237 HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF | 237 HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
238 ERRCTRL_RSP_NF | ERRCTRL_EOC_NF | 238 ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
239 ERRCTRL_OVF_NF | ERRCTRL_PROT_NF), 239 ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
240 HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL), 240 HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
241 }; 241 };
242 242
243 /************************************************************ 243 /************************************************************
244 * Link Configuration and Link Control Register (LINKCTRL) 244 * Link Configuration and Link Control Register (LINKCTRL)
245 ************************************************************/ 245 ************************************************************/
246 #define REG_LINKCTRL_OFFSET 0x70110 246 #define REG_LINKCTRL_OFFSET 0x70110
247 enum linkctrl_bits { 247 enum linkctrl_bits {
248 LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)), 248 LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)),
249 LINKCTRL_LINK_FAIL = CPC925_BIT(27), 249 LINKCTRL_LINK_FAIL = CPC925_BIT(27),
250 250
251 HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL), 251 HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
252 }; 252 };
253 253
254 /************************************************************ 254 /************************************************************
255 * Link FreqCap/Error/Freq/Revision ID Register (LINKERR) 255 * Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
256 ************************************************************/ 256 ************************************************************/
257 #define REG_LINKERR_OFFSET 0x70120 257 #define REG_LINKERR_OFFSET 0x70120
258 enum linkerr_bits { 258 enum linkerr_bits {
259 LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */ 259 LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */
260 LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */ 260 LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */
261 LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */ 261 LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */
262 262
263 HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR | 263 HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
264 LINKERR_PROT_ERR), 264 LINKERR_PROT_ERR),
265 }; 265 };
266 266
267 /************************************************************ 267 /************************************************************
268 * Bridge Control Register (BRGCTRL) 268 * Bridge Control Register (BRGCTRL)
269 ************************************************************/ 269 ************************************************************/
270 #define REG_BRGCTRL_OFFSET 0x70300 270 #define REG_BRGCTRL_OFFSET 0x70300
271 enum brgctrl_bits { 271 enum brgctrl_bits {
272 BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */ 272 BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
273 BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */ 273 BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
274 }; 274 };
275 275
276 /* Private structure for edac memory controller */ 276 /* Private structure for edac memory controller */
277 struct cpc925_mc_pdata { 277 struct cpc925_mc_pdata {
278 void __iomem *vbase; 278 void __iomem *vbase;
279 unsigned long total_mem; 279 unsigned long total_mem;
280 const char *name; 280 const char *name;
281 int edac_idx; 281 int edac_idx;
282 }; 282 };
283 283
284 /* Private structure for common edac device */ 284 /* Private structure for common edac device */
285 struct cpc925_dev_info { 285 struct cpc925_dev_info {
286 void __iomem *vbase; 286 void __iomem *vbase;
287 struct platform_device *pdev; 287 struct platform_device *pdev;
288 char *ctl_name; 288 char *ctl_name;
289 int edac_idx; 289 int edac_idx;
290 struct edac_device_ctl_info *edac_dev; 290 struct edac_device_ctl_info *edac_dev;
291 void (*init)(struct cpc925_dev_info *dev_info); 291 void (*init)(struct cpc925_dev_info *dev_info);
292 void (*exit)(struct cpc925_dev_info *dev_info); 292 void (*exit)(struct cpc925_dev_info *dev_info);
293 void (*check)(struct edac_device_ctl_info *edac_dev); 293 void (*check)(struct edac_device_ctl_info *edac_dev);
294 }; 294 };
295 295
296 /* Get total memory size from Open Firmware DTB */ 296 /* Get total memory size from Open Firmware DTB */
297 static void get_total_mem(struct cpc925_mc_pdata *pdata) 297 static void get_total_mem(struct cpc925_mc_pdata *pdata)
298 { 298 {
299 struct device_node *np = NULL; 299 struct device_node *np = NULL;
300 const unsigned int *reg, *reg_end; 300 const unsigned int *reg, *reg_end;
301 int len, sw, aw; 301 int len, sw, aw;
302 unsigned long start, size; 302 unsigned long start, size;
303 303
304 np = of_find_node_by_type(NULL, "memory"); 304 np = of_find_node_by_type(NULL, "memory");
305 if (!np) 305 if (!np)
306 return; 306 return;
307 307
308 aw = of_n_addr_cells(np); 308 aw = of_n_addr_cells(np);
309 sw = of_n_size_cells(np); 309 sw = of_n_size_cells(np);
310 reg = (const unsigned int *)of_get_property(np, "reg", &len); 310 reg = (const unsigned int *)of_get_property(np, "reg", &len);
311 reg_end = reg + len/4; 311 reg_end = reg + len/4;
312 312
313 pdata->total_mem = 0; 313 pdata->total_mem = 0;
314 do { 314 do {
315 start = of_read_number(reg, aw); 315 start = of_read_number(reg, aw);
316 reg += aw; 316 reg += aw;
317 size = of_read_number(reg, sw); 317 size = of_read_number(reg, sw);
318 reg += sw; 318 reg += sw;
319 debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, 319 debugf1("%s: start 0x%lx, size 0x%lx\n", __func__,
320 start, size); 320 start, size);
321 pdata->total_mem += size; 321 pdata->total_mem += size;
322 } while (reg < reg_end); 322 } while (reg < reg_end);
323 323
324 of_node_put(np); 324 of_node_put(np);
325 debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); 325 debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem);
326 } 326 }
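The reg property walked above is a flat array of 32-bit cells: each entry is of_n_addr_cells() address cells followed by of_n_size_cells() size cells, and the loop simply sums the sizes. A worked example, with the cell counts assumed for illustration:

	/*
	 * Example (assumed cell counts): aw = 2, sw = 1, so each entry is
	 * three cells, and len = 24 bytes gives reg_end = reg + 6:
	 *
	 *	reg = < 0x0 0x00000000 0x40000000	start 0x0,          1 GiB
	 *	        0x1 0x00000000 0x40000000 >	start 0x1_00000000, 1 GiB
	 *
	 * total_mem accumulates to 0x80000000 (2 GiB).
	 */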
327 327
328 static void cpc925_init_csrows(struct mem_ctl_info *mci) 328 static void cpc925_init_csrows(struct mem_ctl_info *mci)
329 { 329 {
330 struct cpc925_mc_pdata *pdata = mci->pvt_info; 330 struct cpc925_mc_pdata *pdata = mci->pvt_info;
331 struct csrow_info *csrow; 331 struct csrow_info *csrow;
332 struct dimm_info *dimm; 332 struct dimm_info *dimm;
333 enum dev_type dtype; 333 enum dev_type dtype;
334 int index, j; 334 int index, j;
335 u32 mbmr, mbbar, bba, grain; 335 u32 mbmr, mbbar, bba, grain;
336 unsigned long row_size, nr_pages, last_nr_pages = 0; 336 unsigned long row_size, nr_pages, last_nr_pages = 0;
337 337
338 get_total_mem(pdata); 338 get_total_mem(pdata);
339 339
340 for (index = 0; index < mci->nr_csrows; index++) { 340 for (index = 0; index < mci->nr_csrows; index++) {
341 mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET + 341 mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
342 0x20 * index); 342 0x20 * index);
343 mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET + 343 mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
344 0x20 * index); 344 0x20 * index);
345 bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) | 345 bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
346 ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT); 346 ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
347 347
348 if (bba == 0) 348 if (bba == 0)
349 continue; /* not populated */ 349 continue; /* not populated */
350 350
351 csrow = &mci->csrows[index]; 351 csrow = mci->csrows[index];
352 352
353 row_size = bba * (1UL << 28); /* 256M */ 353 row_size = bba * (1UL << 28); /* 256M */
354 csrow->first_page = last_nr_pages; 354 csrow->first_page = last_nr_pages;
355 nr_pages = row_size >> PAGE_SHIFT; 355 nr_pages = row_size >> PAGE_SHIFT;
356 csrow->last_page = csrow->first_page + nr_pages - 1; 356 csrow->last_page = csrow->first_page + nr_pages - 1;
357 last_nr_pages = csrow->last_page + 1; 357 last_nr_pages = csrow->last_page + 1;
358 358
359 switch (csrow->nr_channels) { 359 switch (csrow->nr_channels) {
360 case 1: /* Single channel */ 360 case 1: /* Single channel */
361 grain = 32; /* four-beat burst of 32 bytes */ 361 grain = 32; /* four-beat burst of 32 bytes */
362 break; 362 break;
363 case 2: /* Dual channel */ 363 case 2: /* Dual channel */
364 default: 364 default:
365 grain = 64; /* four-beat burst of 64 bytes */ 365 grain = 64; /* four-beat burst of 64 bytes */
366 break; 366 break;
367 } 367 }
368 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { 368 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
369 case 6: /* 0110, no way to differentiate X8 VS X16 */ 369 case 6: /* 0110, no way to differentiate X8 VS X16 */
370 case 5: /* 0101 */ 370 case 5: /* 0101 */
371 case 8: /* 1000 */ 371 case 8: /* 1000 */
372 dtype = DEV_X16; 372 dtype = DEV_X16;
373 break; 373 break;
374 case 7: /* 0111 */ 374 case 7: /* 0111 */
375 case 9: /* 1001 */ 375 case 9: /* 1001 */
376 dtype = DEV_X8; 376 dtype = DEV_X8;
377 break; 377 break;
378 default: 378 default:
379 dtype = DEV_UNKNOWN; 379 dtype = DEV_UNKNOWN;
380 break; 380 break;
381 } 381 }
382 for (j = 0; j < csrow->nr_channels; j++) { 382 for (j = 0; j < csrow->nr_channels; j++) {
383 dimm = csrow->channels[j].dimm; 383 dimm = csrow->channels[j]->dimm;
384 dimm->nr_pages = nr_pages / csrow->nr_channels; 384 dimm->nr_pages = nr_pages / csrow->nr_channels;
385 dimm->mtype = MEM_RDDR; 385 dimm->mtype = MEM_RDDR;
386 dimm->edac_mode = EDAC_SECDED; 386 dimm->edac_mode = EDAC_SECDED;
387 dimm->grain = grain; 387 dimm->grain = grain;
388 dimm->dtype = dtype; 388 dimm->dtype = dtype;
389 } 389 }
390 } 390 }
391 } 391 }
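The two hunks above (csrows[index] and channels[j] turning into pointer dereferences) are the driver-visible side of the new allocation scheme: every csrow and channel now carries its own kobject and must therefore be a standalone allocation. A minimal sketch of what edac_mc_alloc() now does internally, with most error handling trimmed; the field names (csrow_idx, mci, chan_idx, csrow) are the existing edac_core ones:

	static int alloc_csrows_sketch(struct mem_ctl_info *mci,
				       unsigned tot_csrows, unsigned tot_channels)
	{
		unsigned row, chn;

		mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
		if (!mci->csrows)
			return -ENOMEM;

		for (row = 0; row < tot_csrows; row++) {
			struct csrow_info *csr = kzalloc(sizeof(*csr), GFP_KERNEL);

			csr->csrow_idx = row;
			csr->mci = mci;
			mci->csrows[row] = csr;	/* array of pointers, not of structs */

			csr->channels = kcalloc(tot_channels,
						sizeof(*csr->channels), GFP_KERNEL);
			for (chn = 0; chn < tot_channels; chn++) {
				struct rank_info *chan =
					kzalloc(sizeof(*chan), GFP_KERNEL);

				chan->chan_idx = chn;
				chan->csrow = csr;
				csr->channels[chn] = chan;
			}
		}
		return 0;
	}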
392 392
393 /* Enable memory controller ECC detection */ 393 /* Enable memory controller ECC detection */
394 static void cpc925_mc_init(struct mem_ctl_info *mci) 394 static void cpc925_mc_init(struct mem_ctl_info *mci)
395 { 395 {
396 struct cpc925_mc_pdata *pdata = mci->pvt_info; 396 struct cpc925_mc_pdata *pdata = mci->pvt_info;
397 u32 apimask; 397 u32 apimask;
398 u32 mccr; 398 u32 mccr;
399 399
400 /* Enable various ECC error exceptions */ 400 /* Enable various ECC error exceptions */
401 apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET); 401 apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
402 if ((apimask & ECC_MASK_ENABLE) == 0) { 402 if ((apimask & ECC_MASK_ENABLE) == 0) {
403 apimask |= ECC_MASK_ENABLE; 403 apimask |= ECC_MASK_ENABLE;
404 __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET); 404 __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
405 } 405 }
406 406
407 /* Enable ECC detection */ 407 /* Enable ECC detection */
408 mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET); 408 mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
409 if ((mccr & MCCR_ECC_EN) == 0) { 409 if ((mccr & MCCR_ECC_EN) == 0) {
410 mccr |= MCCR_ECC_EN; 410 mccr |= MCCR_ECC_EN;
411 __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET); 411 __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
412 } 412 }
413 } 413 }
414 414
415 /* Disable memory controller ECC detection */ 415 /* Disable memory controller ECC detection */
416 static void cpc925_mc_exit(struct mem_ctl_info *mci) 416 static void cpc925_mc_exit(struct mem_ctl_info *mci)
417 { 417 {
418 /* 418 /*
419 * WARNING: 419 * WARNING:
420 * We are supposed to clear the ECC error detection bits, 420 * We are supposed to clear the ECC error detection bits,
421 * and doing so would be no problem. However, once they 421 * and doing so would be no problem. However, once they
422 * are cleared here and we re-install the CPC925 EDAC 422 * are cleared here and we re-install the CPC925 EDAC
423 * module later, setting them up again in cpc925_mc_init() will 423 * module later, setting them up again in cpc925_mc_init() will
424 * trigger a machine check exception. 424 * trigger a machine check exception.
425 * Also, it is OK to leave the ECC error detection bits enabled, 425 * Also, it is OK to leave the ECC error detection bits enabled,
426 * since they are reset to 1 by default or by the boot loader. 426 * since they are reset to 1 by default or by the boot loader.
427 */ 427 */
428 428
429 return; 429 return;
430 } 430 }
431 431
432 /* 432 /*
433 * Reverse-map DDR column/row/bank addresses into a page frame 433 * Reverse-map DDR column/row/bank addresses into a page frame
434 * number and an offset within the page. 434 * number and an offset within the page.
435 * 435 *
436 * Suppose the memory mode is 0x0111 (128-bit mode, identical DIMM pairs); 436 * Suppose the memory mode is 0x0111 (128-bit mode, identical DIMM pairs);
437 * the physical address (PA) to column address (CA) bit mappings are: 437 * the physical address (PA) to column address (CA) bit mappings are:
438 * CA 0 1 2 3 4 5 6 7 8 9 10 438 * CA 0 1 2 3 4 5 6 7 8 9 10
439 * PA 59 58 57 56 55 54 53 52 51 50 49 439 * PA 59 58 57 56 55 54 53 52 51 50 49
440 * 440 *
441 * the physical address (PA) to bank address (BA) bit mappings are: 441 * the physical address (PA) to bank address (BA) bit mappings are:
442 * BA 0 1 442 * BA 0 1
443 * PA 43 44 443 * PA 43 44
444 * 444 *
445 * the physical address (PA) to row address (RA) bit mappings are: 445 * the physical address (PA) to row address (RA) bit mappings are:
446 * RA 0 1 2 3 4 5 6 7 8 9 10 11 12 446 * RA 0 1 2 3 4 5 6 7 8 9 10 11 12
447 * PA 36 35 34 48 47 46 45 40 41 42 39 38 37 447 * PA 36 35 34 48 47 46 45 40 41 42 39 38 37
448 */ 448 */
449 static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, 449 static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
450 unsigned long *pfn, unsigned long *offset, int *csrow) 450 unsigned long *pfn, unsigned long *offset, int *csrow)
451 { 451 {
452 u32 bcnt, rank, col, bank, row; 452 u32 bcnt, rank, col, bank, row;
453 u32 c; 453 u32 c;
454 unsigned long pa; 454 unsigned long pa;
455 int i; 455 int i;
456 456
457 bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT; 457 bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
458 rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT; 458 rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
459 col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT; 459 col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
460 bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT; 460 bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
461 row = mear & MEAR_ROW_MASK; 461 row = mear & MEAR_ROW_MASK;
462 462
463 *csrow = rank; 463 *csrow = rank;
464 464
465 #ifdef CONFIG_EDAC_DEBUG 465 #ifdef CONFIG_EDAC_DEBUG
466 if (mci->csrows[rank].first_page == 0) { 466 if (mci->csrows[rank]->first_page == 0) {
467 cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " 467 cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
468 "non-populated csrow, broken hardware?\n"); 468 "non-populated csrow, broken hardware?\n");
469 return; 469 return;
470 } 470 }
471 #endif 471 #endif
472 472
473 /* Revert csrow number */ 473 /* Revert csrow number */
474 pa = mci->csrows[rank].first_page << PAGE_SHIFT; 474 pa = mci->csrows[rank]->first_page << PAGE_SHIFT;
475 475
476 /* Revert column address */ 476 /* Revert column address */
477 col += bcnt; 477 col += bcnt;
478 for (i = 0; i < 11; i++) { 478 for (i = 0; i < 11; i++) {
479 c = col & 0x1; 479 c = col & 0x1;
480 col >>= 1; 480 col >>= 1;
481 pa |= c << (14 - i); 481 pa |= c << (14 - i);
482 } 482 }
483 483
484 /* Revert bank address */ 484 /* Revert bank address */
485 pa |= bank << 19; 485 pa |= bank << 19;
486 486
487 /* Revert row address, in 4 steps */ 487 /* Revert row address, in 4 steps */
488 for (i = 0; i < 3; i++) { 488 for (i = 0; i < 3; i++) {
489 c = row & 0x1; 489 c = row & 0x1;
490 row >>= 1; 490 row >>= 1;
491 pa |= c << (26 - i); 491 pa |= c << (26 - i);
492 } 492 }
493 493
494 for (i = 0; i < 3; i++) { 494 for (i = 0; i < 3; i++) {
495 c = row & 0x1; 495 c = row & 0x1;
496 row >>= 1; 496 row >>= 1;
497 pa |= c << (21 + i); 497 pa |= c << (21 + i);
498 } 498 }
499 499
500 for (i = 0; i < 4; i++) { 500 for (i = 0; i < 4; i++) {
501 c = row & 0x1; 501 c = row & 0x1;
502 row >>= 1; 502 row >>= 1;
503 pa |= c << (18 - i); 503 pa |= c << (18 - i);
504 } 504 }
505 505
506 for (i = 0; i < 3; i++) { 506 for (i = 0; i < 3; i++) {
507 c = row & 0x1; 507 c = row & 0x1;
508 row >>= 1; 508 row >>= 1;
509 pa |= c << (29 - i); 509 pa |= c << (29 - i);
510 } 510 }
511 511
512 *offset = pa & (PAGE_SIZE - 1); 512 *offset = pa & (PAGE_SIZE - 1);
513 *pfn = pa >> PAGE_SHIFT; 513 *pfn = pa >> PAGE_SHIFT;
514 514
515 debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); 515 debugf0("%s: ECC physical address 0x%lx\n", __func__, pa);
516 } 516 }
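Note the bit numbering: the mapping tables above use the PowerPC MSB-0 convention for a 64-bit address, so PA bit n corresponds to a conventional left shift of 63 - n. CA10 = PA 49 is shift 14 and CA0 = PA 59 is shift 4, which is exactly what the column loop produces. A standalone sketch of that loop:

	/* Sketch only: fold the 11-bit column field back into the physical
	 * address the way the loop in cpc925_mc_get_pfn() does. The field is
	 * consumed LSB first, so successive bits land at shifts 14, 13, ...,
	 * 4 (CA10 down to CA0 in the table's numbering). */
	static unsigned long fold_column(u32 col)
	{
		unsigned long pa = 0;
		int i;

		for (i = 0; i < 11; i++) {
			pa |= (unsigned long)(col & 0x1) << (14 - i);
			col >>= 1;
		}
		return pa;
	}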
517 517
518 static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) 518 static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
519 { 519 {
520 if ((syndrome & MESR_ECC_SYN_H_MASK) == 0) 520 if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
521 return 0; 521 return 0;
522 522
523 if ((syndrome & MESR_ECC_SYN_L_MASK) == 0) 523 if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
524 return 1; 524 return 1;
525 525
526 cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n", 526 cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
527 syndrome); 527 syndrome);
528 return 1; 528 return 1;
529 } 529 }
530 530
531 /* Check memory controller registers for ECC errors */ 531 /* Check memory controller registers for ECC errors */
532 static void cpc925_mc_check(struct mem_ctl_info *mci) 532 static void cpc925_mc_check(struct mem_ctl_info *mci)
533 { 533 {
534 struct cpc925_mc_pdata *pdata = mci->pvt_info; 534 struct cpc925_mc_pdata *pdata = mci->pvt_info;
535 u32 apiexcp; 535 u32 apiexcp;
536 u32 mear; 536 u32 mear;
537 u32 mesr; 537 u32 mesr;
538 u16 syndrome; 538 u16 syndrome;
539 unsigned long pfn = 0, offset = 0; 539 unsigned long pfn = 0, offset = 0;
540 int csrow = 0, channel = 0; 540 int csrow = 0, channel = 0;
541 541
542 /* APIEXCP is cleared when read */ 542 /* APIEXCP is cleared when read */
543 apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET); 543 apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
544 if ((apiexcp & ECC_EXCP_DETECTED) == 0) 544 if ((apiexcp & ECC_EXCP_DETECTED) == 0)
545 return; 545 return;
546 546
547 mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET); 547 mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
548 syndrome = mesr & (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK); 548 syndrome = mesr & (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
549 549
550 mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET); 550 mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
551 551
552 /* Revert column/row addresses into page frame number, etc */ 552 /* Revert column/row addresses into page frame number, etc */
553 cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow); 553 cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
554 554
555 if (apiexcp & CECC_EXCP_DETECTED) { 555 if (apiexcp & CECC_EXCP_DETECTED) {
556 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); 556 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
557 channel = cpc925_mc_find_channel(mci, syndrome); 557 channel = cpc925_mc_find_channel(mci, syndrome);
558 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 558 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
559 pfn, offset, syndrome, 559 pfn, offset, syndrome,
560 csrow, channel, -1, 560 csrow, channel, -1,
561 mci->ctl_name, "", NULL); 561 mci->ctl_name, "", NULL);
562 } 562 }
563 563
564 if (apiexcp & UECC_EXCP_DETECTED) { 564 if (apiexcp & UECC_EXCP_DETECTED) {
565 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); 565 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
566 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 566 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
567 pfn, offset, 0, 567 pfn, offset, 0,
568 csrow, -1, -1, 568 csrow, -1, -1,
569 mci->ctl_name, "", NULL); 569 mci->ctl_name, "", NULL);
570 } 570 }
571 571
572 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); 572 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
573 cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n", 573 cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n",
574 __raw_readl(pdata->vbase + REG_APIMASK_OFFSET)); 574 __raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
575 cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n", 575 cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n",
576 apiexcp); 576 apiexcp);
577 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n", 577 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n",
578 __raw_readl(pdata->vbase + REG_MSCR_OFFSET)); 578 __raw_readl(pdata->vbase + REG_MSCR_OFFSET));
579 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n", 579 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n",
580 __raw_readl(pdata->vbase + REG_MSRSR_OFFSET)); 580 __raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
581 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n", 581 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n",
582 __raw_readl(pdata->vbase + REG_MSRER_OFFSET)); 582 __raw_readl(pdata->vbase + REG_MSRER_OFFSET));
583 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n", 583 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n",
584 __raw_readl(pdata->vbase + REG_MSPR_OFFSET)); 584 __raw_readl(pdata->vbase + REG_MSPR_OFFSET));
585 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n", 585 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n",
586 __raw_readl(pdata->vbase + REG_MCCR_OFFSET)); 586 __raw_readl(pdata->vbase + REG_MCCR_OFFSET));
587 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n", 587 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n",
588 __raw_readl(pdata->vbase + REG_MCRER_OFFSET)); 588 __raw_readl(pdata->vbase + REG_MCRER_OFFSET));
589 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n", 589 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n",
590 mesr); 590 mesr);
591 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n", 591 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n",
592 syndrome); 592 syndrome);
593 } 593 }
594 594
595 /******************** CPU err device ********************************/ 595 /******************** CPU err device ********************************/
596 static u32 cpc925_cpu_mask_disabled(void) 596 static u32 cpc925_cpu_mask_disabled(void)
597 { 597 {
598 struct device_node *cpus; 598 struct device_node *cpus;
599 struct device_node *cpunode = NULL; 599 struct device_node *cpunode = NULL;
600 static u32 mask = 0; 600 static u32 mask = 0;
601 601
602 /* use cached value if available */ 602 /* use cached value if available */
603 if (mask != 0) 603 if (mask != 0)
604 return mask; 604 return mask;
605 605
606 mask = APIMASK_ADI0 | APIMASK_ADI1; 606 mask = APIMASK_ADI0 | APIMASK_ADI1;
607 607
608 cpus = of_find_node_by_path("/cpus"); 608 cpus = of_find_node_by_path("/cpus");
609 if (cpus == NULL) { 609 if (cpus == NULL) {
610 cpc925_printk(KERN_DEBUG, "No /cpus node !\n"); 610 cpc925_printk(KERN_DEBUG, "No /cpus node !\n");
611 return 0; 611 return 0;
612 } 612 }
613 613
614 while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) { 614 while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) {
615 const u32 *reg = of_get_property(cpunode, "reg", NULL); 615 const u32 *reg = of_get_property(cpunode, "reg", NULL);
616 616
617 if (strcmp(cpunode->type, "cpu")) { 617 if (strcmp(cpunode->type, "cpu")) {
618 cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name); 618 cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name);
619 continue; 619 continue;
620 } 620 }
621 621
622 if (reg == NULL || *reg > 2) { 622 if (reg == NULL || *reg > 2) {
623 cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name); 623 cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name);
624 continue; 624 continue;
625 } 625 }
626 626
627 mask &= ~APIMASK_ADI(*reg); 627 mask &= ~APIMASK_ADI(*reg);
628 } 628 }
629 629
630 if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) { 630 if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) {
631 /* We assume that each CPU sits on its own PI and that 631 /* We assume that each CPU sits on its own PI and that
632 * for present CPUs the reg property equals the PI 632 * for present CPUs the reg property equals the PI
633 * interface id */ 633 * interface id */
634 cpc925_printk(KERN_WARNING, 634 cpc925_printk(KERN_WARNING,
635 "Assuming PI id is equal to CPU MPIC id!\n"); 635 "Assuming PI id is equal to CPU MPIC id!\n");
636 } 636 }
637 637
638 of_node_put(cpunode); 638 of_node_put(cpunode);
639 of_node_put(cpus); 639 of_node_put(cpus);
640 640
641 return mask; 641 return mask;
642 } 642 }
643 643
644 /* Enable CPU Errors detection */ 644 /* Enable CPU Errors detection */
645 static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) 645 static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
646 { 646 {
647 u32 apimask; 647 u32 apimask;
648 u32 cpumask; 648 u32 cpumask;
649 649
650 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); 650 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
651 651
652 cpumask = cpc925_cpu_mask_disabled(); 652 cpumask = cpc925_cpu_mask_disabled();
653 if (apimask & cpumask) { 653 if (apimask & cpumask) {
654 cpc925_printk(KERN_WARNING, "CPU(s) not present, " 654 cpc925_printk(KERN_WARNING, "CPU(s) not present, "
655 "but enabled in APIMASK, disabling\n"); 655 "but enabled in APIMASK, disabling\n");
656 apimask &= ~cpumask; 656 apimask &= ~cpumask;
657 } 657 }
658 658
659 if ((apimask & CPU_MASK_ENABLE) == 0) 659 if ((apimask & CPU_MASK_ENABLE) == 0)
660 apimask |= CPU_MASK_ENABLE; 660 apimask |= CPU_MASK_ENABLE;
661 661
662 __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); 662 __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
663 } 663 }
664 664
665 /* Disable CPU Errors detection */ 665 /* Disable CPU Errors detection */
666 static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info) 666 static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
667 { 667 {
668 /* 668 /*
669 * WARNING: 669 * WARNING:
670 * We are supposed to clear the CPU error detection bits, 670 * We are supposed to clear the CPU error detection bits,
671 * and doing so would be no problem. However, once they 671 * and doing so would be no problem. However, once they
672 * are cleared here and we re-install the CPC925 EDAC 672 * are cleared here and we re-install the CPC925 EDAC
673 * module later, setting them up again in cpc925_cpu_init() will 673 * module later, setting them up again in cpc925_cpu_init() will
674 * trigger a machine check exception. 674 * trigger a machine check exception.
675 * Also, it is OK to leave the CPU error detection bits enabled, 675 * Also, it is OK to leave the CPU error detection bits enabled,
676 * since they are reset to 1 by default. 676 * since they are reset to 1 by default.
677 */ 677 */
678 678
679 return; 679 return;
680 } 680 }
681 681
682 /* Check for CPU Errors */ 682 /* Check for CPU Errors */
683 static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev) 683 static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
684 { 684 {
685 struct cpc925_dev_info *dev_info = edac_dev->pvt_info; 685 struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
686 u32 apiexcp; 686 u32 apiexcp;
687 u32 apimask; 687 u32 apimask;
688 688
689 /* APIEXCP is cleared when read */ 689 /* APIEXCP is cleared when read */
690 apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET); 690 apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
691 if ((apiexcp & CPU_EXCP_DETECTED) == 0) 691 if ((apiexcp & CPU_EXCP_DETECTED) == 0)
692 return; 692 return;
693 693
694 if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0) 694 if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0)
695 return; 695 return;
696 696
697 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); 697 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
698 cpc925_printk(KERN_INFO, "Processor Interface Fault\n" 698 cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
699 "Processor Interface register dump:\n"); 699 "Processor Interface register dump:\n");
700 cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask); 700 cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask);
701 cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); 701 cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp);
702 702
703 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); 703 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
704 } 704 }
705 705
706 /******************** HT Link err device ****************************/ 706 /******************** HT Link err device ****************************/
707 /* Enable HyperTransport Link Error detection */ 707 /* Enable HyperTransport Link Error detection */
708 static void cpc925_htlink_init(struct cpc925_dev_info *dev_info) 708 static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
709 { 709 {
710 u32 ht_errctrl; 710 u32 ht_errctrl;
711 711
712 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); 712 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
713 if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) { 713 if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
714 ht_errctrl |= HT_ERRCTRL_ENABLE; 714 ht_errctrl |= HT_ERRCTRL_ENABLE;
715 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); 715 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
716 } 716 }
717 } 717 }
718 718
719 /* Disable HyperTransport Link Error detection */ 719 /* Disable HyperTransport Link Error detection */
720 static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info) 720 static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
721 { 721 {
722 u32 ht_errctrl; 722 u32 ht_errctrl;
723 723
724 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); 724 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
725 ht_errctrl &= ~HT_ERRCTRL_ENABLE; 725 ht_errctrl &= ~HT_ERRCTRL_ENABLE;
726 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); 726 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
727 } 727 }
728 728
729 /* Check for HyperTransport Link errors */ 729 /* Check for HyperTransport Link errors */
730 static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev) 730 static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
731 { 731 {
732 struct cpc925_dev_info *dev_info = edac_dev->pvt_info; 732 struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
733 u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET); 733 u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
734 u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET); 734 u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
735 u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); 735 u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
736 u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET); 736 u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);
737 737
738 if (!((brgctrl & BRGCTRL_DETSERR) || 738 if (!((brgctrl & BRGCTRL_DETSERR) ||
739 (linkctrl & HT_LINKCTRL_DETECTED) || 739 (linkctrl & HT_LINKCTRL_DETECTED) ||
740 (errctrl & HT_ERRCTRL_DETECTED) || 740 (errctrl & HT_ERRCTRL_DETECTED) ||
741 (linkerr & HT_LINKERR_DETECTED))) 741 (linkerr & HT_LINKERR_DETECTED)))
742 return; 742 return;
743 743
744 cpc925_printk(KERN_INFO, "HT Link Fault\n" 744 cpc925_printk(KERN_INFO, "HT Link Fault\n"
745 "HT register dump:\n"); 745 "HT register dump:\n");
746 cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n", 746 cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n",
747 brgctrl); 747 brgctrl);
748 cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n", 748 cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n",
749 linkctrl); 749 linkctrl);
750 cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n", 750 cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n",
751 errctrl); 751 errctrl);
752 cpc925_printk(KERN_INFO, "Link Error 0x%08x\n", 752 cpc925_printk(KERN_INFO, "Link Error 0x%08x\n",
753 linkerr); 753 linkerr);
754 754
755 /* Clear by write 1 */ 755 /* Clear by write 1 */
756 if (brgctrl & BRGCTRL_DETSERR) 756 if (brgctrl & BRGCTRL_DETSERR)
757 __raw_writel(BRGCTRL_DETSERR, 757 __raw_writel(BRGCTRL_DETSERR,
758 dev_info->vbase + REG_BRGCTRL_OFFSET); 758 dev_info->vbase + REG_BRGCTRL_OFFSET);
759 759
760 if (linkctrl & HT_LINKCTRL_DETECTED) 760 if (linkctrl & HT_LINKCTRL_DETECTED)
761 __raw_writel(HT_LINKCTRL_DETECTED, 761 __raw_writel(HT_LINKCTRL_DETECTED,
762 dev_info->vbase + REG_LINKCTRL_OFFSET); 762 dev_info->vbase + REG_LINKCTRL_OFFSET);
763 763
764 /* Initiate Secondary Bus Reset to clear the chain failure */ 764 /* Initiate Secondary Bus Reset to clear the chain failure */
765 if (errctrl & ERRCTRL_CHN_FAL) 765 if (errctrl & ERRCTRL_CHN_FAL)
766 __raw_writel(BRGCTRL_SECBUSRESET, 766 __raw_writel(BRGCTRL_SECBUSRESET,
767 dev_info->vbase + REG_BRGCTRL_OFFSET); 767 dev_info->vbase + REG_BRGCTRL_OFFSET);
768 768
769 if (errctrl & ERRCTRL_RSP_ERR) 769 if (errctrl & ERRCTRL_RSP_ERR)
770 __raw_writel(ERRCTRL_RSP_ERR, 770 __raw_writel(ERRCTRL_RSP_ERR,
771 dev_info->vbase + REG_ERRCTRL_OFFSET); 771 dev_info->vbase + REG_ERRCTRL_OFFSET);
772 772
773 if (linkerr & HT_LINKERR_DETECTED) 773 if (linkerr & HT_LINKERR_DETECTED)
774 __raw_writel(HT_LINKERR_DETECTED, 774 __raw_writel(HT_LINKERR_DETECTED,
775 dev_info->vbase + REG_LINKERR_OFFSET); 775 dev_info->vbase + REG_LINKERR_OFFSET);
776 776
777 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); 777 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
778 } 778 }
779 779
780 static struct cpc925_dev_info cpc925_devs[] = { 780 static struct cpc925_dev_info cpc925_devs[] = {
781 { 781 {
782 .ctl_name = CPC925_CPU_ERR_DEV, 782 .ctl_name = CPC925_CPU_ERR_DEV,
783 .init = cpc925_cpu_init, 783 .init = cpc925_cpu_init,
784 .exit = cpc925_cpu_exit, 784 .exit = cpc925_cpu_exit,
785 .check = cpc925_cpu_check, 785 .check = cpc925_cpu_check,
786 }, 786 },
787 { 787 {
788 .ctl_name = CPC925_HT_LINK_DEV, 788 .ctl_name = CPC925_HT_LINK_DEV,
789 .init = cpc925_htlink_init, 789 .init = cpc925_htlink_init,
790 .exit = cpc925_htlink_exit, 790 .exit = cpc925_htlink_exit,
791 .check = cpc925_htlink_check, 791 .check = cpc925_htlink_check,
792 }, 792 },
793 {0}, /* Terminated by NULL */ 793 {0}, /* Terminated by NULL */
794 }; 794 };
795 795
796 /* 796 /*
797 * Add CPU Err detection and HyperTransport Link Err detection 797 * Add CPU Err detection and HyperTransport Link Err detection
798 * as common "edac_device", they have no corresponding device 798 * as common "edac_device", they have no corresponding device
799 * nodes in the Open Firmware DTB and we have to add platform 799 * nodes in the Open Firmware DTB and we have to add platform
800 * devices for them. Also, they will share the MMIO with that 800 * devices for them. Also, they will share the MMIO with that
801 * of memory controller. 801 * of memory controller.
802 */ 802 */
803 static void cpc925_add_edac_devices(void __iomem *vbase) 803 static void cpc925_add_edac_devices(void __iomem *vbase)
804 { 804 {
805 struct cpc925_dev_info *dev_info; 805 struct cpc925_dev_info *dev_info;
806 806
807 if (!vbase) { 807 if (!vbase) {
808 cpc925_printk(KERN_ERR, "MMIO not established yet\n"); 808 cpc925_printk(KERN_ERR, "MMIO not established yet\n");
809 return; 809 return;
810 } 810 }
811 811
812 for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { 812 for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
813 dev_info->vbase = vbase; 813 dev_info->vbase = vbase;
814 dev_info->pdev = platform_device_register_simple( 814 dev_info->pdev = platform_device_register_simple(
815 dev_info->ctl_name, 0, NULL, 0); 815 dev_info->ctl_name, 0, NULL, 0);
816 if (IS_ERR(dev_info->pdev)) { 816 if (IS_ERR(dev_info->pdev)) {
817 cpc925_printk(KERN_ERR, 817 cpc925_printk(KERN_ERR,
818 "Can't register platform device for %s\n", 818 "Can't register platform device for %s\n",
819 dev_info->ctl_name); 819 dev_info->ctl_name);
820 continue; 820 continue;
821 } 821 }
822 822
823 /* 823 /*
824 * Don't have to allocate private structure but 824 * Don't have to allocate private structure but
825 * make use of cpc925_devs[] instead. 825 * make use of cpc925_devs[] instead.
826 */ 826 */
827 dev_info->edac_idx = edac_device_alloc_index(); 827 dev_info->edac_idx = edac_device_alloc_index();
828 dev_info->edac_dev = 828 dev_info->edac_dev =
829 edac_device_alloc_ctl_info(0, dev_info->ctl_name, 829 edac_device_alloc_ctl_info(0, dev_info->ctl_name,
830 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx); 830 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
831 if (!dev_info->edac_dev) { 831 if (!dev_info->edac_dev) {
832 cpc925_printk(KERN_ERR, "No memory for edac device\n"); 832 cpc925_printk(KERN_ERR, "No memory for edac device\n");
833 goto err1; 833 goto err1;
834 } 834 }
835 835
836 dev_info->edac_dev->pvt_info = dev_info; 836 dev_info->edac_dev->pvt_info = dev_info;
837 dev_info->edac_dev->dev = &dev_info->pdev->dev; 837 dev_info->edac_dev->dev = &dev_info->pdev->dev;
838 dev_info->edac_dev->ctl_name = dev_info->ctl_name; 838 dev_info->edac_dev->ctl_name = dev_info->ctl_name;
839 dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR; 839 dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
840 dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev); 840 dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);
841 841
842 if (edac_op_state == EDAC_OPSTATE_POLL) 842 if (edac_op_state == EDAC_OPSTATE_POLL)
843 dev_info->edac_dev->edac_check = dev_info->check; 843 dev_info->edac_dev->edac_check = dev_info->check;
844 844
845 if (dev_info->init) 845 if (dev_info->init)
846 dev_info->init(dev_info); 846 dev_info->init(dev_info);
847 847
848 if (edac_device_add_device(dev_info->edac_dev) > 0) { 848 if (edac_device_add_device(dev_info->edac_dev) > 0) {
849 cpc925_printk(KERN_ERR, 849 cpc925_printk(KERN_ERR,
850 "Unable to add edac device for %s\n", 850 "Unable to add edac device for %s\n",
851 dev_info->ctl_name); 851 dev_info->ctl_name);
852 goto err2; 852 goto err2;
853 } 853 }
854 854
855 debugf0("%s: Successfully added edac device for %s\n", 855 debugf0("%s: Successfully added edac device for %s\n",
856 __func__, dev_info->ctl_name); 856 __func__, dev_info->ctl_name);
857 857
858 continue; 858 continue;
859 859
860 err2: 860 err2:
861 if (dev_info->exit) 861 if (dev_info->exit)
862 dev_info->exit(dev_info); 862 dev_info->exit(dev_info);
863 edac_device_free_ctl_info(dev_info->edac_dev); 863 edac_device_free_ctl_info(dev_info->edac_dev);
864 err1: 864 err1:
865 platform_device_unregister(dev_info->pdev); 865 platform_device_unregister(dev_info->pdev);
866 } 866 }
867 } 867 }
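For reference, here is how the arguments at the edac_device_alloc_ctl_info() call above line up with its prototype; the per-argument notes are a reading of this call site, hedged against edac_core.h:

	struct edac_device_ctl_info *edac_device_alloc_ctl_info(
		unsigned sizeof_private,	/* 0: pvt_info is assigned by hand above */
		char *edac_device_name,		/* dev_info->ctl_name */
		unsigned nr_instances,		/* 1 */
		char *edac_block_name,		/* NULL: no named blocks */
		unsigned nr_blocks,		/* 0 */
		unsigned offset_value,		/* 0 */
		struct edac_dev_sysfs_block_attribute *block_attributes, /* NULL */
		unsigned nr_attribs,		/* 0 */
		int device_index);		/* dev_info->edac_idx */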
868 868
869 /* 869 /*
870 * Delete the common "edac_device" for CPU Err Detection 870 * Delete the common "edac_device" for CPU Err Detection
871 * and HyperTransport Link Err Detection 871 * and HyperTransport Link Err Detection
872 */ 872 */
873 static void cpc925_del_edac_devices(void) 873 static void cpc925_del_edac_devices(void)
874 { 874 {
875 struct cpc925_dev_info *dev_info; 875 struct cpc925_dev_info *dev_info;
876 876
877 for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { 877 for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
878 if (dev_info->edac_dev) { 878 if (dev_info->edac_dev) {
879 edac_device_del_device(dev_info->edac_dev->dev); 879 edac_device_del_device(dev_info->edac_dev->dev);
880 edac_device_free_ctl_info(dev_info->edac_dev); 880 edac_device_free_ctl_info(dev_info->edac_dev);
881 platform_device_unregister(dev_info->pdev); 881 platform_device_unregister(dev_info->pdev);
882 } 882 }
883 883
884 if (dev_info->exit) 884 if (dev_info->exit)
885 dev_info->exit(dev_info); 885 dev_info->exit(dev_info);
886 886
887 debugf0("%s: Successfully deleted edac device for %s\n", 887 debugf0("%s: Successfully deleted edac device for %s\n",
888 __func__, dev_info->ctl_name); 888 __func__, dev_info->ctl_name);
889 } 889 }
890 } 890 }
891 891
892 /* Convert the current background scrub rate into byte/sec bandwidth */ 892 /* Convert the current background scrub rate into byte/sec bandwidth */
893 static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) 893 static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
894 { 894 {
895 struct cpc925_mc_pdata *pdata = mci->pvt_info; 895 struct cpc925_mc_pdata *pdata = mci->pvt_info;
896 int bw; 896 int bw;
897 u32 mscr; 897 u32 mscr;
898 u8 si; 898 u8 si;
899 899
900 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); 900 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
901 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; 901 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
902 902
903 debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); 903 debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr);
904 904
905 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || 905 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
906 (si == 0)) { 906 (si == 0)) {
907 cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); 907 cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
908 bw = 0; 908 bw = 0;
909 } else 909 } else
910 bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; 910 bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
911 911
912 return bw; 912 return bw;
913 } 913 }
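The returned bandwidth is inversely proportional to the scrub interval field si; the constant 0xFA67 is hardware-defined and taken as given here. Pulled out as a standalone helper for clarity (a sketch, not driver code):

	/* bytes/sec for a given block size and interval field; si == 0
	 * (scrubbing off) must be handled by the caller, as above. */
	static int scrub_interval_to_bw(u32 block_size, u8 si)
	{
		return block_size * 0xFA67 / si;
	}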
914 914
915 /* Return 0 for single channel; 1 for dual channel */ 915 /* Return 0 for single channel; 1 for dual channel */
916 static int cpc925_mc_get_channels(void __iomem *vbase) 916 static int cpc925_mc_get_channels(void __iomem *vbase)
917 { 917 {
918 int dual = 0; 918 int dual = 0;
919 u32 mbcr; 919 u32 mbcr;
920 920
921 mbcr = __raw_readl(vbase + REG_MBCR_OFFSET); 921 mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
922 922
923 /* 923 /*
924 * Dual channel only when 128-bit wide physical bus 924 * Dual channel only when 128-bit wide physical bus
925 * and 128-bit configuration. 925 * and 128-bit configuration.
926 */ 926 */
927 if (((mbcr & MBCR_64BITCFG_MASK) == 0) && 927 if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
928 ((mbcr & MBCR_64BITBUS_MASK) == 0)) 928 ((mbcr & MBCR_64BITBUS_MASK) == 0))
929 dual = 1; 929 dual = 1;
930 930
931 debugf0("%s: %s channel\n", __func__, 931 debugf0("%s: %s channel\n", __func__,
932 (dual > 0) ? "Dual" : "Single"); 932 (dual > 0) ? "Dual" : "Single");
933 933
934 return dual; 934 return dual;
935 } 935 }
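Since both MBCR bits must be clear for dual channel, the decode collapses to a small truth table (bit names from the masks above):

	/*
	 *	64BITCFG  64BITBUS	channels
	 *	   0	     0		2 (128-bit config on a 128-bit bus)
	 *	   0	     1		1
	 *	   1	     0		1
	 *	   1	     1		1
	 */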
936 936
937 static int __devinit cpc925_probe(struct platform_device *pdev) 937 static int __devinit cpc925_probe(struct platform_device *pdev)
938 { 938 {
939 static int edac_mc_idx; 939 static int edac_mc_idx;
940 struct mem_ctl_info *mci; 940 struct mem_ctl_info *mci;
941 struct edac_mc_layer layers[2]; 941 struct edac_mc_layer layers[2];
942 void __iomem *vbase; 942 void __iomem *vbase;
943 struct cpc925_mc_pdata *pdata; 943 struct cpc925_mc_pdata *pdata;
944 struct resource *r; 944 struct resource *r;
945 int res = 0, nr_channels; 945 int res = 0, nr_channels;
946 946
947 debugf0("%s: %s platform device found!\n", __func__, pdev->name); 947 debugf0("%s: %s platform device found!\n", __func__, pdev->name);
948 948
949 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { 949 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
950 res = -ENOMEM; 950 res = -ENOMEM;
951 goto out; 951 goto out;
952 } 952 }
953 953
954 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 954 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
955 if (!r) { 955 if (!r) {
956 cpc925_printk(KERN_ERR, "Unable to get resource\n"); 956 cpc925_printk(KERN_ERR, "Unable to get resource\n");
957 res = -ENOENT; 957 res = -ENOENT;
958 goto err1; 958 goto err1;
959 } 959 }
960 960
961 if (!devm_request_mem_region(&pdev->dev, 961 if (!devm_request_mem_region(&pdev->dev,
962 r->start, 962 r->start,
963 resource_size(r), 963 resource_size(r),
964 pdev->name)) { 964 pdev->name)) {
965 cpc925_printk(KERN_ERR, "Unable to request mem region\n"); 965 cpc925_printk(KERN_ERR, "Unable to request mem region\n");
966 res = -EBUSY; 966 res = -EBUSY;
967 goto err1; 967 goto err1;
968 } 968 }
969 969
970 vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); 970 vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
971 if (!vbase) { 971 if (!vbase) {
972 cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); 972 cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
973 res = -ENOMEM; 973 res = -ENOMEM;
974 goto err2; 974 goto err2;
975 } 975 }
976 976
977 nr_channels = cpc925_mc_get_channels(vbase) + 1; 977 nr_channels = cpc925_mc_get_channels(vbase) + 1;
978 978
979 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 979 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
980 layers[0].size = CPC925_NR_CSROWS; 980 layers[0].size = CPC925_NR_CSROWS;
981 layers[0].is_virt_csrow = true; 981 layers[0].is_virt_csrow = true;
982 layers[1].type = EDAC_MC_LAYER_CHANNEL; 982 layers[1].type = EDAC_MC_LAYER_CHANNEL;
983 layers[1].size = nr_channels; 983 layers[1].size = nr_channels;
984 layers[1].is_virt_csrow = false; 984 layers[1].is_virt_csrow = false;
985 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, 985 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
986 sizeof(struct cpc925_mc_pdata)); 986 sizeof(struct cpc925_mc_pdata));
987 if (!mci) { 987 if (!mci) {
988 cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); 988 cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
989 res = -ENOMEM; 989 res = -ENOMEM;
990 goto err2; 990 goto err2;
991 } 991 }
992 992
993 pdata = mci->pvt_info; 993 pdata = mci->pvt_info;
994 pdata->vbase = vbase; 994 pdata->vbase = vbase;
995 pdata->edac_idx = edac_mc_idx++; 995 pdata->edac_idx = edac_mc_idx++;
996 pdata->name = pdev->name; 996 pdata->name = pdev->name;
997 997
998 mci->pdev = &pdev->dev; 998 mci->pdev = &pdev->dev;
999 platform_set_drvdata(pdev, mci); 999 platform_set_drvdata(pdev, mci);
1000 mci->dev_name = dev_name(&pdev->dev); 1000 mci->dev_name = dev_name(&pdev->dev);
1001 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 1001 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
1002 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 1002 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
1003 mci->edac_cap = EDAC_FLAG_SECDED; 1003 mci->edac_cap = EDAC_FLAG_SECDED;
1004 mci->mod_name = CPC925_EDAC_MOD_STR; 1004 mci->mod_name = CPC925_EDAC_MOD_STR;
1005 mci->mod_ver = CPC925_EDAC_REVISION; 1005 mci->mod_ver = CPC925_EDAC_REVISION;
1006 mci->ctl_name = pdev->name; 1006 mci->ctl_name = pdev->name;
1007 1007
1008 if (edac_op_state == EDAC_OPSTATE_POLL) 1008 if (edac_op_state == EDAC_OPSTATE_POLL)
1009 mci->edac_check = cpc925_mc_check; 1009 mci->edac_check = cpc925_mc_check;
1010 1010
1011 mci->ctl_page_to_phys = NULL; 1011 mci->ctl_page_to_phys = NULL;
1012 mci->scrub_mode = SCRUB_SW_SRC; 1012 mci->scrub_mode = SCRUB_SW_SRC;
1013 mci->set_sdram_scrub_rate = NULL; 1013 mci->set_sdram_scrub_rate = NULL;
1014 mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate; 1014 mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
1015 1015
1016 cpc925_init_csrows(mci); 1016 cpc925_init_csrows(mci);
1017 1017
1018 /* Setup memory controller registers */ 1018 /* Setup memory controller registers */
1019 cpc925_mc_init(mci); 1019 cpc925_mc_init(mci);
1020 1020
1021 if (edac_mc_add_mc(mci) > 0) { 1021 if (edac_mc_add_mc(mci) > 0) {
1022 cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n"); 1022 cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
1023 goto err3; 1023 goto err3;
1024 } 1024 }
1025 1025
1026 cpc925_add_edac_devices(vbase); 1026 cpc925_add_edac_devices(vbase);
1027 1027
1028 /* getting this far means success */ 1028 /* getting this far means success */
1029 debugf0("%s: success\n", __func__); 1029 debugf0("%s: success\n", __func__);
1030 1030
1031 res = 0; 1031 res = 0;
1032 goto out; 1032 goto out;
1033 1033
1034 err3: 1034 err3:
1035 cpc925_mc_exit(mci); 1035 cpc925_mc_exit(mci);
1036 edac_mc_free(mci); 1036 edac_mc_free(mci);
1037 err2: 1037 err2:
1038 devm_release_mem_region(&pdev->dev, r->start, resource_size(r)); 1038 devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
1039 err1: 1039 err1:
1040 devres_release_group(&pdev->dev, cpc925_probe); 1040 devres_release_group(&pdev->dev, cpc925_probe);
1041 out: 1041 out:
1042 return res; 1042 return res;
1043 } 1043 }
1044 1044
1045 static int cpc925_remove(struct platform_device *pdev) 1045 static int cpc925_remove(struct platform_device *pdev)
1046 { 1046 {
1047 struct mem_ctl_info *mci = platform_get_drvdata(pdev); 1047 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
1048 1048
1049 /* 1049 /*
1050 * Delete the common edac devices before the edac mc, because 1050 * Delete the common edac devices before the edac mc, because
1051 * the former share the MMIO region of the latter. 1051 * the former share the MMIO region of the latter.
1052 */ 1052 */
1053 cpc925_del_edac_devices(); 1053 cpc925_del_edac_devices();
1054 cpc925_mc_exit(mci); 1054 cpc925_mc_exit(mci);
1055 1055
1056 edac_mc_del_mc(&pdev->dev); 1056 edac_mc_del_mc(&pdev->dev);
1057 edac_mc_free(mci); 1057 edac_mc_free(mci);
1058 1058
1059 return 0; 1059 return 0;
1060 } 1060 }
1061 1061
1062 static struct platform_driver cpc925_edac_driver = { 1062 static struct platform_driver cpc925_edac_driver = {
1063 .probe = cpc925_probe, 1063 .probe = cpc925_probe,
1064 .remove = cpc925_remove, 1064 .remove = cpc925_remove,
1065 .driver = { 1065 .driver = {
1066 .name = "cpc925_edac", 1066 .name = "cpc925_edac",
1067 } 1067 }
1068 }; 1068 };
1069 1069
1070 static int __init cpc925_edac_init(void) 1070 static int __init cpc925_edac_init(void)
1071 { 1071 {
1072 int ret = 0; 1072 int ret = 0;
1073 1073
1074 printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n"); 1074 printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
1075 printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n"); 1075 printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
1076 1076
1077 /* Only support POLL mode so far */ 1077 /* Only support POLL mode so far */
1078 edac_op_state = EDAC_OPSTATE_POLL; 1078 edac_op_state = EDAC_OPSTATE_POLL;
1079 1079
1080 ret = platform_driver_register(&cpc925_edac_driver); 1080 ret = platform_driver_register(&cpc925_edac_driver);
1081 if (ret) { 1081 if (ret) {
1082 printk(KERN_WARNING "Failed to register %s\n", 1082 printk(KERN_WARNING "Failed to register %s\n",
1083 CPC925_EDAC_MOD_STR); 1083 CPC925_EDAC_MOD_STR);
1084 } 1084 }
1085 1085
1086 return ret; 1086 return ret;
1087 } 1087 }
1088 1088
1089 static void __exit cpc925_edac_exit(void) 1089 static void __exit cpc925_edac_exit(void)
1090 { 1090 {
1091 platform_driver_unregister(&cpc925_edac_driver); 1091 platform_driver_unregister(&cpc925_edac_driver);
1092 } 1092 }
1093 1093
1094 module_init(cpc925_edac_init); 1094 module_init(cpc925_edac_init);
1095 module_exit(cpc925_edac_exit); 1095 module_exit(cpc925_edac_exit);
1096 1096
1097 MODULE_LICENSE("GPL"); 1097 MODULE_LICENSE("GPL");
1098 MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>"); 1098 MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
1099 MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module"); 1099 MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
1100 1100
drivers/edac/e752x_edac.c
1 /* 1 /*
2 * Intel e752x Memory Controller kernel module 2 * Intel e752x Memory Controller kernel module
3 * (C) 2004 Linux Networx (http://lnxi.com) 3 * (C) 2004 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * Implement support for the e7520, E7525, e7320 and i3100 memory controllers. 7 * Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
8 * 8 *
9 * Datasheets: 9 * Datasheets:
10 * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html 10 * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
11 * ftp://download.intel.com/design/intarch/datashts/31345803.pdf 11 * ftp://download.intel.com/design/intarch/datashts/31345803.pdf
12 * 12 *
13 * Written by Tom Zimmerman 13 * Written by Tom Zimmerman
14 * 14 *
15 * Contributors: 15 * Contributors:
16 * Thayne Harbaugh at realmsys.com (?) 16 * Thayne Harbaugh at realmsys.com (?)
17 * Wang Zhenyu at intel.com 17 * Wang Zhenyu at intel.com
18 * Dave Jiang at mvista.com 18 * Dave Jiang at mvista.com
19 * 19 *
20 */ 20 */
21 21
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/init.h> 23 #include <linux/init.h>
24 #include <linux/pci.h> 24 #include <linux/pci.h>
25 #include <linux/pci_ids.h> 25 #include <linux/pci_ids.h>
26 #include <linux/edac.h> 26 #include <linux/edac.h>
27 #include "edac_core.h" 27 #include "edac_core.h"
28 28
29 #define E752X_REVISION " Ver: 2.0.2" 29 #define E752X_REVISION " Ver: 2.0.2"
30 #define EDAC_MOD_STR "e752x_edac" 30 #define EDAC_MOD_STR "e752x_edac"
31 31
32 static int report_non_memory_errors; 32 static int report_non_memory_errors;
33 static int force_function_unhide; 33 static int force_function_unhide;
34 static int sysbus_parity = -1; 34 static int sysbus_parity = -1;
35 35
36 static struct edac_pci_ctl_info *e752x_pci; 36 static struct edac_pci_ctl_info *e752x_pci;
37 37
38 #define e752x_printk(level, fmt, arg...) \ 38 #define e752x_printk(level, fmt, arg...) \
39 edac_printk(level, "e752x", fmt, ##arg) 39 edac_printk(level, "e752x", fmt, ##arg)
40 40
41 #define e752x_mc_printk(mci, level, fmt, arg...) \ 41 #define e752x_mc_printk(mci, level, fmt, arg...) \
42 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg) 42 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
43 43
44 #ifndef PCI_DEVICE_ID_INTEL_7520_0 44 #ifndef PCI_DEVICE_ID_INTEL_7520_0
45 #define PCI_DEVICE_ID_INTEL_7520_0 0x3590 45 #define PCI_DEVICE_ID_INTEL_7520_0 0x3590
46 #endif /* PCI_DEVICE_ID_INTEL_7520_0 */ 46 #endif /* PCI_DEVICE_ID_INTEL_7520_0 */
47 47
48 #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR 48 #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
49 #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591 49 #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
50 #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */ 50 #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
51 51
52 #ifndef PCI_DEVICE_ID_INTEL_7525_0 52 #ifndef PCI_DEVICE_ID_INTEL_7525_0
53 #define PCI_DEVICE_ID_INTEL_7525_0 0x359E 53 #define PCI_DEVICE_ID_INTEL_7525_0 0x359E
54 #endif /* PCI_DEVICE_ID_INTEL_7525_0 */ 54 #endif /* PCI_DEVICE_ID_INTEL_7525_0 */
55 55
56 #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR 56 #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
57 #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593 57 #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
58 #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */ 58 #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
59 59
60 #ifndef PCI_DEVICE_ID_INTEL_7320_0 60 #ifndef PCI_DEVICE_ID_INTEL_7320_0
61 #define PCI_DEVICE_ID_INTEL_7320_0 0x3592 61 #define PCI_DEVICE_ID_INTEL_7320_0 0x3592
62 #endif /* PCI_DEVICE_ID_INTEL_7320_0 */ 62 #endif /* PCI_DEVICE_ID_INTEL_7320_0 */
63 63
64 #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR 64 #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
65 #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 65 #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
66 #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ 66 #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
67 67
68 #ifndef PCI_DEVICE_ID_INTEL_3100_0 68 #ifndef PCI_DEVICE_ID_INTEL_3100_0
69 #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0 69 #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
70 #endif /* PCI_DEVICE_ID_INTEL_3100_0 */ 70 #endif /* PCI_DEVICE_ID_INTEL_3100_0 */
71 71
72 #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR 72 #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
73 #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1 73 #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
74 #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */ 74 #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
75 75
76 #define E752X_NR_CSROWS 8 /* number of csrows */ 76 #define E752X_NR_CSROWS 8 /* number of csrows */
77 77
78 /* E752X register addresses - device 0 function 0 */ 78 /* E752X register addresses - device 0 function 0 */
79 #define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */ 79 #define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */
80 /* 80 /*
81 * 6:5 Scrub Completion Count 81 * 6:5 Scrub Completion Count
82 * 3:2 Scrub Rate (i3100 only) 82 * 3:2 Scrub Rate (i3100 only)
83 * 01=fast 10=normal 83 * 01=fast 10=normal
84 * 1:0 Scrub Mode enable 84 * 1:0 Scrub Mode enable
85 * 00=off 10=on 85 * 00=off 10=on
86 */ 86 */
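Decoding those MCHSCRB fields might look like this; a sketch only, with the bit positions taken from the comment above rather than re-verified against the datasheet, and pdev being the usual struct pci_dev for function 0:

	static void decode_mchscrb(struct pci_dev *pdev)
	{
		u16 mchscrb;
		bool scrub_on, scrub_fast;

		pci_read_config_word(pdev, E752X_MCHSCRB, &mchscrb);
		scrub_on   = ((mchscrb >> 0) & 0x3) == 0x2;	/* 1:0 == 10 -> on */
		scrub_fast = ((mchscrb >> 2) & 0x3) == 0x1;	/* 3:2 == 01 -> fast (i3100) */

		e752x_printk(KERN_INFO, "scrub %s (%s)\n",
			     scrub_on ? "on" : "off",
			     scrub_fast ? "fast" : "normal");
	}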
87 #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ 87 #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
88 #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ 88 #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
89 /* 89 /*
90 * 31:30 Device width row 7 90 * 31:30 Device width row 7
91 * 01=x8 10=x4 11=x8 DDR2 91 * 01=x8 10=x4 11=x8 DDR2
92 * 27:26 Device width row 6 92 * 27:26 Device width row 6
93 * 23:22 Device width row 5 93 * 23:22 Device width row 5
94 * 19:18 Device width row 4 94 * 19:18 Device width row 4
95 * 15:14 Device width row 3 95 * 15:14 Device width row 3
96 * 11:10 Device width row 2 96 * 11:10 Device width row 2
97 * 7:6 Device width row 1 97 * 7:6 Device width row 1
98 * 3:2 Device width row 0 98 * 3:2 Device width row 0
99 */ 99 */
100 #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */ 100 #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
101 /* FIXME:IS THIS RIGHT? */ 101 /* FIXME:IS THIS RIGHT? */
102 /* 102 /*
103 * 22 Number channels 0=1,1=2 103 * 22 Number channels 0=1,1=2
104 * 19:18 DRB Granularity 32/64MB 104 * 19:18 DRB Granularity 32/64MB
105 */ 105 */
106 #define E752X_DRM 0x80 /* Dimm mapping register */ 106 #define E752X_DRM 0x80 /* Dimm mapping register */
107 #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */ 107 #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
108 /* 108 /*
109 * 14:12 1 single A, 2 single B, 3 dual 109 * 14:12 1 single A, 2 single B, 3 dual
110 */ 110 */
111 #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ 111 #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
112 #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ 112 #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
113 #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ 113 #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
114 #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */ 114 #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
115 115
116 /* E752X register addresses - device 0 function 1 */ 116 /* E752X register addresses - device 0 function 1 */
117 #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */ 117 #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
118 #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */ 118 #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
119 #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */ 119 #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
120 #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */ 120 #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
121 #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */ 121 #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
122 #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */ 122 #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
123 #define E752X_SYSBUS_FERR 0x60 /* System bus first error reg (16b) */ 123 #define E752X_SYSBUS_FERR 0x60 /* System bus first error reg (16b) */
124 #define E752X_SYSBUS_NERR 0x62 /* System bus next error reg (16b) */ 124 #define E752X_SYSBUS_NERR 0x62 /* System bus next error reg (16b) */
125 #define E752X_SYSBUS_ERRMASK 0x64 /* System bus error mask reg (16b) */ 125 #define E752X_SYSBUS_ERRMASK 0x64 /* System bus error mask reg (16b) */
126 #define E752X_SYSBUS_SMICMD 0x6A /* System bus SMI command reg (16b) */ 126 #define E752X_SYSBUS_SMICMD 0x6A /* System bus SMI command reg (16b) */
127 #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */ 127 #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
128 #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */ 128 #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
129 #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */ 129 #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
130 #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */ 130 #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
131 #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */ 131 #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
132 #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */ 132 #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
133 #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */ 133 #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
134 #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */ 134 #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
135 #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */ 135 #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
136 #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */ 136 #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
137 /* error address register (32b) */ 137 /* error address register (32b) */
138 /* 138 /*
139 * 31 Reserved 139 * 31 Reserved
140 * 30:2 CE address (64 byte block 34:6) 140 * 30:2 CE address (64 byte block 34:6)
141 * 1 Reserved 141 * 1 Reserved
142 * 0 HiLoCS 142 * 0 HiLoCS
143 */ 143 */
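/* Editorial note: register bits 30:2 hold DRAM address bits 34:6, so the
 * byte address is the register value shifted left by 4. Worked example,
 * assuming PAGE_SHIFT == 12, as used by do_process_ce() below:
 *	byte_addr = sec1_add << 4;
 *	page      = byte_addr >> PAGE_SHIFT == sec1_add >> (PAGE_SHIFT - 4);
 *	offset    = offset_in_page(sec1_add << 4);
 */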
144 #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */ 144 #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
145 /* error address register (32b) */ 145 /* error address register (32b) */
146 /* 146 /*
147 * 31 Reserved 147 * 31 Reserved
148 * 30:2 CE address (64 byte block 34:6) 148 * 30:2 CE address (64 byte block 34:6)
149 * 1 Reserved 149 * 1 Reserved
150 * 0 HiLoCS 150 * 0 HiLoCS
151 */ 151 */
152 #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */ 152 #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
153 /* error address register (32b) */ 153 /* error address register (32b) */
154 /* 154 /*
155 * 31 Reserved 155 * 31 Reserved
156 * 30:2 CE address (64 byte block 34:6) 156 * 30:2 CE address (64 byte block 34:6)
157 * 1 Reserved 157 * 1 Reserved
158 * 0 HiLoCS 158 * 0 HiLoCS
159 */ 159 */
160 #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */ 160 #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
161 /* error address register (32b) */ 161 /* error address register (32b) */
162 /* 162 /*
163 * 31 Reserved 163 * 31 Reserved
164 * 30:2 CE address (64 byte block 34:6) 164 * 30:2 CE address (64 byte block 34:6)
165 * 1 Reserved 165 * 1 Reserved
166 * 0 HiLoCS 166 * 0 HiLoCS
167 */ 167 */
168 #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */ 168 #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
169 /* error syndrome register (16b) */ 169 /* error syndrome register (16b) */
170 #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */ 170 #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
171 /* error syndrome register (16b) */ 171 /* error syndrome register (16b) */
172 #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ 172 #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
173 173
174 /* 3100 IMCH specific register addresses - device 0 function 1 */ 174 /* 3100 IMCH specific register addresses - device 0 function 1 */
175 #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */ 175 #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
176 #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */ 176 #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
177 #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */ 177 #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
178 #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */ 178 #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
179 179
180 /* ICH5R register addresses - device 30 function 0 */ 180 /* ICH5R register addresses - device 30 function 0 */
181 #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ 181 #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
182 #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ 182 #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
183 #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */ 183 #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
184 184
185 enum e752x_chips { 185 enum e752x_chips {
186 E7520 = 0, 186 E7520 = 0,
187 E7525 = 1, 187 E7525 = 1,
188 E7320 = 2, 188 E7320 = 2,
189 I3100 = 3 189 I3100 = 3
190 }; 190 };
191 191
192 /* 192 /*
193 * These chips support single-rank and dual-rank memories only. 193 * These chips support single-rank and dual-rank memories only.
194 * 194 *
195 * On e752x chips, the odd rows are present only on dual-rank memories. 195 * On e752x chips, the odd rows are present only on dual-rank memories.
196 * Dividing the rank by two will provide the dimm# 196 * Dividing the rank by two will provide the dimm#
197 * 197 *
198 * i3100 MC has a different mapping: it supports only 4 ranks. 198 * i3100 MC has a different mapping: it supports only 4 ranks.
199 * 199 *
200 * The mapping is (from 1 to n): 200 * The mapping is (from 1 to n):
201 * slot single-ranked double-ranked 201 * slot single-ranked double-ranked
202 * dimm #1 -> rank #4 NA 202 * dimm #1 -> rank #4 NA
203 * dimm #2 -> rank #3 NA 203 * dimm #2 -> rank #3 NA
204 * dimm #3 -> rank #2 Ranks 2 and 3 204 * dimm #3 -> rank #2 Ranks 2 and 3
205 * dimm #4 -> rank #1 Ranks 1 and 4 205 * dimm #4 -> rank #1 Ranks 1 and 4
206 * 206 *
207 * FIXME: The current mapping for i3100 assumes that it supports up to 8 207 * FIXME: The current mapping for i3100 assumes that it supports up to 8
208 * ranks/channel, but the datasheet says the MC supports only 4 ranks. 208 * ranks/channel, but the datasheet says the MC supports only 4 ranks.
209 */ 209 */
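/* Editorial sketch of the e752x rule stated above (illustrative helper,
 * not part of the driver): dividing the rank by two yields the dimm number.
 */
static inline int e752x_rank_to_dimm(int rank)
{
	return rank >> 1;	/* odd ranks exist only on dual-rank DIMMs */
}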
210 210
211 struct e752x_pvt { 211 struct e752x_pvt {
212 struct pci_dev *bridge_ck; 212 struct pci_dev *bridge_ck;
213 struct pci_dev *dev_d0f0; 213 struct pci_dev *dev_d0f0;
214 struct pci_dev *dev_d0f1; 214 struct pci_dev *dev_d0f1;
215 u32 tolm; 215 u32 tolm;
216 u32 remapbase; 216 u32 remapbase;
217 u32 remaplimit; 217 u32 remaplimit;
218 int mc_symmetric; 218 int mc_symmetric;
219 u8 map[8]; 219 u8 map[8];
220 int map_type; 220 int map_type;
221 const struct e752x_dev_info *dev_info; 221 const struct e752x_dev_info *dev_info;
222 }; 222 };
223 223
224 struct e752x_dev_info { 224 struct e752x_dev_info {
225 u16 err_dev; 225 u16 err_dev;
226 u16 ctl_dev; 226 u16 ctl_dev;
227 const char *ctl_name; 227 const char *ctl_name;
228 }; 228 };
229 229
230 struct e752x_error_info { 230 struct e752x_error_info {
231 u32 ferr_global; 231 u32 ferr_global;
232 u32 nerr_global; 232 u32 nerr_global;
233 u32 nsi_ferr; /* 3100 only */ 233 u32 nsi_ferr; /* 3100 only */
234 u32 nsi_nerr; /* 3100 only */ 234 u32 nsi_nerr; /* 3100 only */
235 u8 hi_ferr; /* all but 3100 */ 235 u8 hi_ferr; /* all but 3100 */
236 u8 hi_nerr; /* all but 3100 */ 236 u8 hi_nerr; /* all but 3100 */
237 u16 sysbus_ferr; 237 u16 sysbus_ferr;
238 u16 sysbus_nerr; 238 u16 sysbus_nerr;
239 u8 buf_ferr; 239 u8 buf_ferr;
240 u8 buf_nerr; 240 u8 buf_nerr;
241 u16 dram_ferr; 241 u16 dram_ferr;
242 u16 dram_nerr; 242 u16 dram_nerr;
243 u32 dram_sec1_add; 243 u32 dram_sec1_add;
244 u32 dram_sec2_add; 244 u32 dram_sec2_add;
245 u16 dram_sec1_syndrome; 245 u16 dram_sec1_syndrome;
246 u16 dram_sec2_syndrome; 246 u16 dram_sec2_syndrome;
247 u32 dram_ded_add; 247 u32 dram_ded_add;
248 u32 dram_scrb_add; 248 u32 dram_scrb_add;
249 u32 dram_retr_add; 249 u32 dram_retr_add;
250 }; 250 };
251 251
252 static const struct e752x_dev_info e752x_devs[] = { 252 static const struct e752x_dev_info e752x_devs[] = {
253 [E7520] = { 253 [E7520] = {
254 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, 254 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
255 .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0, 255 .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
256 .ctl_name = "E7520"}, 256 .ctl_name = "E7520"},
257 [E7525] = { 257 [E7525] = {
258 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, 258 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
259 .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0, 259 .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
260 .ctl_name = "E7525"}, 260 .ctl_name = "E7525"},
261 [E7320] = { 261 [E7320] = {
262 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, 262 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
263 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, 263 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
264 .ctl_name = "E7320"}, 264 .ctl_name = "E7320"},
265 [I3100] = { 265 [I3100] = {
266 .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR, 266 .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
267 .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0, 267 .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
268 .ctl_name = "3100"}, 268 .ctl_name = "3100"},
269 }; 269 };
270 270
271 /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We 271 /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
272 * map the scrubbing bandwidth to a hardware register value. The 'set' 272 * map the scrubbing bandwidth to a hardware register value. The 'set'
273 * operation finds the 'matching or higher value'. Note that scrubbing 273 * operation finds the 'matching or higher value'. Note that scrubbing
274 * on the e752x can only be enabled/disabled. The 3100 supports 274 * on the e752x can only be enabled/disabled. The 3100 supports
275 * a normal and fast mode. 275 * a normal and fast mode.
276 */ 276 */
277 277
278 #define SDRATE_EOT 0xFFFFFFFF 278 #define SDRATE_EOT 0xFFFFFFFF
279 279
280 struct scrubrate { 280 struct scrubrate {
281 u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */ 281 u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */
282 u16 scrubval; /* register value for scrub rate */ 282 u16 scrubval; /* register value for scrub rate */
283 }; 283 };
284 284
285 /* Rate below assumes same performance as i3100 using PC3200 DDR2 in 285 /* Rate below assumes same performance as i3100 using PC3200 DDR2 in
286 * normal mode. e752x bridges don't support choosing normal or fast mode, 286 * normal mode. e752x bridges don't support choosing normal or fast mode,
287 * so the scrubbing bandwidth value isn't all that important - scrubbing is 287 * so the scrubbing bandwidth value isn't all that important - scrubbing is
288 * either on or off. 288 * either on or off.
289 */ 289 */
290 static const struct scrubrate scrubrates_e752x[] = { 290 static const struct scrubrate scrubrates_e752x[] = {
291 {0, 0x00}, /* Scrubbing Off */ 291 {0, 0x00}, /* Scrubbing Off */
292 {500000, 0x02}, /* Scrubbing On */ 292 {500000, 0x02}, /* Scrubbing On */
293 {SDRATE_EOT, 0x00} /* End of Table */ 293 {SDRATE_EOT, 0x00} /* End of Table */
294 }; 294 };
295 295
296 /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s 296 /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
297 * Normal mode: 125 (32000 / 256) times slower than fast mode. 297 * Normal mode: 125 (32000 / 256) times slower than fast mode.
298 */ 298 */
299 static const struct scrubrate scrubrates_i3100[] = { 299 static const struct scrubrate scrubrates_i3100[] = {
300 {0, 0x00}, /* Scrubbing Off */ 300 {0, 0x00}, /* Scrubbing Off */
301 {500000, 0x0a}, /* Normal mode - 32k clocks */ 301 {500000, 0x0a}, /* Normal mode - 32k clocks */
302 {62500000, 0x06}, /* Fast mode - 256 clocks */ 302 {62500000, 0x06}, /* Fast mode - 256 clocks */
303 {SDRATE_EOT, 0x00} /* End of Table */ 303 {SDRATE_EOT, 0x00} /* End of Table */
304 }; 304 };
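/* Editorial sketch of the 'matching or higher' walk described above;
 * set_sdram_scrub_rate() below performs the same scan against these tables.
 * Illustrative only: the real driver returns -1 on SDRATE_EOT, while this
 * sketch falls back to the terminator's scrubval (0x00, scrubbing off).
 */
static u16 pick_scrubval(const struct scrubrate *tbl, u32 new_bw)
{
	int i;

	for (i = 0; tbl[i].bandwidth != SDRATE_EOT; i++)
		if (tbl[i].bandwidth >= new_bw)
			break;
	return tbl[i].scrubval;
}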
305 305
306 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, 306 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
307 unsigned long page) 307 unsigned long page)
308 { 308 {
309 u32 remap; 309 u32 remap;
310 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 310 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
311 311
312 debugf3("%s()\n", __func__); 312 debugf3("%s()\n", __func__);
313 313
314 if (page < pvt->tolm) 314 if (page < pvt->tolm)
315 return page; 315 return page;
316 316
317 if ((page >= 0x100000) && (page < pvt->remapbase)) 317 if ((page >= 0x100000) && (page < pvt->remapbase))
318 return page; 318 return page;
319 319
320 remap = (page - pvt->tolm) + pvt->remapbase; 320 remap = (page - pvt->tolm) + pvt->remapbase;
321 321
322 if (remap < pvt->remaplimit) 322 if (remap < pvt->remaplimit)
323 return remap; 323 return remap;
324 324
325 e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); 325 e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
326 return pvt->tolm - 1; 326 return pvt->tolm - 1;
327 } 327 }
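/* Editorial worked example of the remap above (illustrative values only):
 *	tolm      = 0x80000	(2 GiB in 4 KiB pages)
 *	remapbase = 0x100000	(4 GiB)
 *	page 0x70000 -> below TOLM, returned unchanged
 *	page 0x90000 -> remapped to (0x90000 - 0x80000) + 0x100000
 *			= 0x110000, accepted if below remaplimit
 */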
328 328
329 static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, 329 static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
330 u32 sec1_add, u16 sec1_syndrome) 330 u32 sec1_add, u16 sec1_syndrome)
331 { 331 {
332 u32 page; 332 u32 page;
333 int row; 333 int row;
334 int channel; 334 int channel;
335 int i; 335 int i;
336 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 336 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
337 337
338 debugf3("%s()\n", __func__); 338 debugf3("%s()\n", __func__);
339 339
340 /* convert the addr to 4k page */ 340 /* convert the addr to 4k page */
341 page = sec1_add >> (PAGE_SHIFT - 4); 341 page = sec1_add >> (PAGE_SHIFT - 4);
342 342
343 /* FIXME - check for -1 */ 343 /* FIXME - check for -1 */
344 if (pvt->mc_symmetric) { 344 if (pvt->mc_symmetric) {
345 /* chip selects are bits 14 & 13 */ 345 /* chip selects are bits 14 & 13 */
346 row = ((page >> 1) & 3); 346 row = ((page >> 1) & 3);
347 e752x_printk(KERN_WARNING, 347 e752x_printk(KERN_WARNING,
348 "Test row %d Table %d %d %d %d %d %d %d %d\n", row, 348 "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
349 pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], 349 pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
350 pvt->map[4], pvt->map[5], pvt->map[6], 350 pvt->map[4], pvt->map[5], pvt->map[6],
351 pvt->map[7]); 351 pvt->map[7]);
352 352
353 /* test for channel remapping */ 353 /* test for channel remapping */
354 for (i = 0; i < 8; i++) { 354 for (i = 0; i < 8; i++) {
355 if (pvt->map[i] == row) 355 if (pvt->map[i] == row)
356 break; 356 break;
357 } 357 }
358 358
359 e752x_printk(KERN_WARNING, "Test computed row %d\n", i); 359 e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
360 360
361 if (i < 8) 361 if (i < 8)
362 row = i; 362 row = i;
363 else 363 else
364 e752x_mc_printk(mci, KERN_WARNING, 364 e752x_mc_printk(mci, KERN_WARNING,
365 "row %d not found in remap table\n", 365 "row %d not found in remap table\n",
366 row); 366 row);
367 } else 367 } else
368 row = edac_mc_find_csrow_by_page(mci, page); 368 row = edac_mc_find_csrow_by_page(mci, page);
369 369
370 /* 0 = channel A, 1 = channel B */ 370 /* 0 = channel A, 1 = channel B */
371 channel = !(error_one & 1); 371 channel = !(error_one & 1);
372 372
373 /* e752x mc reads 34:6 of the DRAM linear address */ 373 /* e752x mc reads 34:6 of the DRAM linear address */
374 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 374 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
375 page, offset_in_page(sec1_add << 4), sec1_syndrome, 375 page, offset_in_page(sec1_add << 4), sec1_syndrome,
376 row, channel, -1, 376 row, channel, -1,
377 "e752x CE", "", NULL); 377 "e752x CE", "", NULL);
378 } 378 }
379 379
380 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, 380 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
381 u32 sec1_add, u16 sec1_syndrome, int *error_found, 381 u32 sec1_add, u16 sec1_syndrome, int *error_found,
382 int handle_error) 382 int handle_error)
383 { 383 {
384 *error_found = 1; 384 *error_found = 1;
385 385
386 if (handle_error) 386 if (handle_error)
387 do_process_ce(mci, error_one, sec1_add, sec1_syndrome); 387 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
388 } 388 }
389 389
390 static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, 390 static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
391 u32 ded_add, u32 scrb_add) 391 u32 ded_add, u32 scrb_add)
392 { 392 {
393 u32 error_2b, block_page; 393 u32 error_2b, block_page;
394 int row; 394 int row;
395 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 395 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
396 396
397 debugf3("%s()\n", __func__); 397 debugf3("%s()\n", __func__);
398 398
399 if (error_one & 0x0202) { 399 if (error_one & 0x0202) {
400 error_2b = ded_add; 400 error_2b = ded_add;
401 401
402 /* convert to 4k address */ 402 /* convert to 4k address */
403 block_page = error_2b >> (PAGE_SHIFT - 4); 403 block_page = error_2b >> (PAGE_SHIFT - 4);
404 404
405 row = pvt->mc_symmetric ? 405 row = pvt->mc_symmetric ?
406 /* chip selects are bits 14 & 13 */ 406 /* chip selects are bits 14 & 13 */
407 ((block_page >> 1) & 3) : 407 ((block_page >> 1) & 3) :
408 edac_mc_find_csrow_by_page(mci, block_page); 408 edac_mc_find_csrow_by_page(mci, block_page);
409 409
410 /* e752x mc reads 34:6 of the DRAM linear address */ 410 /* e752x mc reads 34:6 of the DRAM linear address */
411 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 411 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
412 block_page, 412 block_page,
413 offset_in_page(error_2b << 4), 0, 413 offset_in_page(error_2b << 4), 0,
414 row, -1, -1, 414 row, -1, -1,
415 "e752x UE from Read", "", NULL); 415 "e752x UE from Read", "", NULL);
416 416
417 } 417 }
418 if (error_one & 0x0404) { 418 if (error_one & 0x0404) {
419 error_2b = scrb_add; 419 error_2b = scrb_add;
420 420
421 /* convert to 4k address */ 421 /* convert to 4k address */
422 block_page = error_2b >> (PAGE_SHIFT - 4); 422 block_page = error_2b >> (PAGE_SHIFT - 4);
423 423
424 row = pvt->mc_symmetric ? 424 row = pvt->mc_symmetric ?
425 /* chip selects are bits 14 & 13 */ 425 /* chip selects are bits 14 & 13 */
426 ((block_page >> 1) & 3) : 426 ((block_page >> 1) & 3) :
427 edac_mc_find_csrow_by_page(mci, block_page); 427 edac_mc_find_csrow_by_page(mci, block_page);
428 428
429 /* e752x mc reads 34:6 of the DRAM linear address */ 429 /* e752x mc reads 34:6 of the DRAM linear address */
430 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 430 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
431 block_page, 431 block_page,
432 offset_in_page(error_2b << 4), 0, 432 offset_in_page(error_2b << 4), 0,
433 row, -1, -1, 433 row, -1, -1,
434 "e752x UE from Scruber", "", NULL); 434 "e752x UE from Scruber", "", NULL);
435 } 435 }
436 } 436 }
437 437
438 static inline void process_ue(struct mem_ctl_info *mci, u16 error_one, 438 static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
439 u32 ded_add, u32 scrb_add, int *error_found, 439 u32 ded_add, u32 scrb_add, int *error_found,
440 int handle_error) 440 int handle_error)
441 { 441 {
442 *error_found = 1; 442 *error_found = 1;
443 443
444 if (handle_error) 444 if (handle_error)
445 do_process_ue(mci, error_one, ded_add, scrb_add); 445 do_process_ue(mci, error_one, ded_add, scrb_add);
446 } 446 }
447 447
448 static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, 448 static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
449 int *error_found, int handle_error) 449 int *error_found, int handle_error)
450 { 450 {
451 *error_found = 1; 451 *error_found = 1;
452 452
453 if (!handle_error) 453 if (!handle_error)
454 return; 454 return;
455 455
456 debugf3("%s()\n", __func__); 456 debugf3("%s()\n", __func__);
457 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 457 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
458 -1, -1, -1, 458 -1, -1, -1,
459 "e752x UE log memory write", "", NULL); 459 "e752x UE log memory write", "", NULL);
460 } 460 }
461 461
462 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, 462 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
463 u32 retry_add) 463 u32 retry_add)
464 { 464 {
465 u32 error_1b, page; 465 u32 error_1b, page;
466 int row; 466 int row;
467 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 467 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
468 468
469 error_1b = retry_add; 469 error_1b = retry_add;
470 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ 470 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
471 471
472 /* chip selects are bits 14 & 13 */ 472 /* chip selects are bits 14 & 13 */
473 row = pvt->mc_symmetric ? ((page >> 1) & 3) : 473 row = pvt->mc_symmetric ? ((page >> 1) & 3) :
474 edac_mc_find_csrow_by_page(mci, page); 474 edac_mc_find_csrow_by_page(mci, page);
475 475
476 e752x_mc_printk(mci, KERN_WARNING, 476 e752x_mc_printk(mci, KERN_WARNING,
477 "CE page 0x%lx, row %d : Memory read retry\n", 477 "CE page 0x%lx, row %d : Memory read retry\n",
478 (long unsigned int)page, row); 478 (long unsigned int)page, row);
479 } 479 }
480 480
481 static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, 481 static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
482 u32 retry_add, int *error_found, 482 u32 retry_add, int *error_found,
483 int handle_error) 483 int handle_error)
484 { 484 {
485 *error_found = 1; 485 *error_found = 1;
486 486
487 if (handle_error) 487 if (handle_error)
488 do_process_ded_retry(mci, error, retry_add); 488 do_process_ded_retry(mci, error, retry_add);
489 } 489 }
490 490
491 static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, 491 static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
492 int *error_found, int handle_error) 492 int *error_found, int handle_error)
493 { 493 {
494 *error_found = 1; 494 *error_found = 1;
495 495
496 if (handle_error) 496 if (handle_error)
497 e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n"); 497 e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
498 } 498 }
499 499
500 static char *global_message[11] = { 500 static char *global_message[11] = {
501 "PCI Express C1", 501 "PCI Express C1",
502 "PCI Express C", 502 "PCI Express C",
503 "PCI Express B1", 503 "PCI Express B1",
504 "PCI Express B", 504 "PCI Express B",
505 "PCI Express A1", 505 "PCI Express A1",
506 "PCI Express A", 506 "PCI Express A",
507 "DMA Controller", 507 "DMA Controller",
508 "HUB or NS Interface", 508 "HUB or NS Interface",
509 "System Bus", 509 "System Bus",
510 "DRAM Controller", /* 9th entry */ 510 "DRAM Controller", /* 9th entry */
511 "Internal Buffer" 511 "Internal Buffer"
512 }; 512 };
513 513
514 #define DRAM_ENTRY 9 514 #define DRAM_ENTRY 9
515 515
516 static char *fatal_message[2] = { "Non-Fatal ", "Fatal " }; 516 static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
517 517
518 static void do_global_error(int fatal, u32 errors) 518 static void do_global_error(int fatal, u32 errors)
519 { 519 {
520 int i; 520 int i;
521 521
522 for (i = 0; i < 11; i++) { 522 for (i = 0; i < 11; i++) {
523 if (errors & (1 << i)) { 523 if (errors & (1 << i)) {
524 /* If the error is from DRAM Controller OR 524 /* If the error is from DRAM Controller OR
525 * we are to report ALL errors, then 525 * we are to report ALL errors, then
526 * report the error 526 * report the error
527 */ 527 */
528 if ((i == DRAM_ENTRY) || report_non_memory_errors) 528 if ((i == DRAM_ENTRY) || report_non_memory_errors)
529 e752x_printk(KERN_WARNING, "%sError %s\n", 529 e752x_printk(KERN_WARNING, "%sError %s\n",
530 fatal_message[fatal], 530 fatal_message[fatal],
531 global_message[i]); 531 global_message[i]);
532 } 532 }
533 } 533 }
534 } 534 }
535 535
536 static inline void global_error(int fatal, u32 errors, int *error_found, 536 static inline void global_error(int fatal, u32 errors, int *error_found,
537 int handle_error) 537 int handle_error)
538 { 538 {
539 *error_found = 1; 539 *error_found = 1;
540 540
541 if (handle_error) 541 if (handle_error)
542 do_global_error(fatal, errors); 542 do_global_error(fatal, errors);
543 } 543 }
544 544
545 static char *hub_message[7] = { 545 static char *hub_message[7] = {
546 "HI Address or Command Parity", "HI Illegal Access", 546 "HI Address or Command Parity", "HI Illegal Access",
547 "HI Internal Parity", "Out of Range Access", 547 "HI Internal Parity", "Out of Range Access",
548 "HI Data Parity", "Enhanced Config Access", 548 "HI Data Parity", "Enhanced Config Access",
549 "Hub Interface Target Abort" 549 "Hub Interface Target Abort"
550 }; 550 };
551 551
552 static void do_hub_error(int fatal, u8 errors) 552 static void do_hub_error(int fatal, u8 errors)
553 { 553 {
554 int i; 554 int i;
555 555
556 for (i = 0; i < 7; i++) { 556 for (i = 0; i < 7; i++) {
557 if (errors & (1 << i)) 557 if (errors & (1 << i))
558 e752x_printk(KERN_WARNING, "%sError %s\n", 558 e752x_printk(KERN_WARNING, "%sError %s\n",
559 fatal_message[fatal], hub_message[i]); 559 fatal_message[fatal], hub_message[i]);
560 } 560 }
561 } 561 }
562 562
563 static inline void hub_error(int fatal, u8 errors, int *error_found, 563 static inline void hub_error(int fatal, u8 errors, int *error_found,
564 int handle_error) 564 int handle_error)
565 { 565 {
566 *error_found = 1; 566 *error_found = 1;
567 567
568 if (handle_error) 568 if (handle_error)
569 do_hub_error(fatal, errors); 569 do_hub_error(fatal, errors);
570 } 570 }
571 571
572 #define NSI_FATAL_MASK 0x0c080081 572 #define NSI_FATAL_MASK 0x0c080081
573 #define NSI_NON_FATAL_MASK 0x23a0ba64 573 #define NSI_NON_FATAL_MASK 0x23a0ba64
574 #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK) 574 #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
575 575
576 static char *nsi_message[30] = { 576 static char *nsi_message[30] = {
577 "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */ 577 "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
578 "", /* reserved */ 578 "", /* reserved */
579 "NSI Parity Error", /* bit 2, non-fatal */ 579 "NSI Parity Error", /* bit 2, non-fatal */
580 "", /* reserved */ 580 "", /* reserved */
581 "", /* reserved */ 581 "", /* reserved */
582 "Correctable Error Message", /* bit 5, non-fatal */ 582 "Correctable Error Message", /* bit 5, non-fatal */
583 "Non-Fatal Error Message", /* bit 6, non-fatal */ 583 "Non-Fatal Error Message", /* bit 6, non-fatal */
584 "Fatal Error Message", /* bit 7, fatal */ 584 "Fatal Error Message", /* bit 7, fatal */
585 "", /* reserved */ 585 "", /* reserved */
586 "Receiver Error", /* bit 9, non-fatal */ 586 "Receiver Error", /* bit 9, non-fatal */
587 "", /* reserved */ 587 "", /* reserved */
588 "Bad TLP", /* bit 11, non-fatal */ 588 "Bad TLP", /* bit 11, non-fatal */
589 "Bad DLLP", /* bit 12, non-fatal */ 589 "Bad DLLP", /* bit 12, non-fatal */
590 "REPLAY_NUM Rollover", /* bit 13, non-fatal */ 590 "REPLAY_NUM Rollover", /* bit 13, non-fatal */
591 "", /* reserved */ 591 "", /* reserved */
592 "Replay Timer Timeout", /* bit 15, non-fatal */ 592 "Replay Timer Timeout", /* bit 15, non-fatal */
593 "", /* reserved */ 593 "", /* reserved */
594 "", /* reserved */ 594 "", /* reserved */
595 "", /* reserved */ 595 "", /* reserved */
596 "Data Link Protocol Error", /* bit 19, fatal */ 596 "Data Link Protocol Error", /* bit 19, fatal */
597 "", /* reserved */ 597 "", /* reserved */
598 "Poisoned TLP", /* bit 21, non-fatal */ 598 "Poisoned TLP", /* bit 21, non-fatal */
599 "", /* reserved */ 599 "", /* reserved */
600 "Completion Timeout", /* bit 23, non-fatal */ 600 "Completion Timeout", /* bit 23, non-fatal */
601 "Completer Abort", /* bit 24, non-fatal */ 601 "Completer Abort", /* bit 24, non-fatal */
602 "Unexpected Completion", /* bit 25, non-fatal */ 602 "Unexpected Completion", /* bit 25, non-fatal */
603 "Receiver Overflow", /* bit 26, fatal */ 603 "Receiver Overflow", /* bit 26, fatal */
604 "Malformed TLP", /* bit 27, fatal */ 604 "Malformed TLP", /* bit 27, fatal */
605 "", /* reserved */ 605 "", /* reserved */
606 "Unsupported Request" /* bit 29, non-fatal */ 606 "Unsupported Request" /* bit 29, non-fatal */
607 }; 607 };
608 608
609 static void do_nsi_error(int fatal, u32 errors) 609 static void do_nsi_error(int fatal, u32 errors)
610 { 610 {
611 int i; 611 int i;
612 612
613 for (i = 0; i < 30; i++) { 613 for (i = 0; i < 30; i++) {
614 if (errors & (1 << i)) 614 if (errors & (1 << i))
615 printk(KERN_WARNING "%sError %s\n", 615 printk(KERN_WARNING "%sError %s\n",
616 fatal_message[fatal], nsi_message[i]); 616 fatal_message[fatal], nsi_message[i]);
617 } 617 }
618 } 618 }
619 619
620 static inline void nsi_error(int fatal, u32 errors, int *error_found, 620 static inline void nsi_error(int fatal, u32 errors, int *error_found,
621 int handle_error) 621 int handle_error)
622 { 622 {
623 *error_found = 1; 623 *error_found = 1;
624 624
625 if (handle_error) 625 if (handle_error)
626 do_nsi_error(fatal, errors); 626 do_nsi_error(fatal, errors);
627 } 627 }
628 628
629 static char *membuf_message[4] = { 629 static char *membuf_message[4] = {
630 "Internal PMWB to DRAM parity", 630 "Internal PMWB to DRAM parity",
631 "Internal PMWB to System Bus Parity", 631 "Internal PMWB to System Bus Parity",
632 "Internal System Bus or IO to PMWB Parity", 632 "Internal System Bus or IO to PMWB Parity",
633 "Internal DRAM to PMWB Parity" 633 "Internal DRAM to PMWB Parity"
634 }; 634 };
635 635
636 static void do_membuf_error(u8 errors) 636 static void do_membuf_error(u8 errors)
637 { 637 {
638 int i; 638 int i;
639 639
640 for (i = 0; i < 4; i++) { 640 for (i = 0; i < 4; i++) {
641 if (errors & (1 << i)) 641 if (errors & (1 << i))
642 e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n", 642 e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
643 membuf_message[i]); 643 membuf_message[i]);
644 } 644 }
645 } 645 }
646 646
647 static inline void membuf_error(u8 errors, int *error_found, int handle_error) 647 static inline void membuf_error(u8 errors, int *error_found, int handle_error)
648 { 648 {
649 *error_found = 1; 649 *error_found = 1;
650 650
651 if (handle_error) 651 if (handle_error)
652 do_membuf_error(errors); 652 do_membuf_error(errors);
653 } 653 }
654 654
655 static char *sysbus_message[10] = { 655 static char *sysbus_message[10] = {
656 "Addr or Request Parity", 656 "Addr or Request Parity",
657 "Data Strobe Glitch", 657 "Data Strobe Glitch",
658 "Addr Strobe Glitch", 658 "Addr Strobe Glitch",
659 "Data Parity", 659 "Data Parity",
660 "Addr Above TOM", 660 "Addr Above TOM",
661 "Non DRAM Lock Error", 661 "Non DRAM Lock Error",
662 "MCERR", "BINIT", 662 "MCERR", "BINIT",
663 "Memory Parity", 663 "Memory Parity",
664 "IO Subsystem Parity" 664 "IO Subsystem Parity"
665 }; 665 };
666 666
667 static void do_sysbus_error(int fatal, u32 errors) 667 static void do_sysbus_error(int fatal, u32 errors)
668 { 668 {
669 int i; 669 int i;
670 670
671 for (i = 0; i < 10; i++) { 671 for (i = 0; i < 10; i++) {
672 if (errors & (1 << i)) 672 if (errors & (1 << i))
673 e752x_printk(KERN_WARNING, "%sError System Bus %s\n", 673 e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
674 fatal_message[fatal], sysbus_message[i]); 674 fatal_message[fatal], sysbus_message[i]);
675 } 675 }
676 } 676 }
677 677
678 static inline void sysbus_error(int fatal, u32 errors, int *error_found, 678 static inline void sysbus_error(int fatal, u32 errors, int *error_found,
679 int handle_error) 679 int handle_error)
680 { 680 {
681 *error_found = 1; 681 *error_found = 1;
682 682
683 if (handle_error) 683 if (handle_error)
684 do_sysbus_error(fatal, errors); 684 do_sysbus_error(fatal, errors);
685 } 685 }
686 686
687 static void e752x_check_hub_interface(struct e752x_error_info *info, 687 static void e752x_check_hub_interface(struct e752x_error_info *info,
688 int *error_found, int handle_error) 688 int *error_found, int handle_error)
689 { 689 {
690 u8 stat8; 690 u8 stat8;
691 691
692 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); 692 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
693 693
694 stat8 = info->hi_ferr; 694 stat8 = info->hi_ferr;
695 695
696 if (stat8 & 0x7f) { /* Error, so process */ 696 if (stat8 & 0x7f) { /* Error, so process */
697 stat8 &= 0x7f; 697 stat8 &= 0x7f;
698 698
699 if (stat8 & 0x2b) 699 if (stat8 & 0x2b)
700 hub_error(1, stat8 & 0x2b, error_found, handle_error); 700 hub_error(1, stat8 & 0x2b, error_found, handle_error);
701 701
702 if (stat8 & 0x54) 702 if (stat8 & 0x54)
703 hub_error(0, stat8 & 0x54, error_found, handle_error); 703 hub_error(0, stat8 & 0x54, error_found, handle_error);
704 } 704 }
705 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); 705 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
706 706
707 stat8 = info->hi_nerr; 707 stat8 = info->hi_nerr;
708 708
709 if (stat8 & 0x7f) { /* Error, so process */ 709 if (stat8 & 0x7f) { /* Error, so process */
710 stat8 &= 0x7f; 710 stat8 &= 0x7f;
711 711
712 if (stat8 & 0x2b) 712 if (stat8 & 0x2b)
713 hub_error(1, stat8 & 0x2b, error_found, handle_error); 713 hub_error(1, stat8 & 0x2b, error_found, handle_error);
714 714
715 if (stat8 & 0x54) 715 if (stat8 & 0x54)
716 hub_error(0, stat8 & 0x54, error_found, handle_error); 716 hub_error(0, stat8 & 0x54, error_found, handle_error);
717 } 717 }
718 } 718 }
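/* Editorial decode of the two masks above against hub_message[]
 * (an observation from the code, not a datasheet):
 *	0x2b = bits 0,1,3,5 -> Addr/Cmd Parity, Illegal Access,
 *	       Out of Range Access, Enhanced Config Access (fatal)
 *	0x54 = bits 2,4,6   -> Internal Parity, Data Parity,
 *	       Target Abort (non-fatal)
 */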
719 719
720 static void e752x_check_ns_interface(struct e752x_error_info *info, 720 static void e752x_check_ns_interface(struct e752x_error_info *info,
721 int *error_found, int handle_error) 721 int *error_found, int handle_error)
722 { 722 {
723 u32 stat32; 723 u32 stat32;
724 724
725 stat32 = info->nsi_ferr; 725 stat32 = info->nsi_ferr;
726 if (stat32 & NSI_ERR_MASK) { /* Error, so process */ 726 if (stat32 & NSI_ERR_MASK) { /* Error, so process */
727 if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */ 727 if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
728 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found, 728 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
729 handle_error); 729 handle_error);
730 if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */ 730 if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
731 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found, 731 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
732 handle_error); 732 handle_error);
733 } 733 }
734 stat32 = info->nsi_nerr; 734 stat32 = info->nsi_nerr;
735 if (stat32 & NSI_ERR_MASK) { 735 if (stat32 & NSI_ERR_MASK) {
736 if (stat32 & NSI_FATAL_MASK) 736 if (stat32 & NSI_FATAL_MASK)
737 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found, 737 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
738 handle_error); 738 handle_error);
739 if (stat32 & NSI_NON_FATAL_MASK) 739 if (stat32 & NSI_NON_FATAL_MASK)
740 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found, 740 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
741 handle_error); 741 handle_error);
742 } 742 }
743 } 743 }
744 744
745 static void e752x_check_sysbus(struct e752x_error_info *info, 745 static void e752x_check_sysbus(struct e752x_error_info *info,
746 int *error_found, int handle_error) 746 int *error_found, int handle_error)
747 { 747 {
748 u32 stat32, error32; 748 u32 stat32, error32;
749 749
750 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32); 750 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
751 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16); 751 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
752 752
753 if (stat32 == 0) 753 if (stat32 == 0)
754 return; /* no errors */ 754 return; /* no errors */
755 755
756 error32 = (stat32 >> 16) & 0x3ff; 756 error32 = (stat32 >> 16) & 0x3ff;
757 stat32 = stat32 & 0x3ff; 757 stat32 = stat32 & 0x3ff;
758 758
759 if (stat32 & 0x087) 759 if (stat32 & 0x087)
760 sysbus_error(1, stat32 & 0x087, error_found, handle_error); 760 sysbus_error(1, stat32 & 0x087, error_found, handle_error);
761 761
762 if (stat32 & 0x378) 762 if (stat32 & 0x378)
763 sysbus_error(0, stat32 & 0x378, error_found, handle_error); 763 sysbus_error(0, stat32 & 0x378, error_found, handle_error);
764 764
765 if (error32 & 0x087) 765 if (error32 & 0x087)
766 sysbus_error(1, error32 & 0x087, error_found, handle_error); 766 sysbus_error(1, error32 & 0x087, error_found, handle_error);
767 767
768 if (error32 & 0x378) 768 if (error32 & 0x378)
769 sysbus_error(0, error32 & 0x378, error_found, handle_error); 769 sysbus_error(0, error32 & 0x378, error_found, handle_error);
770 } 770 }
771 771
772 static void e752x_check_membuf(struct e752x_error_info *info, 772 static void e752x_check_membuf(struct e752x_error_info *info,
773 int *error_found, int handle_error) 773 int *error_found, int handle_error)
774 { 774 {
775 u8 stat8; 775 u8 stat8;
776 776
777 stat8 = info->buf_ferr; 777 stat8 = info->buf_ferr;
778 778
779 if (stat8 & 0x0f) { /* Error, so process */ 779 if (stat8 & 0x0f) { /* Error, so process */
780 stat8 &= 0x0f; 780 stat8 &= 0x0f;
781 membuf_error(stat8, error_found, handle_error); 781 membuf_error(stat8, error_found, handle_error);
782 } 782 }
783 783
784 stat8 = info->buf_nerr; 784 stat8 = info->buf_nerr;
785 785
786 if (stat8 & 0x0f) { /* Error, so process */ 786 if (stat8 & 0x0f) { /* Error, so process */
787 stat8 &= 0x0f; 787 stat8 &= 0x0f;
788 membuf_error(stat8, error_found, handle_error); 788 membuf_error(stat8, error_found, handle_error);
789 } 789 }
790 } 790 }
791 791
792 static void e752x_check_dram(struct mem_ctl_info *mci, 792 static void e752x_check_dram(struct mem_ctl_info *mci,
793 struct e752x_error_info *info, int *error_found, 793 struct e752x_error_info *info, int *error_found,
794 int handle_error) 794 int handle_error)
795 { 795 {
796 u16 error_one, error_next; 796 u16 error_one, error_next;
797 797
798 error_one = info->dram_ferr; 798 error_one = info->dram_ferr;
799 error_next = info->dram_nerr; 799 error_next = info->dram_nerr;
800 800
801 /* decode and report errors */ 801 /* decode and report errors */
802 if (error_one & 0x0101) /* check first error correctable */ 802 if (error_one & 0x0101) /* check first error correctable */
803 process_ce(mci, error_one, info->dram_sec1_add, 803 process_ce(mci, error_one, info->dram_sec1_add,
804 info->dram_sec1_syndrome, error_found, handle_error); 804 info->dram_sec1_syndrome, error_found, handle_error);
805 805
806 if (error_next & 0x0101) /* check next error correctable */ 806 if (error_next & 0x0101) /* check next error correctable */
807 process_ce(mci, error_next, info->dram_sec2_add, 807 process_ce(mci, error_next, info->dram_sec2_add,
808 info->dram_sec2_syndrome, error_found, handle_error); 808 info->dram_sec2_syndrome, error_found, handle_error);
809 809
810 if (error_one & 0x4040) 810 if (error_one & 0x4040)
811 process_ue_no_info_wr(mci, error_found, handle_error); 811 process_ue_no_info_wr(mci, error_found, handle_error);
812 812
813 if (error_next & 0x4040) 813 if (error_next & 0x4040)
814 process_ue_no_info_wr(mci, error_found, handle_error); 814 process_ue_no_info_wr(mci, error_found, handle_error);
815 815
816 if (error_one & 0x2020) 816 if (error_one & 0x2020)
817 process_ded_retry(mci, error_one, info->dram_retr_add, 817 process_ded_retry(mci, error_one, info->dram_retr_add,
818 error_found, handle_error); 818 error_found, handle_error);
819 819
820 if (error_next & 0x2020) 820 if (error_next & 0x2020)
821 process_ded_retry(mci, error_next, info->dram_retr_add, 821 process_ded_retry(mci, error_next, info->dram_retr_add,
822 error_found, handle_error); 822 error_found, handle_error);
823 823
824 if (error_one & 0x0808) 824 if (error_one & 0x0808)
825 process_threshold_ce(mci, error_one, error_found, handle_error); 825 process_threshold_ce(mci, error_one, error_found, handle_error);
826 826
827 if (error_next & 0x0808) 827 if (error_next & 0x0808)
828 process_threshold_ce(mci, error_next, error_found, 828 process_threshold_ce(mci, error_next, error_found,
829 handle_error); 829 handle_error);
830 830
831 if (error_one & 0x0606) 831 if (error_one & 0x0606)
832 process_ue(mci, error_one, info->dram_ded_add, 832 process_ue(mci, error_one, info->dram_ded_add,
833 info->dram_scrb_add, error_found, handle_error); 833 info->dram_scrb_add, error_found, handle_error);
834 834
835 if (error_next & 0x0606) 835 if (error_next & 0x0606)
836 process_ue(mci, error_next, info->dram_ded_add, 836 process_ue(mci, error_next, info->dram_ded_add,
837 info->dram_scrb_add, error_found, handle_error); 837 info->dram_scrb_add, error_found, handle_error);
838 } 838 }
839 839
840 static void e752x_get_error_info(struct mem_ctl_info *mci, 840 static void e752x_get_error_info(struct mem_ctl_info *mci,
841 struct e752x_error_info *info) 841 struct e752x_error_info *info)
842 { 842 {
843 struct pci_dev *dev; 843 struct pci_dev *dev;
844 struct e752x_pvt *pvt; 844 struct e752x_pvt *pvt;
845 845
846 memset(info, 0, sizeof(*info)); 846 memset(info, 0, sizeof(*info));
847 pvt = (struct e752x_pvt *)mci->pvt_info; 847 pvt = (struct e752x_pvt *)mci->pvt_info;
848 dev = pvt->dev_d0f1; 848 dev = pvt->dev_d0f1;
849 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); 849 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
850 850
851 if (info->ferr_global) { 851 if (info->ferr_global) {
852 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { 852 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
853 pci_read_config_dword(dev, I3100_NSI_FERR, 853 pci_read_config_dword(dev, I3100_NSI_FERR,
854 &info->nsi_ferr); 854 &info->nsi_ferr);
855 info->hi_ferr = 0; 855 info->hi_ferr = 0;
856 } else { 856 } else {
857 pci_read_config_byte(dev, E752X_HI_FERR, 857 pci_read_config_byte(dev, E752X_HI_FERR,
858 &info->hi_ferr); 858 &info->hi_ferr);
859 info->nsi_ferr = 0; 859 info->nsi_ferr = 0;
860 } 860 }
861 pci_read_config_word(dev, E752X_SYSBUS_FERR, 861 pci_read_config_word(dev, E752X_SYSBUS_FERR,
862 &info->sysbus_ferr); 862 &info->sysbus_ferr);
863 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); 863 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
864 pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr); 864 pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
865 pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD, 865 pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
866 &info->dram_sec1_add); 866 &info->dram_sec1_add);
867 pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME, 867 pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
868 &info->dram_sec1_syndrome); 868 &info->dram_sec1_syndrome);
869 pci_read_config_dword(dev, E752X_DRAM_DED_ADD, 869 pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
870 &info->dram_ded_add); 870 &info->dram_ded_add);
871 pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD, 871 pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
872 &info->dram_scrb_add); 872 &info->dram_scrb_add);
873 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, 873 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
874 &info->dram_retr_add); 874 &info->dram_retr_add);
875 875
876 /* ignore the reserved bits just in case */ 876 /* ignore the reserved bits just in case */
877 if (info->hi_ferr & 0x7f) 877 if (info->hi_ferr & 0x7f)
878 pci_write_config_byte(dev, E752X_HI_FERR, 878 pci_write_config_byte(dev, E752X_HI_FERR,
879 info->hi_ferr); 879 info->hi_ferr);
880 880
881 if (info->nsi_ferr & NSI_ERR_MASK) 881 if (info->nsi_ferr & NSI_ERR_MASK)
882 pci_write_config_dword(dev, I3100_NSI_FERR, 882 pci_write_config_dword(dev, I3100_NSI_FERR,
883 info->nsi_ferr); 883 info->nsi_ferr);
884 884
885 if (info->sysbus_ferr) 885 if (info->sysbus_ferr)
886 pci_write_config_word(dev, E752X_SYSBUS_FERR, 886 pci_write_config_word(dev, E752X_SYSBUS_FERR,
887 info->sysbus_ferr); 887 info->sysbus_ferr);
888 888
889 if (info->buf_ferr & 0x0f) 889 if (info->buf_ferr & 0x0f)
890 pci_write_config_byte(dev, E752X_BUF_FERR, 890 pci_write_config_byte(dev, E752X_BUF_FERR,
891 info->buf_ferr); 891 info->buf_ferr);
892 892
893 if (info->dram_ferr) 893 if (info->dram_ferr)
894 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR, 894 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
895 info->dram_ferr, info->dram_ferr); 895 info->dram_ferr, info->dram_ferr);
896 896
897 pci_write_config_dword(dev, E752X_FERR_GLOBAL, 897 pci_write_config_dword(dev, E752X_FERR_GLOBAL,
898 info->ferr_global); 898 info->ferr_global);
899 } 899 }
900 900
901 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); 901 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
902 902
903 if (info->nerr_global) { 903 if (info->nerr_global) {
904 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { 904 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
905 pci_read_config_dword(dev, I3100_NSI_NERR, 905 pci_read_config_dword(dev, I3100_NSI_NERR,
906 &info->nsi_nerr); 906 &info->nsi_nerr);
907 info->hi_nerr = 0; 907 info->hi_nerr = 0;
908 } else { 908 } else {
909 pci_read_config_byte(dev, E752X_HI_NERR, 909 pci_read_config_byte(dev, E752X_HI_NERR,
910 &info->hi_nerr); 910 &info->hi_nerr);
911 info->nsi_nerr = 0; 911 info->nsi_nerr = 0;
912 } 912 }
913 pci_read_config_word(dev, E752X_SYSBUS_NERR, 913 pci_read_config_word(dev, E752X_SYSBUS_NERR,
914 &info->sysbus_nerr); 914 &info->sysbus_nerr);
915 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); 915 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
916 pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr); 916 pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
917 pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD, 917 pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
918 &info->dram_sec2_add); 918 &info->dram_sec2_add);
919 pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME, 919 pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
920 &info->dram_sec2_syndrome); 920 &info->dram_sec2_syndrome);
921 921
922 if (info->hi_nerr & 0x7f) 922 if (info->hi_nerr & 0x7f)
923 pci_write_config_byte(dev, E752X_HI_NERR, 923 pci_write_config_byte(dev, E752X_HI_NERR,
924 info->hi_nerr); 924 info->hi_nerr);
925 925
926 if (info->nsi_nerr & NSI_ERR_MASK) 926 if (info->nsi_nerr & NSI_ERR_MASK)
927 pci_write_config_dword(dev, I3100_NSI_NERR, 927 pci_write_config_dword(dev, I3100_NSI_NERR,
928 info->nsi_nerr); 928 info->nsi_nerr);
929 929
930 if (info->sysbus_nerr) 930 if (info->sysbus_nerr)
931 pci_write_config_word(dev, E752X_SYSBUS_NERR, 931 pci_write_config_word(dev, E752X_SYSBUS_NERR,
932 info->sysbus_nerr); 932 info->sysbus_nerr);
933 933
934 if (info->buf_nerr & 0x0f) 934 if (info->buf_nerr & 0x0f)
935 pci_write_config_byte(dev, E752X_BUF_NERR, 935 pci_write_config_byte(dev, E752X_BUF_NERR,
936 info->buf_nerr); 936 info->buf_nerr);
937 937
938 if (info->dram_nerr) 938 if (info->dram_nerr)
939 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR, 939 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
940 info->dram_nerr, info->dram_nerr); 940 info->dram_nerr, info->dram_nerr);
941 941
942 pci_write_config_dword(dev, E752X_NERR_GLOBAL, 942 pci_write_config_dword(dev, E752X_NERR_GLOBAL,
943 info->nerr_global); 943 info->nerr_global);
944 } 944 }
945 } 945 }
946 946
947 static int e752x_process_error_info(struct mem_ctl_info *mci, 947 static int e752x_process_error_info(struct mem_ctl_info *mci,
948 struct e752x_error_info *info, 948 struct e752x_error_info *info,
949 int handle_errors) 949 int handle_errors)
950 { 950 {
951 u32 error32, stat32; 951 u32 error32, stat32;
952 int error_found; 952 int error_found;
953 953
954 error_found = 0; 954 error_found = 0;
955 error32 = (info->ferr_global >> 18) & 0x3ff; 955 error32 = (info->ferr_global >> 18) & 0x3ff;
956 stat32 = (info->ferr_global >> 4) & 0x7ff; 956 stat32 = (info->ferr_global >> 4) & 0x7ff;
957 957
958 if (error32) 958 if (error32)
959 global_error(1, error32, &error_found, handle_errors); 959 global_error(1, error32, &error_found, handle_errors);
960 960
961 if (stat32) 961 if (stat32)
962 global_error(0, stat32, &error_found, handle_errors); 962 global_error(0, stat32, &error_found, handle_errors);
963 963
964 error32 = (info->nerr_global >> 18) & 0x3ff; 964 error32 = (info->nerr_global >> 18) & 0x3ff;
965 stat32 = (info->nerr_global >> 4) & 0x7ff; 965 stat32 = (info->nerr_global >> 4) & 0x7ff;
966 966
967 if (error32) 967 if (error32)
968 global_error(1, error32, &error_found, handle_errors); 968 global_error(1, error32, &error_found, handle_errors);
969 969
970 if (stat32) 970 if (stat32)
971 global_error(0, stat32, &error_found, handle_errors); 971 global_error(0, stat32, &error_found, handle_errors);
972 972
973 e752x_check_hub_interface(info, &error_found, handle_errors); 973 e752x_check_hub_interface(info, &error_found, handle_errors);
974 e752x_check_ns_interface(info, &error_found, handle_errors); 974 e752x_check_ns_interface(info, &error_found, handle_errors);
975 e752x_check_sysbus(info, &error_found, handle_errors); 975 e752x_check_sysbus(info, &error_found, handle_errors);
976 e752x_check_membuf(info, &error_found, handle_errors); 976 e752x_check_membuf(info, &error_found, handle_errors);
977 e752x_check_dram(mci, info, &error_found, handle_errors); 977 e752x_check_dram(mci, info, &error_found, handle_errors);
978 return error_found; 978 return error_found;
979 } 979 }
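/* Editorial note on the extractions above (observed from the code):
 * FERR/NERR_GLOBAL bits 27:18 form the fatal error map and bits 14:4 the
 * non-fatal map; both are decoded bit-by-bit against global_message[].
 */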
980 980
981 static void e752x_check(struct mem_ctl_info *mci) 981 static void e752x_check(struct mem_ctl_info *mci)
982 { 982 {
983 struct e752x_error_info info; 983 struct e752x_error_info info;
984 984
985 debugf3("%s()\n", __func__); 985 debugf3("%s()\n", __func__);
986 e752x_get_error_info(mci, &info); 986 e752x_get_error_info(mci, &info);
987 e752x_process_error_info(mci, &info, 1); 987 e752x_process_error_info(mci, &info, 1);
988 } 988 }
989 989
990 /* Program byte/sec bandwidth scrub rate to hardware */ 990 /* Program byte/sec bandwidth scrub rate to hardware */
991 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw) 991 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
992 { 992 {
993 const struct scrubrate *scrubrates; 993 const struct scrubrate *scrubrates;
994 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 994 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
995 struct pci_dev *pdev = pvt->dev_d0f0; 995 struct pci_dev *pdev = pvt->dev_d0f0;
996 int i; 996 int i;
997 997
998 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0) 998 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
999 scrubrates = scrubrates_i3100; 999 scrubrates = scrubrates_i3100;
1000 else 1000 else
1001 scrubrates = scrubrates_e752x; 1001 scrubrates = scrubrates_e752x;
1002 1002
1003 /* Translate the desired scrub rate to an e752x/3100 register value. 1003 /* Translate the desired scrub rate to an e752x/3100 register value.
1004 * Search for the bandwidth that is equal to or greater than the 1004 * Search for the bandwidth that is equal to or greater than the
1005 * desired rate and program the corresponding register value. 1005 * desired rate and program the corresponding register value.
1006 */ 1006 */
1007 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++) 1007 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1008 if (scrubrates[i].bandwidth >= new_bw) 1008 if (scrubrates[i].bandwidth >= new_bw)
1009 break; 1009 break;
1010 1010
1011 if (scrubrates[i].bandwidth == SDRATE_EOT) 1011 if (scrubrates[i].bandwidth == SDRATE_EOT)
1012 return -1; 1012 return -1;
1013 1013
1014 pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval); 1014 pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
1015 1015
1016 return scrubrates[i].bandwidth; 1016 return scrubrates[i].bandwidth;
1017 } 1017 }
1018 1018
1019 /* Convert current scrub rate value into byte/sec bandwidth */ 1019 /* Convert current scrub rate value into byte/sec bandwidth */
1020 static int get_sdram_scrub_rate(struct mem_ctl_info *mci) 1020 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
1021 { 1021 {
1022 const struct scrubrate *scrubrates; 1022 const struct scrubrate *scrubrates;
1023 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 1023 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
1024 struct pci_dev *pdev = pvt->dev_d0f0; 1024 struct pci_dev *pdev = pvt->dev_d0f0;
1025 u16 scrubval; 1025 u16 scrubval;
1026 int i; 1026 int i;
1027 1027
1028 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0) 1028 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
1029 scrubrates = scrubrates_i3100; 1029 scrubrates = scrubrates_i3100;
1030 else 1030 else
1031 scrubrates = scrubrates_e752x; 1031 scrubrates = scrubrates_e752x;
1032 1032
1033 /* Find the bandwidth matching the memory scrubber configuration */ 1033 /* Find the bandwidth matching the memory scrubber configuration */
1034 pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval); 1034 pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
1035 scrubval = scrubval & 0x0f; 1035 scrubval = scrubval & 0x0f;
1036 1036
1037 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++) 1037 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1038 if (scrubrates[i].scrubval == scrubval) 1038 if (scrubrates[i].scrubval == scrubval)
1039 break; 1039 break;
1040 1040
1041 if (scrubrates[i].bandwidth == SDRATE_EOT) { 1041 if (scrubrates[i].bandwidth == SDRATE_EOT) {
1042 e752x_printk(KERN_WARNING, 1042 e752x_printk(KERN_WARNING,
1043 "Invalid sdram scrub control value: 0x%x\n", scrubval); 1043 "Invalid sdram scrub control value: 0x%x\n", scrubval);
1044 return -1; 1044 return -1;
1045 } 1045 }
1046 return scrubrates[i].bandwidth; 1046 return scrubrates[i].bandwidth;
1047 1047
1048 } 1048 }
1049 1049
1050 /* Return 1 if dual channel mode is active. Else return 0. */ 1050 /* Return 1 if dual channel mode is active. Else return 0. */
1051 static inline int dual_channel_active(u16 ddrcsr) 1051 static inline int dual_channel_active(u16 ddrcsr)
1052 { 1052 {
1053 return (((ddrcsr >> 12) & 3) == 3); 1053 return (((ddrcsr >> 12) & 3) == 3);
1054 } 1054 }
1055 1055
1056 /* Remap csrow index numbers if map_type is "reverse" 1056 /* Remap csrow index numbers if map_type is "reverse"
1057 */ 1057 */
1058 static inline int remap_csrow_index(struct mem_ctl_info *mci, int index) 1058 static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
1059 { 1059 {
1060 struct e752x_pvt *pvt = mci->pvt_info; 1060 struct e752x_pvt *pvt = mci->pvt_info;
1061 1061
1062 if (!pvt->map_type) 1062 if (!pvt->map_type)
1063 return (7 - index); 1063 return (7 - index);
1064 1064
1065 return (index); 1065 return (index);
1066 } 1066 }
1067 1067
1068 static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, 1068 static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1069 u16 ddrcsr) 1069 u16 ddrcsr)
1070 { 1070 {
1071 struct csrow_info *csrow; 1071 struct csrow_info *csrow;
1072 enum edac_type edac_mode; 1072 enum edac_type edac_mode;
1073 unsigned long last_cumul_size; 1073 unsigned long last_cumul_size;
1074 int index, mem_dev, drc_chan; 1074 int index, mem_dev, drc_chan;
1075 int drc_drbg; /* DRB granularity 0=64MB, 1=128MB */ 1075 int drc_drbg; /* DRB granularity 0=64MB, 1=128MB */
1076 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ 1076 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
1077 u8 value; 1077 u8 value;
1078 u32 dra, drc, cumul_size, i, nr_pages; 1078 u32 dra, drc, cumul_size, i, nr_pages;
1079 1079
1080 dra = 0; 1080 dra = 0;
1081 for (index = 0; index < 4; index++) { 1081 for (index = 0; index < 4; index++) {
1082 u8 dra_reg; 1082 u8 dra_reg;
1083 pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg); 1083 pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
1084 dra |= dra_reg << (index * 8); 1084 dra |= dra_reg << (index * 8);
1085 } 1085 }
1086 pci_read_config_dword(pdev, E752X_DRC, &drc); 1086 pci_read_config_dword(pdev, E752X_DRC, &drc);
1087 drc_chan = dual_channel_active(ddrcsr) ? 1 : 0; 1087 drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
1088 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ 1088 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
1089 drc_ddim = (drc >> 20) & 0x3; 1089 drc_ddim = (drc >> 20) & 0x3;
1090 1090
1091 /* The dram row boundary (DRB) reg values are boundary address for 1091 /* The dram row boundary (DRB) reg values are boundary address for
1092 * each DRAM row with a granularity of 64 or 128MB (single/dual 1092 * each DRAM row with a granularity of 64 or 128MB (single/dual
1093 * channel operation). DRB regs are cumulative; therefore DRB7 will 1093 * channel operation). DRB regs are cumulative; therefore DRB7 will
1094 * contain the total memory contained in all eight rows. 1094 * contain the total memory contained in all eight rows.
1095 */ 1095 */
1096 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 1096 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
1097 /* mem_dev 0=x8, 1=x4 */ 1097 /* mem_dev 0=x8, 1=x4 */
1098 mem_dev = (dra >> (index * 4 + 2)) & 0x3; 1098 mem_dev = (dra >> (index * 4 + 2)) & 0x3;
1099 csrow = &mci->csrows[remap_csrow_index(mci, index)]; 1099 csrow = mci->csrows[remap_csrow_index(mci, index)];
1100 1100
1101 mem_dev = (mem_dev == 2); 1101 mem_dev = (mem_dev == 2);
1102 pci_read_config_byte(pdev, E752X_DRB + index, &value); 1102 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1103 /* convert a 128 or 64 MiB DRB to a page count. */ 1103 /* convert a 128 or 64 MiB DRB to a page count. */
1104 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 1104 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
1105 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 1105 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
1106 cumul_size); 1106 cumul_size);
1107 if (cumul_size == last_cumul_size) 1107 if (cumul_size == last_cumul_size)
1108 continue; /* not populated */ 1108 continue; /* not populated */
1109 1109
1110 csrow->first_page = last_cumul_size; 1110 csrow->first_page = last_cumul_size;
1111 csrow->last_page = cumul_size - 1; 1111 csrow->last_page = cumul_size - 1;
1112 nr_pages = cumul_size - last_cumul_size; 1112 nr_pages = cumul_size - last_cumul_size;
1113 last_cumul_size = cumul_size; 1113 last_cumul_size = cumul_size;
1114 1114
1115 /* 1115 /*
1116 * if single channel or x8 devices then SECDED 1116 * if single channel or x8 devices then SECDED
1117 * if dual channel and x4 then S4ECD4ED 1117 * if dual channel and x4 then S4ECD4ED
1118 */ 1118 */
1119 if (drc_ddim) { 1119 if (drc_ddim) {
1120 if (drc_chan && mem_dev) { 1120 if (drc_chan && mem_dev) {
1121 edac_mode = EDAC_S4ECD4ED; 1121 edac_mode = EDAC_S4ECD4ED;
1122 mci->edac_cap |= EDAC_FLAG_S4ECD4ED; 1122 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1123 } else { 1123 } else {
1124 edac_mode = EDAC_SECDED; 1124 edac_mode = EDAC_SECDED;
1125 mci->edac_cap |= EDAC_FLAG_SECDED; 1125 mci->edac_cap |= EDAC_FLAG_SECDED;
1126 } 1126 }
1127 } else 1127 } else
1128 edac_mode = EDAC_NONE; 1128 edac_mode = EDAC_NONE;
1129 for (i = 0; i < csrow->nr_channels; i++) { 1129 for (i = 0; i < csrow->nr_channels; i++) {
1130 struct dimm_info *dimm = csrow->channels[i].dimm; 1130 struct dimm_info *dimm = csrow->channels[i]->dimm;
1131 1131
1132 debugf3("Initializing rank at (%i,%i)\n", index, i); 1132 debugf3("Initializing rank at (%i,%i)\n", index, i);
1133 dimm->nr_pages = nr_pages / csrow->nr_channels; 1133 dimm->nr_pages = nr_pages / csrow->nr_channels;
1134 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 1134 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
1135 dimm->mtype = MEM_RDDR; /* only one type supported */ 1135 dimm->mtype = MEM_RDDR; /* only one type supported */
1136 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; 1136 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
1137 dimm->edac_mode = edac_mode; 1137 dimm->edac_mode = edac_mode;
1138 } 1138 }
1139 } 1139 }
1140 } 1140 }
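
The only substantive changes in e752x_init_csrows() are the two one-line accessor tweaks visible above: mci->csrows and csrow->channels stop being embedded arrays and become arrays of pointers, so each csrow, channel, and dimm can live in its own allocation, as the kobject rules require. A compressed sketch of the layout change (struct and field names abbreviated; the real definitions live in the EDAC core headers):

    struct dimm_s { unsigned nr_pages; };
    struct chan_s { struct dimm_s *dimm; };
    struct row_s  { struct chan_s **channels; };  /* was: struct chan_s *channels */
    struct mci_s  { struct row_s **csrows; };     /* was: struct row_s *csrows    */

    /* hence the diff:  &mci->csrows[i]          ->  mci->csrows[i]
     *                  csrow->channels[j].dimm  ->  csrow->channels[j]->dimm
     */
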
1141 1141
1142 static void e752x_init_mem_map_table(struct pci_dev *pdev, 1142 static void e752x_init_mem_map_table(struct pci_dev *pdev,
1143 struct e752x_pvt *pvt) 1143 struct e752x_pvt *pvt)
1144 { 1144 {
1145 int index; 1145 int index;
1146 u8 value, last, row; 1146 u8 value, last, row;
1147 1147
1148 last = 0; 1148 last = 0;
1149 row = 0; 1149 row = 0;
1150 1150
1151 for (index = 0; index < 8; index += 2) { 1151 for (index = 0; index < 8; index += 2) {
1152 pci_read_config_byte(pdev, E752X_DRB + index, &value); 1152 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1153 /* test if there is a dimm in this slot */ 1153 /* test if there is a dimm in this slot */
1154 if (value == last) { 1154 if (value == last) {
1155 /* no dimm in the slot, so flag it as empty */ 1155 /* no dimm in the slot, so flag it as empty */
1156 pvt->map[index] = 0xff; 1156 pvt->map[index] = 0xff;
1157 pvt->map[index + 1] = 0xff; 1157 pvt->map[index + 1] = 0xff;
1158 } else { /* there is a dimm in the slot */ 1158 } else { /* there is a dimm in the slot */
1159 pvt->map[index] = row; 1159 pvt->map[index] = row;
1160 row++; 1160 row++;
1161 last = value; 1161 last = value;
1162 /* test the next value to see if the dimm is double 1162 /* test the next value to see if the dimm is double
1163 * sided 1163 * sided
1164 */ 1164 */
1165 pci_read_config_byte(pdev, E752X_DRB + index + 1, 1165 pci_read_config_byte(pdev, E752X_DRB + index + 1,
1166 &value); 1166 &value);
1167 1167
1168 /* if the boundary repeats, the dimm is single sided: flag the next slot empty */ 1168 /* if the boundary repeats, the dimm is single sided: flag the next slot empty */
1169 /* otherwise it is double sided: save the next row # in the next slot */ 1169 /* otherwise it is double sided: save the next row # in the next slot */
1170 pvt->map[index + 1] = (value == last) ? 0xff : row; 1170 pvt->map[index + 1] = (value == last) ? 0xff : row;
1171 row++; 1171 row++;
1172 last = value; 1172 last = value;
1173 } 1173 }
1174 } 1174 }
1175 } 1175 }
1176 1176
1177 /* Return 0 on success or 1 on failure. */ 1177 /* Return 0 on success or 1 on failure. */
1178 static int e752x_get_devs(struct pci_dev *pdev, int dev_idx, 1178 static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1179 struct e752x_pvt *pvt) 1179 struct e752x_pvt *pvt)
1180 { 1180 {
1181 struct pci_dev *dev; 1181 struct pci_dev *dev;
1182 1182
1183 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 1183 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
1184 pvt->dev_info->err_dev, pvt->bridge_ck); 1184 pvt->dev_info->err_dev, pvt->bridge_ck);
1185 1185
1186 if (pvt->bridge_ck == NULL) 1186 if (pvt->bridge_ck == NULL)
1187 pvt->bridge_ck = pci_scan_single_device(pdev->bus, 1187 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
1188 PCI_DEVFN(0, 1)); 1188 PCI_DEVFN(0, 1));
1189 1189
1190 if (pvt->bridge_ck == NULL) { 1190 if (pvt->bridge_ck == NULL) {
1191 e752x_printk(KERN_ERR, "error reporting device not found: " 1191 e752x_printk(KERN_ERR, "error reporting device not found: "
1192 "vendor %x device 0x%x (broken BIOS?)\n", 1192 "vendor %x device 0x%x (broken BIOS?)\n",
1193 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); 1193 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
1194 return 1; 1194 return 1;
1195 } 1195 }
1196 1196
1197 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 1197 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
1198 e752x_devs[dev_idx].ctl_dev, 1198 e752x_devs[dev_idx].ctl_dev,
1199 NULL); 1199 NULL);
1200 1200
1201 if (dev == NULL) 1201 if (dev == NULL)
1202 goto fail; 1202 goto fail;
1203 1203
1204 pvt->dev_d0f0 = dev; 1204 pvt->dev_d0f0 = dev;
1205 pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); 1205 pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
1206 1206
1207 return 0; 1207 return 0;
1208 1208
1209 fail: 1209 fail:
1210 pci_dev_put(pvt->bridge_ck); 1210 pci_dev_put(pvt->bridge_ck);
1211 return 1; 1211 return 1;
1212 } 1212 }
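
A note on the reference pairing in e752x_get_devs(): pci_get_device() and pci_dev_get() each take a device reference on success, so the fail: path puts the bridge_ck reference when the control device cannot be found, and the matching pci_dev_put() calls for dev_d0f0, dev_d0f1 and bridge_ck appear again in both the probe's fail path and e752x_remove_one() below.
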
1213 1213
1214 /* Setup system bus parity mask register. 1214 /* Setup system bus parity mask register.
1215 * Sysbus parity supported on: 1215 * Sysbus parity supported on:
1216 * e7320/e7520/e7525 + Xeon 1216 * e7320/e7520/e7525 + Xeon
1217 */ 1217 */
1218 static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt) 1218 static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
1219 { 1219 {
1220 char *cpu_id = cpu_data(0).x86_model_id; 1220 char *cpu_id = cpu_data(0).x86_model_id;
1221 struct pci_dev *dev = pvt->dev_d0f1; 1221 struct pci_dev *dev = pvt->dev_d0f1;
1222 int enable = 1; 1222 int enable = 1;
1223 1223
1224 /* Allow module parameter override, else see if CPU supports parity */ 1224 /* Allow module parameter override, else see if CPU supports parity */
1225 if (sysbus_parity != -1) { 1225 if (sysbus_parity != -1) {
1226 enable = sysbus_parity; 1226 enable = sysbus_parity;
1227 } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) { 1227 } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
1228 e752x_printk(KERN_INFO, "System Bus Parity not " 1228 e752x_printk(KERN_INFO, "System Bus Parity not "
1229 "supported by CPU, disabling\n"); 1229 "supported by CPU, disabling\n");
1230 enable = 0; 1230 enable = 0;
1231 } 1231 }
1232 1232
1233 if (enable) 1233 if (enable)
1234 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000); 1234 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
1235 else 1235 else
1236 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309); 1236 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
1237 } 1237 }
1238 1238
1239 static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) 1239 static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
1240 { 1240 {
1241 struct pci_dev *dev; 1241 struct pci_dev *dev;
1242 1242
1243 dev = pvt->dev_d0f1; 1243 dev = pvt->dev_d0f1;
1244 /* Turn off error disable & SMI in case the BIOS turned it on */ 1244 /* Turn off error disable & SMI in case the BIOS turned it on */
1245 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { 1245 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
1246 pci_write_config_dword(dev, I3100_NSI_EMASK, 0); 1246 pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
1247 pci_write_config_dword(dev, I3100_NSI_SMICMD, 0); 1247 pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
1248 } else { 1248 } else {
1249 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); 1249 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
1250 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); 1250 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
1251 } 1251 }
1252 1252
1253 e752x_init_sysbus_parity_mask(pvt); 1253 e752x_init_sysbus_parity_mask(pvt);
1254 1254
1255 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); 1255 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
1256 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); 1256 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
1257 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); 1257 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
1258 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); 1258 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
1259 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); 1259 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
1260 } 1260 }
1261 1261
1262 static int e752x_probe1(struct pci_dev *pdev, int dev_idx) 1262 static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1263 { 1263 {
1264 u16 pci_data; 1264 u16 pci_data;
1265 u8 stat8; 1265 u8 stat8;
1266 struct mem_ctl_info *mci; 1266 struct mem_ctl_info *mci;
1267 struct edac_mc_layer layers[2]; 1267 struct edac_mc_layer layers[2];
1268 struct e752x_pvt *pvt; 1268 struct e752x_pvt *pvt;
1269 u16 ddrcsr; 1269 u16 ddrcsr;
1270 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 1270 int drc_chan; /* Number of channels 0=1chan,1=2chan */
1271 struct e752x_error_info discard; 1271 struct e752x_error_info discard;
1272 1272
1273 debugf0("%s(): mci\n", __func__); 1273 debugf0("%s(): mci\n", __func__);
1274 debugf0("Starting Probe1\n"); 1274 debugf0("Starting Probe1\n");
1275 1275
1276 /* check to see if device 0 function 1 is enabled; if it isn't, we 1276 /* check to see if device 0 function 1 is enabled; if it isn't, we
1277 * assume the BIOS has reserved it for a reason and is expecting 1277 * assume the BIOS has reserved it for a reason and is expecting
1278 * exclusive access; we take care not to violate that assumption and 1278 * exclusive access; we take care not to violate that assumption and
1279 * fail the probe. */ 1279 * fail the probe. */
1280 pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8); 1280 pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
1281 if (!force_function_unhide && !(stat8 & (1 << 5))) { 1281 if (!force_function_unhide && !(stat8 & (1 << 5))) {
1282 printk(KERN_INFO "Contact your BIOS vendor to see if the " 1282 printk(KERN_INFO "Contact your BIOS vendor to see if the "
1283 "E752x error registers can be safely un-hidden\n"); 1283 "E752x error registers can be safely un-hidden\n");
1284 return -ENODEV; 1284 return -ENODEV;
1285 } 1285 }
1286 stat8 |= (1 << 5); 1286 stat8 |= (1 << 5);
1287 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); 1287 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
1288 1288
1289 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); 1289 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
1290 /* FIXME: should check >>12 or 0xf, true for all? */ 1290 /* FIXME: should check >>12 or 0xf, true for all? */
1291 /* Dual channel = 1, Single channel = 0 */ 1291 /* Dual channel = 1, Single channel = 0 */
1292 drc_chan = dual_channel_active(ddrcsr); 1292 drc_chan = dual_channel_active(ddrcsr);
1293 1293
1294 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 1294 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1295 layers[0].size = E752X_NR_CSROWS; 1295 layers[0].size = E752X_NR_CSROWS;
1296 layers[0].is_virt_csrow = true; 1296 layers[0].is_virt_csrow = true;
1297 layers[1].type = EDAC_MC_LAYER_CHANNEL; 1297 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1298 layers[1].size = drc_chan + 1; 1298 layers[1].size = drc_chan + 1;
1299 layers[1].is_virt_csrow = false; 1299 layers[1].is_virt_csrow = false;
1300 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); 1300 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1301 if (mci == NULL) 1301 if (mci == NULL)
1302 return -ENOMEM; 1302 return -ENOMEM;
1303 1303
1304 debugf3("%s(): init mci\n", __func__); 1304 debugf3("%s(): init mci\n", __func__);
1305 mci->mtype_cap = MEM_FLAG_RDDR; 1305 mci->mtype_cap = MEM_FLAG_RDDR;
1306 /* 3100 IMCH supports SECDED only */ 1306 /* 3100 IMCH supports SECDED only */
1307 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED : 1307 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
1308 (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED); 1308 (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
1309 /* FIXME - what if different memory types are in different csrows? */ 1309 /* FIXME - what if different memory types are in different csrows? */
1310 mci->mod_name = EDAC_MOD_STR; 1310 mci->mod_name = EDAC_MOD_STR;
1311 mci->mod_ver = E752X_REVISION; 1311 mci->mod_ver = E752X_REVISION;
1312 mci->pdev = &pdev->dev; 1312 mci->pdev = &pdev->dev;
1313 1313
1314 debugf3("%s(): init pvt\n", __func__); 1314 debugf3("%s(): init pvt\n", __func__);
1315 pvt = (struct e752x_pvt *)mci->pvt_info; 1315 pvt = (struct e752x_pvt *)mci->pvt_info;
1316 pvt->dev_info = &e752x_devs[dev_idx]; 1316 pvt->dev_info = &e752x_devs[dev_idx];
1317 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); 1317 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
1318 1318
1319 if (e752x_get_devs(pdev, dev_idx, pvt)) { 1319 if (e752x_get_devs(pdev, dev_idx, pvt)) {
1320 edac_mc_free(mci); 1320 edac_mc_free(mci);
1321 return -ENODEV; 1321 return -ENODEV;
1322 } 1322 }
1323 1323
1324 debugf3("%s(): more mci init\n", __func__); 1324 debugf3("%s(): more mci init\n", __func__);
1325 mci->ctl_name = pvt->dev_info->ctl_name; 1325 mci->ctl_name = pvt->dev_info->ctl_name;
1326 mci->dev_name = pci_name(pdev); 1326 mci->dev_name = pci_name(pdev);
1327 mci->edac_check = e752x_check; 1327 mci->edac_check = e752x_check;
1328 mci->ctl_page_to_phys = ctl_page_to_phys; 1328 mci->ctl_page_to_phys = ctl_page_to_phys;
1329 mci->set_sdram_scrub_rate = set_sdram_scrub_rate; 1329 mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
1330 mci->get_sdram_scrub_rate = get_sdram_scrub_rate; 1330 mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
1331 1331
1332 /* set the map type. 1 = normal, 0 = reversed 1332 /* set the map type. 1 = normal, 0 = reversed
1333 * Must be set before e752x_init_csrows in case csrow mapping 1333 * Must be set before e752x_init_csrows in case csrow mapping
1334 * is reversed. 1334 * is reversed.
1335 */ 1335 */
1336 pci_read_config_byte(pdev, E752X_DRM, &stat8); 1336 pci_read_config_byte(pdev, E752X_DRM, &stat8);
1337 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); 1337 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
1338 1338
1339 e752x_init_csrows(mci, pdev, ddrcsr); 1339 e752x_init_csrows(mci, pdev, ddrcsr);
1340 e752x_init_mem_map_table(pdev, pvt); 1340 e752x_init_mem_map_table(pdev, pvt);
1341 1341
1342 if (dev_idx == I3100) 1342 if (dev_idx == I3100)
1343 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ 1343 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
1344 else 1344 else
1345 mci->edac_cap |= EDAC_FLAG_NONE; 1345 mci->edac_cap |= EDAC_FLAG_NONE;
1346 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 1346 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
1347 1347
1348 /* load the top of low memory, remap base, and remap limit vars */ 1348 /* load the top of low memory, remap base, and remap limit vars */
1349 pci_read_config_word(pdev, E752X_TOLM, &pci_data); 1349 pci_read_config_word(pdev, E752X_TOLM, &pci_data);
1350 pvt->tolm = ((u32) pci_data) << 4; 1350 pvt->tolm = ((u32) pci_data) << 4;
1351 pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data); 1351 pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
1352 pvt->remapbase = ((u32) pci_data) << 14; 1352 pvt->remapbase = ((u32) pci_data) << 14;
1353 pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data); 1353 pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
1354 pvt->remaplimit = ((u32) pci_data) << 14; 1354 pvt->remaplimit = ((u32) pci_data) << 14;
1355 e752x_printk(KERN_INFO, 1355 e752x_printk(KERN_INFO,
1356 "tolm = %x, remapbase = %x, remaplimit = %x\n", 1356 "tolm = %x, remapbase = %x, remaplimit = %x\n",
1357 pvt->tolm, pvt->remapbase, pvt->remaplimit); 1357 pvt->tolm, pvt->remapbase, pvt->remaplimit);
1358 1358
1359 /* Here we assume that we will never see multiple instances of this 1359 /* Here we assume that we will never see multiple instances of this
1360 * type of memory controller. The ID is therefore hardcoded to 0. 1360 * type of memory controller. The ID is therefore hardcoded to 0.
1361 */ 1361 */
1362 if (edac_mc_add_mc(mci)) { 1362 if (edac_mc_add_mc(mci)) {
1363 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 1363 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
1364 goto fail; 1364 goto fail;
1365 } 1365 }
1366 1366
1367 e752x_init_error_reporting_regs(pvt); 1367 e752x_init_error_reporting_regs(pvt);
1368 e752x_get_error_info(mci, &discard); /* clear other MCH errors */ 1368 e752x_get_error_info(mci, &discard); /* clear other MCH errors */
1369 1369
1370 /* allocating generic PCI control info */ 1370 /* allocating generic PCI control info */
1371 e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 1371 e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1372 if (!e752x_pci) { 1372 if (!e752x_pci) {
1373 printk(KERN_WARNING 1373 printk(KERN_WARNING
1374 "%s(): Unable to create PCI control\n", __func__); 1374 "%s(): Unable to create PCI control\n", __func__);
1375 printk(KERN_WARNING 1375 printk(KERN_WARNING
1376 "%s(): PCI error report via EDAC not setup\n", 1376 "%s(): PCI error report via EDAC not setup\n",
1377 __func__); 1377 __func__);
1378 } 1378 }
1379 1379
1380 /* get this far and it's successful */ 1380 /* get this far and it's successful */
1381 debugf3("%s(): success\n", __func__); 1381 debugf3("%s(): success\n", __func__);
1382 return 0; 1382 return 0;
1383 1383
1384 fail: 1384 fail:
1385 pci_dev_put(pvt->dev_d0f0); 1385 pci_dev_put(pvt->dev_d0f0);
1386 pci_dev_put(pvt->dev_d0f1); 1386 pci_dev_put(pvt->dev_d0f1);
1387 pci_dev_put(pvt->bridge_ck); 1387 pci_dev_put(pvt->bridge_ck);
1388 edac_mc_free(mci); 1388 edac_mc_free(mci);
1389 1389
1390 return -ENODEV; 1390 return -ENODEV;
1391 } 1391 }
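
e752x_probe1() also shows the new edac_mc_alloc() calling convention this series introduces: instead of raw csrow/channel counts, the caller describes the controller as a stack of edac_mc_layer entries, and the core then allocates the per-layer objects separately, which is what makes the one-kobject-per-allocation rule satisfiable. Restating the call from the probe with the parameters annotated:

    struct edac_mc_layer layers[2];

    layers[0].type          = EDAC_MC_LAYER_CHIP_SELECT;  /* outer layer: csrows */
    layers[0].size          = E752X_NR_CSROWS;
    layers[0].is_virt_csrow = true;
    layers[1].type          = EDAC_MC_LAYER_CHANNEL;      /* inner layer: channels */
    layers[1].size          = drc_chan + 1;               /* 1 or 2 channels */
    layers[1].is_virt_csrow = false;

    /* args: mc index, number of layers, layer array, pvt_info tail size */
    mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct e752x_pvt));
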
1392 1392
1393 /* returns count (>= 0), or negative on error */ 1393 /* returns count (>= 0), or negative on error */
1394 static int __devinit e752x_init_one(struct pci_dev *pdev, 1394 static int __devinit e752x_init_one(struct pci_dev *pdev,
1395 const struct pci_device_id *ent) 1395 const struct pci_device_id *ent)
1396 { 1396 {
1397 debugf0("%s()\n", __func__); 1397 debugf0("%s()\n", __func__);
1398 1398
1399 /* wake up and enable device */ 1399 /* wake up and enable device */
1400 if (pci_enable_device(pdev) < 0) 1400 if (pci_enable_device(pdev) < 0)
1401 return -EIO; 1401 return -EIO;
1402 1402
1403 return e752x_probe1(pdev, ent->driver_data); 1403 return e752x_probe1(pdev, ent->driver_data);
1404 } 1404 }
1405 1405
1406 static void __devexit e752x_remove_one(struct pci_dev *pdev) 1406 static void __devexit e752x_remove_one(struct pci_dev *pdev)
1407 { 1407 {
1408 struct mem_ctl_info *mci; 1408 struct mem_ctl_info *mci;
1409 struct e752x_pvt *pvt; 1409 struct e752x_pvt *pvt;
1410 1410
1411 debugf0("%s()\n", __func__); 1411 debugf0("%s()\n", __func__);
1412 1412
1413 if (e752x_pci) 1413 if (e752x_pci)
1414 edac_pci_release_generic_ctl(e752x_pci); 1414 edac_pci_release_generic_ctl(e752x_pci);
1415 1415
1416 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) 1416 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
1417 return; 1417 return;
1418 1418
1419 pvt = (struct e752x_pvt *)mci->pvt_info; 1419 pvt = (struct e752x_pvt *)mci->pvt_info;
1420 pci_dev_put(pvt->dev_d0f0); 1420 pci_dev_put(pvt->dev_d0f0);
1421 pci_dev_put(pvt->dev_d0f1); 1421 pci_dev_put(pvt->dev_d0f1);
1422 pci_dev_put(pvt->bridge_ck); 1422 pci_dev_put(pvt->bridge_ck);
1423 edac_mc_free(mci); 1423 edac_mc_free(mci);
1424 } 1424 }
1425 1425
1426 static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = { 1426 static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = {
1427 { 1427 {
1428 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1428 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1429 E7520}, 1429 E7520},
1430 { 1430 {
1431 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1431 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1432 E7525}, 1432 E7525},
1433 { 1433 {
1434 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1434 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1435 E7320}, 1435 E7320},
1436 { 1436 {
1437 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1437 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1438 I3100}, 1438 I3100},
1439 { 1439 {
1440 0, 1440 0,
1441 } /* 0 terminated list. */ 1441 } /* 0 terminated list. */
1442 }; 1442 };
1443 1443
1444 MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); 1444 MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1445 1445
1446 static struct pci_driver e752x_driver = { 1446 static struct pci_driver e752x_driver = {
1447 .name = EDAC_MOD_STR, 1447 .name = EDAC_MOD_STR,
1448 .probe = e752x_init_one, 1448 .probe = e752x_init_one,
1449 .remove = __devexit_p(e752x_remove_one), 1449 .remove = __devexit_p(e752x_remove_one),
1450 .id_table = e752x_pci_tbl, 1450 .id_table = e752x_pci_tbl,
1451 }; 1451 };
1452 1452
1453 static int __init e752x_init(void) 1453 static int __init e752x_init(void)
1454 { 1454 {
1455 int pci_rc; 1455 int pci_rc;
1456 1456
1457 debugf3("%s()\n", __func__); 1457 debugf3("%s()\n", __func__);
1458 1458
1459 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1459 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1460 opstate_init(); 1460 opstate_init();
1461 1461
1462 pci_rc = pci_register_driver(&e752x_driver); 1462 pci_rc = pci_register_driver(&e752x_driver);
1463 return (pci_rc < 0) ? pci_rc : 0; 1463 return (pci_rc < 0) ? pci_rc : 0;
1464 } 1464 }
1465 1465
1466 static void __exit e752x_exit(void) 1466 static void __exit e752x_exit(void)
1467 { 1467 {
1468 debugf3("%s()\n", __func__); 1468 debugf3("%s()\n", __func__);
1469 pci_unregister_driver(&e752x_driver); 1469 pci_unregister_driver(&e752x_driver);
1470 } 1470 }
1471 1471
1472 module_init(e752x_init); 1472 module_init(e752x_init);
1473 module_exit(e752x_exit); 1473 module_exit(e752x_exit);
1474 1474
1475 MODULE_LICENSE("GPL"); 1475 MODULE_LICENSE("GPL");
1476 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n"); 1476 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1477 MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers"); 1477 MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
1478 1478
1479 module_param(force_function_unhide, int, 0444); 1479 module_param(force_function_unhide, int, 0444);
1480 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" 1480 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
1481 " 1=force unhide and hope BIOS doesn't fight driver for " 1481 " 1=force unhide and hope BIOS doesn't fight driver for "
1482 "Dev0:Fun1 access"); 1482 "Dev0:Fun1 access");
1483 1483
1484 module_param(edac_op_state, int, 0444); 1484 module_param(edac_op_state, int, 0444);
1485 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1485 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1486 1486
1487 module_param(sysbus_parity, int, 0444); 1487 module_param(sysbus_parity, int, 0444);
1488 MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking," 1488 MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
1489 " 1=enable system bus parity checking, default=auto-detect"); 1489 " 1=enable system bus parity checking, default=auto-detect");
1490 module_param(report_non_memory_errors, int, 0644); 1490 module_param(report_non_memory_errors, int, 0644);
1491 MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error " 1491 MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
1492 "reporting, 1=enable non-memory error reporting"); 1492 "reporting, 1=enable non-memory error reporting");
1493 1493
drivers/edac/e7xxx_edac.c
1 /* 1 /*
2 * Intel e7xxx Memory Controller kernel module 2 * Intel e7xxx Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com) 3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * See "enum e7xxx_chips" below for supported chipsets 7 * See "enum e7xxx_chips" below for supported chipsets
8 * 8 *
9 * Written by Thayne Harbaugh 9 * Written by Thayne Harbaugh
10 * Based on work by Dan Hollis <goemon at anime dot net> and others. 10 * Based on work by Dan Hollis <goemon at anime dot net> and others.
11 * http://www.anime.net/~goemon/linux-ecc/ 11 * http://www.anime.net/~goemon/linux-ecc/
12 * 12 *
13 * Datasheet: 13 * Datasheet:
14 * http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html 14 * http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
15 * 15 *
16 * Contributors: 16 * Contributors:
17 * Eric Biederman (Linux Networx) 17 * Eric Biederman (Linux Networx)
18 * Tom Zimmerman (Linux Networx) 18 * Tom Zimmerman (Linux Networx)
19 * Jim Garlick (Lawrence Livermore National Labs) 19 * Jim Garlick (Lawrence Livermore National Labs)
20 * Dave Peterson (Lawrence Livermore National Labs) 20 * Dave Peterson (Lawrence Livermore National Labs)
21 * That One Guy (Some other place) 21 * That One Guy (Some other place)
22 * Wang Zhenyu (intel.com) 22 * Wang Zhenyu (intel.com)
23 * 23 *
24 * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $ 24 * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
25 * 25 *
26 */ 26 */
27 27
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/init.h> 29 #include <linux/init.h>
30 #include <linux/pci.h> 30 #include <linux/pci.h>
31 #include <linux/pci_ids.h> 31 #include <linux/pci_ids.h>
32 #include <linux/edac.h> 32 #include <linux/edac.h>
33 #include "edac_core.h" 33 #include "edac_core.h"
34 34
35 #define E7XXX_REVISION " Ver: 2.0.2" 35 #define E7XXX_REVISION " Ver: 2.0.2"
36 #define EDAC_MOD_STR "e7xxx_edac" 36 #define EDAC_MOD_STR "e7xxx_edac"
37 37
38 #define e7xxx_printk(level, fmt, arg...) \ 38 #define e7xxx_printk(level, fmt, arg...) \
39 edac_printk(level, "e7xxx", fmt, ##arg) 39 edac_printk(level, "e7xxx", fmt, ##arg)
40 40
41 #define e7xxx_mc_printk(mci, level, fmt, arg...) \ 41 #define e7xxx_mc_printk(mci, level, fmt, arg...) \
42 edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg) 42 edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
43 43
44 #ifndef PCI_DEVICE_ID_INTEL_7205_0 44 #ifndef PCI_DEVICE_ID_INTEL_7205_0
45 #define PCI_DEVICE_ID_INTEL_7205_0 0x255d 45 #define PCI_DEVICE_ID_INTEL_7205_0 0x255d
46 #endif /* PCI_DEVICE_ID_INTEL_7205_0 */ 46 #endif /* PCI_DEVICE_ID_INTEL_7205_0 */
47 47
48 #ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR 48 #ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
49 #define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551 49 #define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
50 #endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */ 50 #endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
51 51
52 #ifndef PCI_DEVICE_ID_INTEL_7500_0 52 #ifndef PCI_DEVICE_ID_INTEL_7500_0
53 #define PCI_DEVICE_ID_INTEL_7500_0 0x2540 53 #define PCI_DEVICE_ID_INTEL_7500_0 0x2540
54 #endif /* PCI_DEVICE_ID_INTEL_7500_0 */ 54 #endif /* PCI_DEVICE_ID_INTEL_7500_0 */
55 55
56 #ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR 56 #ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
57 #define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541 57 #define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
58 #endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */ 58 #endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
59 59
60 #ifndef PCI_DEVICE_ID_INTEL_7501_0 60 #ifndef PCI_DEVICE_ID_INTEL_7501_0
61 #define PCI_DEVICE_ID_INTEL_7501_0 0x254c 61 #define PCI_DEVICE_ID_INTEL_7501_0 0x254c
62 #endif /* PCI_DEVICE_ID_INTEL_7501_0 */ 62 #endif /* PCI_DEVICE_ID_INTEL_7501_0 */
63 63
64 #ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR 64 #ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
65 #define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541 65 #define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
66 #endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */ 66 #endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
67 67
68 #ifndef PCI_DEVICE_ID_INTEL_7505_0 68 #ifndef PCI_DEVICE_ID_INTEL_7505_0
69 #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 69 #define PCI_DEVICE_ID_INTEL_7505_0 0x2550
70 #endif /* PCI_DEVICE_ID_INTEL_7505_0 */ 70 #endif /* PCI_DEVICE_ID_INTEL_7505_0 */
71 71
72 #ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR 72 #ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
73 #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 73 #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
74 #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ 74 #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
75 75
76 #define E7XXX_NR_CSROWS 8 /* number of csrows */ 76 #define E7XXX_NR_CSROWS 8 /* number of csrows */
77 #define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */ 77 #define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */
78 78
79 /* E7XXX register addresses - device 0 function 0 */ 79 /* E7XXX register addresses - device 0 function 0 */
80 #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ 80 #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
81 #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ 81 #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
82 /* 82 /*
83 * 31 Device width row 7 0=x8 1=x4 83 * 31 Device width row 7 0=x8 1=x4
84 * 27 Device width row 6 84 * 27 Device width row 6
85 * 23 Device width row 5 85 * 23 Device width row 5
86 * 19 Device width row 4 86 * 19 Device width row 4
87 * 15 Device width row 3 87 * 15 Device width row 3
88 * 11 Device width row 2 88 * 11 Device width row 2
89 * 7 Device width row 1 89 * 7 Device width row 1
90 * 3 Device width row 0 90 * 3 Device width row 0
91 */ 91 */
92 #define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */ 92 #define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
93 /* 93 /*
94 * 22 Number channels 0=1,1=2 94 * 22 Number channels 0=1,1=2
95 * 19:18 DRB Granularity 32/64MB 95 * 19:18 DRB Granularity 32/64MB
96 */ 96 */
97 #define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ 97 #define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
98 #define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ 98 #define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
99 #define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ 99 #define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
100 100
101 /* E7XXX register addresses - device 0 function 1 */ 101 /* E7XXX register addresses - device 0 function 1 */
102 #define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */ 102 #define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
103 #define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */ 103 #define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
104 #define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */ 104 #define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
105 /* error address register (32b) */ 105 /* error address register (32b) */
106 /* 106 /*
107 * 31:28 Reserved 107 * 31:28 Reserved
108 * 27:6 CE address (4k block 33:12) 108 * 27:6 CE address (4k block 33:12)
109 * 5:0 Reserved 109 * 5:0 Reserved
110 */ 110 */
111 #define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */ 111 #define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
112 /* error address register (32b) */ 112 /* error address register (32b) */
113 /* 113 /*
114 * 31:28 Reserved 114 * 31:28 Reserved
115 * 27:6 CE address (4k block 33:12) 115 * 27:6 CE address (4k block 33:12)
116 * 5:0 Reserved 116 * 5:0 Reserved
117 */ 117 */
118 #define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */ 118 #define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
119 /* error syndrome register (16b) */ 119 /* error syndrome register (16b) */
120 120
121 enum e7xxx_chips { 121 enum e7xxx_chips {
122 E7500 = 0, 122 E7500 = 0,
123 E7501, 123 E7501,
124 E7505, 124 E7505,
125 E7205, 125 E7205,
126 }; 126 };
127 127
128 struct e7xxx_pvt { 128 struct e7xxx_pvt {
129 struct pci_dev *bridge_ck; 129 struct pci_dev *bridge_ck;
130 u32 tolm; 130 u32 tolm;
131 u32 remapbase; 131 u32 remapbase;
132 u32 remaplimit; 132 u32 remaplimit;
133 const struct e7xxx_dev_info *dev_info; 133 const struct e7xxx_dev_info *dev_info;
134 }; 134 };
135 135
136 struct e7xxx_dev_info { 136 struct e7xxx_dev_info {
137 u16 err_dev; 137 u16 err_dev;
138 const char *ctl_name; 138 const char *ctl_name;
139 }; 139 };
140 140
141 struct e7xxx_error_info { 141 struct e7xxx_error_info {
142 u8 dram_ferr; 142 u8 dram_ferr;
143 u8 dram_nerr; 143 u8 dram_nerr;
144 u32 dram_celog_add; 144 u32 dram_celog_add;
145 u16 dram_celog_syndrome; 145 u16 dram_celog_syndrome;
146 u32 dram_uelog_add; 146 u32 dram_uelog_add;
147 }; 147 };
148 148
149 static struct edac_pci_ctl_info *e7xxx_pci; 149 static struct edac_pci_ctl_info *e7xxx_pci;
150 150
151 static const struct e7xxx_dev_info e7xxx_devs[] = { 151 static const struct e7xxx_dev_info e7xxx_devs[] = {
152 [E7500] = { 152 [E7500] = {
153 .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, 153 .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
154 .ctl_name = "E7500"}, 154 .ctl_name = "E7500"},
155 [E7501] = { 155 [E7501] = {
156 .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, 156 .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
157 .ctl_name = "E7501"}, 157 .ctl_name = "E7501"},
158 [E7505] = { 158 [E7505] = {
159 .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, 159 .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
160 .ctl_name = "E7505"}, 160 .ctl_name = "E7505"},
161 [E7205] = { 161 [E7205] = {
162 .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, 162 .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
163 .ctl_name = "E7205"}, 163 .ctl_name = "E7205"},
164 }; 164 };
165 165
166 /* FIXME - is this valid for both SECDED and S4ECD4ED? */ 166 /* FIXME - is this valid for both SECDED and S4ECD4ED? */
167 static inline int e7xxx_find_channel(u16 syndrome) 167 static inline int e7xxx_find_channel(u16 syndrome)
168 { 168 {
169 debugf3("%s()\n", __func__); 169 debugf3("%s()\n", __func__);
170 170
171 if ((syndrome & 0xff00) == 0) 171 if ((syndrome & 0xff00) == 0)
172 return 0; 172 return 0;
173 173
174 if ((syndrome & 0x00ff) == 0) 174 if ((syndrome & 0x00ff) == 0)
175 return 1; 175 return 1;
176 176
177 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) 177 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
178 return 0; 178 return 0;
179 179
180 return 1; 180 return 1;
181 } 181 }
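
e7xxx_find_channel() maps an ECC syndrome to the channel that produced it: bits confined to the low byte implicate channel 0, bits confined to the high byte implicate channel 1, and for mixed syndromes a clear nibble in the high byte still points back at channel 0. A few sanity examples, assuming the function is lifted into a user-space harness with <assert.h> (values picked here to exercise each branch):

    assert(e7xxx_find_channel(0x0004) == 0);  /* bits only in the low byte  */
    assert(e7xxx_find_channel(0x0400) == 1);  /* bits only in the high byte */
    assert(e7xxx_find_channel(0x0180) == 0);  /* mixed, upper nibble of high byte clear */
    assert(e7xxx_find_channel(0x1101) == 1);  /* mixed, both high-byte nibbles set      */
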
182 182
183 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, 183 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
184 unsigned long page) 184 unsigned long page)
185 { 185 {
186 u32 remap; 186 u32 remap;
187 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; 187 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
188 188
189 debugf3("%s()\n", __func__); 189 debugf3("%s()\n", __func__);
190 190
191 if ((page < pvt->tolm) || 191 if ((page < pvt->tolm) ||
192 ((page >= 0x100000) && (page < pvt->remapbase))) 192 ((page >= 0x100000) && (page < pvt->remapbase)))
193 return page; 193 return page;
194 194
195 remap = (page - pvt->tolm) + pvt->remapbase; 195 remap = (page - pvt->tolm) + pvt->remapbase;
196 196
197 if (remap < pvt->remaplimit) 197 if (remap < pvt->remaplimit)
198 return remap; 198 return remap;
199 199
200 e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); 200 e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
201 return pvt->tolm - 1; 201 return pvt->tolm - 1;
202 } 202 }
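
ctl_page_to_phys() translates a controller-view page into a system page: pages below tolm, and pages already above the 4 GiB mark (page 0x100000) but below remapbase, pass through unchanged; pages reclaimed above tolm are shifted up into the remap window. A worked example with invented window values (all quantities are 4 KiB page numbers):

    u32 tolm = 0xc0000;                     /* 3 GiB */
    u32 remapbase = 0x100000;               /* 4 GiB */
    u32 remaplimit = 0x140000;              /* 5 GiB */
    u32 page = 0xc0010;                     /* just above tolm */
    u32 remap = (page - tolm) + remapbase;  /* 0x100010, below remaplimit: valid */
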
203 203
204 static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 204 static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
205 { 205 {
206 u32 error_1b, page; 206 u32 error_1b, page;
207 u16 syndrome; 207 u16 syndrome;
208 int row; 208 int row;
209 int channel; 209 int channel;
210 210
211 debugf3("%s()\n", __func__); 211 debugf3("%s()\n", __func__);
212 /* read the error address */ 212 /* read the error address */
213 error_1b = info->dram_celog_add; 213 error_1b = info->dram_celog_add;
214 /* FIXME - should use PAGE_SHIFT */ 214 /* FIXME - should use PAGE_SHIFT */
215 page = error_1b >> 6; /* convert the address to 4k page */ 215 page = error_1b >> 6; /* convert the address to 4k page */
216 /* read the syndrome */ 216 /* read the syndrome */
217 syndrome = info->dram_celog_syndrome; 217 syndrome = info->dram_celog_syndrome;
218 /* FIXME - check for -1 */ 218 /* FIXME - check for -1 */
219 row = edac_mc_find_csrow_by_page(mci, page); 219 row = edac_mc_find_csrow_by_page(mci, page);
220 /* convert syndrome to channel */ 220 /* convert syndrome to channel */
221 channel = e7xxx_find_channel(syndrome); 221 channel = e7xxx_find_channel(syndrome);
222 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome, 222 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
223 row, channel, -1, "e7xxx CE", "", NULL); 223 row, channel, -1, "e7xxx CE", "", NULL);
224 } 224 }
225 225
226 static void process_ce_no_info(struct mem_ctl_info *mci) 226 static void process_ce_no_info(struct mem_ctl_info *mci)
227 { 227 {
228 debugf3("%s()\n", __func__); 228 debugf3("%s()\n", __func__);
229 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, -1, -1, -1, 229 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, -1, -1, -1,
230 "e7xxx CE log register overflow", "", NULL); 230 "e7xxx CE log register overflow", "", NULL);
231 } 231 }
232 232
233 static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 233 static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
234 { 234 {
235 u32 error_2b, block_page; 235 u32 error_2b, block_page;
236 int row; 236 int row;
237 237
238 debugf3("%s()\n", __func__); 238 debugf3("%s()\n", __func__);
239 /* read the error address */ 239 /* read the error address */
240 error_2b = info->dram_uelog_add; 240 error_2b = info->dram_uelog_add;
241 /* FIXME - should use PAGE_SHIFT */ 241 /* FIXME - should use PAGE_SHIFT */
242 block_page = error_2b >> 6; /* convert to 4k address */ 242 block_page = error_2b >> 6; /* convert to 4k address */
243 row = edac_mc_find_csrow_by_page(mci, block_page); 243 row = edac_mc_find_csrow_by_page(mci, block_page);
244 244
245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0, 245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
246 row, -1, -1, "e7xxx UE", "", NULL); 246 row, -1, -1, "e7xxx UE", "", NULL);
247 } 247 }
248 248
249 static void process_ue_no_info(struct mem_ctl_info *mci) 249 static void process_ue_no_info(struct mem_ctl_info *mci)
250 { 250 {
251 debugf3("%s()\n", __func__); 251 debugf3("%s()\n", __func__);
252 252
253 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1, 253 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
254 "e7xxx UE log register overflow", "", NULL); 254 "e7xxx UE log register overflow", "", NULL);
255 } 255 }
256 256
257 static void e7xxx_get_error_info(struct mem_ctl_info *mci, 257 static void e7xxx_get_error_info(struct mem_ctl_info *mci,
258 struct e7xxx_error_info *info) 258 struct e7xxx_error_info *info)
259 { 259 {
260 struct e7xxx_pvt *pvt; 260 struct e7xxx_pvt *pvt;
261 261
262 pvt = (struct e7xxx_pvt *)mci->pvt_info; 262 pvt = (struct e7xxx_pvt *)mci->pvt_info;
263 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr); 263 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
264 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr); 264 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);
265 265
266 if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { 266 if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
267 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, 267 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
268 &info->dram_celog_add); 268 &info->dram_celog_add);
269 pci_read_config_word(pvt->bridge_ck, 269 pci_read_config_word(pvt->bridge_ck,
270 E7XXX_DRAM_CELOG_SYNDROME, 270 E7XXX_DRAM_CELOG_SYNDROME,
271 &info->dram_celog_syndrome); 271 &info->dram_celog_syndrome);
272 } 272 }
273 273
274 if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) 274 if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
275 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, 275 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
276 &info->dram_uelog_add); 276 &info->dram_uelog_add);
277 277
278 if (info->dram_ferr & 3) 278 if (info->dram_ferr & 3)
279 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); 279 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
280 280
281 if (info->dram_nerr & 3) 281 if (info->dram_nerr & 3)
282 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); 282 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
283 } 283 }
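
e7xxx_get_error_info() reads the sticky FERR ("first error") and NERR ("next error") logs and then acknowledges them: pci_write_bits8(..., 0x03, 0x03) writes ones back to the two error bits to clear the latched status (these log bits behave as write-one-to-clear), so the next event can be captured rather than the first error being held forever.
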
284 284
285 static int e7xxx_process_error_info(struct mem_ctl_info *mci, 285 static int e7xxx_process_error_info(struct mem_ctl_info *mci,
286 struct e7xxx_error_info *info, 286 struct e7xxx_error_info *info,
287 int handle_errors) 287 int handle_errors)
288 { 288 {
289 int error_found; 289 int error_found;
290 290
291 error_found = 0; 291 error_found = 0;
292 292
293 /* decode and report errors */ 293 /* decode and report errors */
294 if (info->dram_ferr & 1) { /* check first error correctable */ 294 if (info->dram_ferr & 1) { /* check first error correctable */
295 error_found = 1; 295 error_found = 1;
296 296
297 if (handle_errors) 297 if (handle_errors)
298 process_ce(mci, info); 298 process_ce(mci, info);
299 } 299 }
300 300
301 if (info->dram_ferr & 2) { /* check first error uncorrectable */ 301 if (info->dram_ferr & 2) { /* check first error uncorrectable */
302 error_found = 1; 302 error_found = 1;
303 303
304 if (handle_errors) 304 if (handle_errors)
305 process_ue(mci, info); 305 process_ue(mci, info);
306 } 306 }
307 307
308 if (info->dram_nerr & 1) { /* check next error correctable */ 308 if (info->dram_nerr & 1) { /* check next error correctable */
309 error_found = 1; 309 error_found = 1;
310 310
311 if (handle_errors) { 311 if (handle_errors) {
312 if (info->dram_ferr & 1) 312 if (info->dram_ferr & 1)
313 process_ce_no_info(mci); 313 process_ce_no_info(mci);
314 else 314 else
315 process_ce(mci, info); 315 process_ce(mci, info);
316 } 316 }
317 } 317 }
318 318
319 if (info->dram_nerr & 2) { /* check next error uncorrectable */ 319 if (info->dram_nerr & 2) { /* check next error uncorrectable */
320 error_found = 1; 320 error_found = 1;
321 321
322 if (handle_errors) { 322 if (handle_errors) {
323 if (info->dram_ferr & 2) 323 if (info->dram_ferr & 2)
324 process_ue_no_info(mci); 324 process_ue_no_info(mci);
325 else 325 else
326 process_ue(mci, info); 326 process_ue(mci, info);
327 } 327 }
328 } 328 }
329 329
330 return error_found; 330 return error_found;
331 } 331 }
332 332
333 static void e7xxx_check(struct mem_ctl_info *mci) 333 static void e7xxx_check(struct mem_ctl_info *mci)
334 { 334 {
335 struct e7xxx_error_info info; 335 struct e7xxx_error_info info;
336 336
337 debugf3("%s()\n", __func__); 337 debugf3("%s()\n", __func__);
338 e7xxx_get_error_info(mci, &info); 338 e7xxx_get_error_info(mci, &info);
339 e7xxx_process_error_info(mci, &info, 1); 339 e7xxx_process_error_info(mci, &info, 1);
340 } 340 }
341 341
342 /* Return 1 if dual channel mode is active. Else return 0. */ 342 /* Return 1 if dual channel mode is active. Else return 0. */
343 static inline int dual_channel_active(u32 drc, int dev_idx) 343 static inline int dual_channel_active(u32 drc, int dev_idx)
344 { 344 {
345 return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1; 345 return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
346 } 346 }
347 347
348 /* Return DRB granularity (0=32mb, 1=64mb). */ 348 /* Return DRB granularity (0=32mb, 1=64mb). */
349 static inline int drb_granularity(u32 drc, int dev_idx) 349 static inline int drb_granularity(u32 drc, int dev_idx)
350 { 350 {
351 /* only e7501 can be single channel */ 351 /* only e7501 can be single channel */
352 return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1; 352 return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
353 } 353 }
354 354
355 static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, 355 static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
356 int dev_idx, u32 drc) 356 int dev_idx, u32 drc)
357 { 357 {
358 unsigned long last_cumul_size; 358 unsigned long last_cumul_size;
359 int index, j; 359 int index, j;
360 u8 value; 360 u8 value;
361 u32 dra, cumul_size, nr_pages; 361 u32 dra, cumul_size, nr_pages;
362 int drc_chan, drc_drbg, drc_ddim, mem_dev; 362 int drc_chan, drc_drbg, drc_ddim, mem_dev;
363 struct csrow_info *csrow; 363 struct csrow_info *csrow;
364 struct dimm_info *dimm; 364 struct dimm_info *dimm;
365 enum edac_type edac_mode; 365 enum edac_type edac_mode;
366 366
367 pci_read_config_dword(pdev, E7XXX_DRA, &dra); 367 pci_read_config_dword(pdev, E7XXX_DRA, &dra);
368 drc_chan = dual_channel_active(drc, dev_idx); 368 drc_chan = dual_channel_active(drc, dev_idx);
369 drc_drbg = drb_granularity(drc, dev_idx); 369 drc_drbg = drb_granularity(drc, dev_idx);
370 drc_ddim = (drc >> 20) & 0x3; 370 drc_ddim = (drc >> 20) & 0x3;
371 last_cumul_size = 0; 371 last_cumul_size = 0;
372 372
373 /* The dram row boundary (DRB) reg values are boundary address 373 /* The dram row boundary (DRB) reg values are boundary address
374 * for each DRAM row with a granularity of 32 or 64MB (single/dual 374 * for each DRAM row with a granularity of 32 or 64MB (single/dual
375 * channel operation). DRB regs are cumulative; therefore DRB7 will 375 * channel operation). DRB regs are cumulative; therefore DRB7 will
376 * contain the total memory contained in all eight rows. 376 * contain the total memory contained in all eight rows.
377 */ 377 */
378 for (index = 0; index < mci->nr_csrows; index++) { 378 for (index = 0; index < mci->nr_csrows; index++) {
379 /* mem_dev 0=x8, 1=x4 */ 379 /* mem_dev 0=x8, 1=x4 */
380 mem_dev = (dra >> (index * 4 + 3)) & 0x1; 380 mem_dev = (dra >> (index * 4 + 3)) & 0x1;
381 csrow = &mci->csrows[index]; 381 csrow = mci->csrows[index];
382 382
383 pci_read_config_byte(pdev, E7XXX_DRB + index, &value); 383 pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
384 /* convert a 64 or 32 MiB DRB to a page count. */ 384 /* convert a 64 or 32 MiB DRB to a page count. */
385 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 385 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
386 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 386 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
387 cumul_size); 387 cumul_size);
388 if (cumul_size == last_cumul_size) 388 if (cumul_size == last_cumul_size)
389 continue; /* not populated */ 389 continue; /* not populated */
390 390
391 csrow->first_page = last_cumul_size; 391 csrow->first_page = last_cumul_size;
392 csrow->last_page = cumul_size - 1; 392 csrow->last_page = cumul_size - 1;
393 nr_pages = cumul_size - last_cumul_size; 393 nr_pages = cumul_size - last_cumul_size;
394 last_cumul_size = cumul_size; 394 last_cumul_size = cumul_size;
395 395
396 /* 396 /*
397 * if single channel or x8 devices then SECDED 397 * if single channel or x8 devices then SECDED
398 * if dual channel and x4 then S4ECD4ED 398 * if dual channel and x4 then S4ECD4ED
399 */ 399 */
400 if (drc_ddim) { 400 if (drc_ddim) {
401 if (drc_chan && mem_dev) { 401 if (drc_chan && mem_dev) {
402 edac_mode = EDAC_S4ECD4ED; 402 edac_mode = EDAC_S4ECD4ED;
403 mci->edac_cap |= EDAC_FLAG_S4ECD4ED; 403 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
404 } else { 404 } else {
405 edac_mode = EDAC_SECDED; 405 edac_mode = EDAC_SECDED;
406 mci->edac_cap |= EDAC_FLAG_SECDED; 406 mci->edac_cap |= EDAC_FLAG_SECDED;
407 } 407 }
408 } else 408 } else
409 edac_mode = EDAC_NONE; 409 edac_mode = EDAC_NONE;
410 410
411 for (j = 0; j < drc_chan + 1; j++) { 411 for (j = 0; j < drc_chan + 1; j++) {
412 dimm = csrow->channels[j].dimm; 412 dimm = csrow->channels[j]->dimm;
413 413
414 dimm->nr_pages = nr_pages / (drc_chan + 1); 414 dimm->nr_pages = nr_pages / (drc_chan + 1);
415 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 415 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
416 dimm->mtype = MEM_RDDR; /* only one type supported */ 416 dimm->mtype = MEM_RDDR; /* only one type supported */
417 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; 417 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
418 dimm->edac_mode = edac_mode; 418 dimm->edac_mode = edac_mode;
419 } 419 }
420 } 420 }
421 } 421 }
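
The DRB-to-pages conversion used by both init_csrows routines is worth unpacking: a cumulative DRB byte counts granules of 2^(25 + drc_drbg) bytes (32 or 64 MiB here, 64 or 128 MiB on the e752x), so value << (25 + drc_drbg - PAGE_SHIFT) is the same boundary expressed in 4 KiB pages. For example, with 64 MiB granularity (drc_drbg = 1, PAGE_SHIFT = 12) a DRB value of 16 gives 16 << 14 = 0x40000 pages, i.e. a 1 GiB cumulative boundary; the per-row page count is then the difference from the previous row's boundary.
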
422 422
423 static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) 423 static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
424 { 424 {
425 u16 pci_data; 425 u16 pci_data;
426 struct mem_ctl_info *mci = NULL; 426 struct mem_ctl_info *mci = NULL;
427 struct edac_mc_layer layers[2]; 427 struct edac_mc_layer layers[2];
428 struct e7xxx_pvt *pvt = NULL; 428 struct e7xxx_pvt *pvt = NULL;
429 u32 drc; 429 u32 drc;
430 int drc_chan; 430 int drc_chan;
431 struct e7xxx_error_info discard; 431 struct e7xxx_error_info discard;
432 432
433 debugf0("%s(): mci\n", __func__); 433 debugf0("%s(): mci\n", __func__);
434 434
435 pci_read_config_dword(pdev, E7XXX_DRC, &drc); 435 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
436 436
437 drc_chan = dual_channel_active(drc, dev_idx); 437 drc_chan = dual_channel_active(drc, dev_idx);
438 /* 438 /*
439 * According to the datasheet, this device has a maximum of 439 * According to the datasheet, this device has a maximum of
440 * 4 DIMMs per channel, either single-rank or dual-rank. So the 440 * 4 DIMMs per channel, either single-rank or dual-rank. So the
441 * total number of DIMMs is 8 (E7XXX_NR_DIMMS). 441 * total number of DIMMs is 8 (E7XXX_NR_DIMMS).
442 * That means that the DIMMs are mapped as CSROWs, and the channel 442 * That means that the DIMMs are mapped as CSROWs, and the channel
443 * will map the rank. So an error on either channel should be 443 * will map the rank. So an error on either channel should be
444 * attributed to the same DIMM. 444 * attributed to the same DIMM.
445 */ 445 */
446 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 446 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
447 layers[0].size = E7XXX_NR_CSROWS; 447 layers[0].size = E7XXX_NR_CSROWS;
448 layers[0].is_virt_csrow = true; 448 layers[0].is_virt_csrow = true;
449 layers[1].type = EDAC_MC_LAYER_CHANNEL; 449 layers[1].type = EDAC_MC_LAYER_CHANNEL;
450 layers[1].size = drc_chan + 1; 450 layers[1].size = drc_chan + 1;
451 layers[1].is_virt_csrow = false; 451 layers[1].is_virt_csrow = false;
452 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); 452 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
453 if (mci == NULL) 453 if (mci == NULL)
454 return -ENOMEM; 454 return -ENOMEM;
455 455
456 debugf3("%s(): init mci\n", __func__); 456 debugf3("%s(): init mci\n", __func__);
457 mci->mtype_cap = MEM_FLAG_RDDR; 457 mci->mtype_cap = MEM_FLAG_RDDR;
458 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | 458 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
459 EDAC_FLAG_S4ECD4ED; 459 EDAC_FLAG_S4ECD4ED;
460 /* FIXME - what if different memory types are in different csrows? */ 460 /* FIXME - what if different memory types are in different csrows? */
461 mci->mod_name = EDAC_MOD_STR; 461 mci->mod_name = EDAC_MOD_STR;
462 mci->mod_ver = E7XXX_REVISION; 462 mci->mod_ver = E7XXX_REVISION;
463 mci->pdev = &pdev->dev; 463 mci->pdev = &pdev->dev;
464 debugf3("%s(): init pvt\n", __func__); 464 debugf3("%s(): init pvt\n", __func__);
465 pvt = (struct e7xxx_pvt *)mci->pvt_info; 465 pvt = (struct e7xxx_pvt *)mci->pvt_info;
466 pvt->dev_info = &e7xxx_devs[dev_idx]; 466 pvt->dev_info = &e7xxx_devs[dev_idx];
467 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 467 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
468 pvt->dev_info->err_dev, pvt->bridge_ck); 468 pvt->dev_info->err_dev, pvt->bridge_ck);
469 469
470 if (!pvt->bridge_ck) { 470 if (!pvt->bridge_ck) {
471 e7xxx_printk(KERN_ERR, "error reporting device not found: " 471 e7xxx_printk(KERN_ERR, "error reporting device not found: "
472 "vendor %x device 0x%x (broken BIOS?)\n", 472 "vendor %x device 0x%x (broken BIOS?)\n",
473 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); 473 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
474 goto fail0; 474 goto fail0;
475 } 475 }
476 476
477 debugf3("%s(): more mci init\n", __func__); 477 debugf3("%s(): more mci init\n", __func__);
478 mci->ctl_name = pvt->dev_info->ctl_name; 478 mci->ctl_name = pvt->dev_info->ctl_name;
479 mci->dev_name = pci_name(pdev); 479 mci->dev_name = pci_name(pdev);
480 mci->edac_check = e7xxx_check; 480 mci->edac_check = e7xxx_check;
481 mci->ctl_page_to_phys = ctl_page_to_phys; 481 mci->ctl_page_to_phys = ctl_page_to_phys;
482 e7xxx_init_csrows(mci, pdev, dev_idx, drc); 482 e7xxx_init_csrows(mci, pdev, dev_idx, drc);
483 mci->edac_cap |= EDAC_FLAG_NONE; 483 mci->edac_cap |= EDAC_FLAG_NONE;
484 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 484 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
485 /* load the top of low memory, remap base, and remap limit vars */ 485 /* load the top of low memory, remap base, and remap limit vars */
486 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); 486 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
487 pvt->tolm = ((u32) pci_data) << 4; 487 pvt->tolm = ((u32) pci_data) << 4;
488 pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data); 488 pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data);
489 pvt->remapbase = ((u32) pci_data) << 14; 489 pvt->remapbase = ((u32) pci_data) << 14;
490 pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data); 490 pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
491 pvt->remaplimit = ((u32) pci_data) << 14; 491 pvt->remaplimit = ((u32) pci_data) << 14;
492 e7xxx_printk(KERN_INFO, 492 e7xxx_printk(KERN_INFO,
493 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, 493 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
494 pvt->remapbase, pvt->remaplimit); 494 pvt->remapbase, pvt->remaplimit);
495 495
496 /* clear any pending errors, or initial state bits */ 496 /* clear any pending errors, or initial state bits */
497 e7xxx_get_error_info(mci, &discard); 497 e7xxx_get_error_info(mci, &discard);
498 498
499 /* Here we assume that we will never see multiple instances of this 499 /* Here we assume that we will never see multiple instances of this
500 * type of memory controller. The ID is therefore hardcoded to 0. 500 * type of memory controller. The ID is therefore hardcoded to 0.
501 */ 501 */
502 if (edac_mc_add_mc(mci)) { 502 if (edac_mc_add_mc(mci)) {
503 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 503 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
504 goto fail1; 504 goto fail1;
505 } 505 }
506 506
507 /* allocating generic PCI control info */ 507 /* allocating generic PCI control info */
508 e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 508 e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
509 if (!e7xxx_pci) { 509 if (!e7xxx_pci) {
510 printk(KERN_WARNING 510 printk(KERN_WARNING
511 "%s(): Unable to create PCI control\n", 511 "%s(): Unable to create PCI control\n",
512 __func__); 512 __func__);
513 printk(KERN_WARNING 513 printk(KERN_WARNING
514 "%s(): PCI error report via EDAC not setup\n", 514 "%s(): PCI error report via EDAC not setup\n",
515 __func__); 515 __func__);
516 } 516 }
517 517
518 /* get this far and it's successful */ 518 /* get this far and it's successful */
519 debugf3("%s(): success\n", __func__); 519 debugf3("%s(): success\n", __func__);
520 return 0; 520 return 0;
521 521
522 fail1: 522 fail1:
523 pci_dev_put(pvt->bridge_ck); 523 pci_dev_put(pvt->bridge_ck);
524 524
525 fail0: 525 fail0:
526 edac_mc_free(mci); 526 edac_mc_free(mci);
527 527
528 return -ENODEV; 528 return -ENODEV;
529 } 529 }
530 530
531 /* returns count (>= 0), or negative on error */ 531 /* returns count (>= 0), or negative on error */
532 static int __devinit e7xxx_init_one(struct pci_dev *pdev, 532 static int __devinit e7xxx_init_one(struct pci_dev *pdev,
533 const struct pci_device_id *ent) 533 const struct pci_device_id *ent)
534 { 534 {
535 debugf0("%s()\n", __func__); 535 debugf0("%s()\n", __func__);
536 536
537 /* wake up and enable device */ 537 /* wake up and enable device */
538 return pci_enable_device(pdev) ? 538 return pci_enable_device(pdev) ?
539 -EIO : e7xxx_probe1(pdev, ent->driver_data); 539 -EIO : e7xxx_probe1(pdev, ent->driver_data);
540 } 540 }
541 541
542 static void __devexit e7xxx_remove_one(struct pci_dev *pdev) 542 static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
543 { 543 {
544 struct mem_ctl_info *mci; 544 struct mem_ctl_info *mci;
545 struct e7xxx_pvt *pvt; 545 struct e7xxx_pvt *pvt;
546 546
547 debugf0("%s()\n", __func__); 547 debugf0("%s()\n", __func__);
548 548
549 if (e7xxx_pci) 549 if (e7xxx_pci)
550 edac_pci_release_generic_ctl(e7xxx_pci); 550 edac_pci_release_generic_ctl(e7xxx_pci);
551 551
552 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) 552 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
553 return; 553 return;
554 554
555 pvt = (struct e7xxx_pvt *)mci->pvt_info; 555 pvt = (struct e7xxx_pvt *)mci->pvt_info;
556 pci_dev_put(pvt->bridge_ck); 556 pci_dev_put(pvt->bridge_ck);
557 edac_mc_free(mci); 557 edac_mc_free(mci);
558 } 558 }
559 559
560 static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = { 560 static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = {
561 { 561 {
562 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 562 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
563 E7205}, 563 E7205},
564 { 564 {
565 PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 565 PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
566 E7500}, 566 E7500},
567 { 567 {
568 PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 568 PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
569 E7501}, 569 E7501},
570 { 570 {
571 PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 571 PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
572 E7505}, 572 E7505},
573 { 573 {
574 0, 574 0,
575 } /* 0 terminated list. */ 575 } /* 0 terminated list. */
576 }; 576 };
577 577
578 MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); 578 MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
579 579
580 static struct pci_driver e7xxx_driver = { 580 static struct pci_driver e7xxx_driver = {
581 .name = EDAC_MOD_STR, 581 .name = EDAC_MOD_STR,
582 .probe = e7xxx_init_one, 582 .probe = e7xxx_init_one,
583 .remove = __devexit_p(e7xxx_remove_one), 583 .remove = __devexit_p(e7xxx_remove_one),
584 .id_table = e7xxx_pci_tbl, 584 .id_table = e7xxx_pci_tbl,
585 }; 585 };
586 586
587 static int __init e7xxx_init(void) 587 static int __init e7xxx_init(void)
588 { 588 {
589 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 589 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
590 opstate_init(); 590 opstate_init();
591 591
592 return pci_register_driver(&e7xxx_driver); 592 return pci_register_driver(&e7xxx_driver);
593 } 593 }
594 594
595 static void __exit e7xxx_exit(void) 595 static void __exit e7xxx_exit(void)
596 { 596 {
597 pci_unregister_driver(&e7xxx_driver); 597 pci_unregister_driver(&e7xxx_driver);
598 } 598 }
599 599
600 module_init(e7xxx_init); 600 module_init(e7xxx_init);
601 module_exit(e7xxx_exit); 601 module_exit(e7xxx_exit);
602 602
603 MODULE_LICENSE("GPL"); 603 MODULE_LICENSE("GPL");
604 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" 604 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
605 "Based on.work by Dan Hollis et al"); 605 "Based on.work by Dan Hollis et al");
606 MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); 606 MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
607 module_param(edac_op_state, int, 0444); 607 module_param(edac_op_state, int, 0444);
608 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 608 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
609 609
drivers/edac/edac_mc.c
1 /* 1 /*
2 * edac_mc kernel module 2 * edac_mc kernel module
3 * (C) 2005, 2006 Linux Networx (http://lnxi.com) 3 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * Written by Thayne Harbaugh 7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others. 8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/ 9 * http://www.anime.net/~goemon/linux-ecc/
10 * 10 *
11 * Modified by Dave Peterson and Doug Thompson 11 * Modified by Dave Peterson and Doug Thompson
12 * 12 *
13 */ 13 */
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/proc_fs.h> 16 #include <linux/proc_fs.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/types.h> 18 #include <linux/types.h>
19 #include <linux/smp.h> 19 #include <linux/smp.h>
20 #include <linux/init.h> 20 #include <linux/init.h>
21 #include <linux/sysctl.h> 21 #include <linux/sysctl.h>
22 #include <linux/highmem.h> 22 #include <linux/highmem.h>
23 #include <linux/timer.h> 23 #include <linux/timer.h>
24 #include <linux/slab.h> 24 #include <linux/slab.h>
25 #include <linux/jiffies.h> 25 #include <linux/jiffies.h>
26 #include <linux/spinlock.h> 26 #include <linux/spinlock.h>
27 #include <linux/list.h> 27 #include <linux/list.h>
28 #include <linux/ctype.h> 28 #include <linux/ctype.h>
29 #include <linux/edac.h> 29 #include <linux/edac.h>
30 #include <linux/bitops.h> 30 #include <linux/bitops.h>
31 #include <asm/uaccess.h> 31 #include <asm/uaccess.h>
32 #include <asm/page.h> 32 #include <asm/page.h>
33 #include <asm/edac.h> 33 #include <asm/edac.h>
34 #include "edac_core.h" 34 #include "edac_core.h"
35 #include "edac_module.h" 35 #include "edac_module.h"
36 36
37 #define CREATE_TRACE_POINTS 37 #define CREATE_TRACE_POINTS
38 #define TRACE_INCLUDE_PATH ../../include/ras 38 #define TRACE_INCLUDE_PATH ../../include/ras
39 #include <ras/ras_event.h> 39 #include <ras/ras_event.h>
40 40
41 /* lock to memory controller's control array */ 41 /* lock to memory controller's control array */
42 static DEFINE_MUTEX(mem_ctls_mutex); 42 static DEFINE_MUTEX(mem_ctls_mutex);
43 static LIST_HEAD(mc_devices); 43 static LIST_HEAD(mc_devices);
44 44
45 #ifdef CONFIG_EDAC_DEBUG 45 #ifdef CONFIG_EDAC_DEBUG
46 46
47 static void edac_mc_dump_channel(struct rank_info *chan) 47 static void edac_mc_dump_channel(struct rank_info *chan)
48 { 48 {
49 debugf4("\tchannel = %p\n", chan); 49 debugf4("\tchannel = %p\n", chan);
50 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); 50 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
51 debugf4("\tchannel->csrow = %p\n\n", chan->csrow); 51 debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
52 debugf4("\tchannel->dimm = %p\n", chan->dimm); 52 debugf4("\tchannel->dimm = %p\n", chan->dimm);
53 } 53 }
54 54
55 static void edac_mc_dump_dimm(struct dimm_info *dimm) 55 static void edac_mc_dump_dimm(struct dimm_info *dimm)
56 { 56 {
57 int i; 57 int i;
58 58
59 debugf4("\tdimm = %p\n", dimm); 59 debugf4("\tdimm = %p\n", dimm);
60 debugf4("\tdimm->label = '%s'\n", dimm->label); 60 debugf4("\tdimm->label = '%s'\n", dimm->label);
61 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages); 61 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
62 debugf4("\tdimm location "); 62 debugf4("\tdimm location ");
63 for (i = 0; i < dimm->mci->n_layers; i++) { 63 for (i = 0; i < dimm->mci->n_layers; i++) {
64 printk(KERN_CONT "%d", dimm->location[i]); 64 printk(KERN_CONT "%d", dimm->location[i]);
65 if (i < dimm->mci->n_layers - 1) 65 if (i < dimm->mci->n_layers - 1)
66 printk(KERN_CONT "."); 66 printk(KERN_CONT ".");
67 } 67 }
68 printk(KERN_CONT "\n"); 68 printk(KERN_CONT "\n");
69 debugf4("\tdimm->grain = %d\n", dimm->grain); 69 debugf4("\tdimm->grain = %d\n", dimm->grain);
70 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages); 70 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
71 } 71 }
72 72
73 static void edac_mc_dump_csrow(struct csrow_info *csrow) 73 static void edac_mc_dump_csrow(struct csrow_info *csrow)
74 { 74 {
75 debugf4("\tcsrow = %p\n", csrow); 75 debugf4("\tcsrow = %p\n", csrow);
76 debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx); 76 debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
77 debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page); 77 debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
78 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); 78 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
79 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); 79 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
80 debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels); 80 debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
81 debugf4("\tcsrow->channels = %p\n", csrow->channels); 81 debugf4("\tcsrow->channels = %p\n", csrow->channels);
82 debugf4("\tcsrow->mci = %p\n\n", csrow->mci); 82 debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
83 } 83 }
84 84
85 static void edac_mc_dump_mci(struct mem_ctl_info *mci) 85 static void edac_mc_dump_mci(struct mem_ctl_info *mci)
86 { 86 {
87 debugf3("\tmci = %p\n", mci); 87 debugf3("\tmci = %p\n", mci);
88 debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap); 88 debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
89 debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); 89 debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
90 debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap); 90 debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
91 debugf4("\tmci->edac_check = %p\n", mci->edac_check); 91 debugf4("\tmci->edac_check = %p\n", mci->edac_check);
92 debugf3("\tmci->nr_csrows = %d, csrows = %p\n", 92 debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
93 mci->nr_csrows, mci->csrows); 93 mci->nr_csrows, mci->csrows);
94 debugf3("\tmci->nr_dimms = %d, dimms = %p\n", 94 debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
95 mci->tot_dimms, mci->dimms); 95 mci->tot_dimms, mci->dimms);
96 debugf3("\tdev = %p\n", mci->pdev); 96 debugf3("\tdev = %p\n", mci->pdev);
97 debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); 97 debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
98 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 98 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
99 } 99 }
100 100
101 #endif /* CONFIG_EDAC_DEBUG */ 101 #endif /* CONFIG_EDAC_DEBUG */
102 102
103 /* 103 /*
104 * keep those in sync with the enum mem_type 104 * keep those in sync with the enum mem_type
105 */ 105 */
106 const char *edac_mem_types[] = { 106 const char *edac_mem_types[] = {
107 "Empty csrow", 107 "Empty csrow",
108 "Reserved csrow type", 108 "Reserved csrow type",
109 "Unknown csrow type", 109 "Unknown csrow type",
110 "Fast page mode RAM", 110 "Fast page mode RAM",
111 "Extended data out RAM", 111 "Extended data out RAM",
112 "Burst Extended data out RAM", 112 "Burst Extended data out RAM",
113 "Single data rate SDRAM", 113 "Single data rate SDRAM",
114 "Registered single data rate SDRAM", 114 "Registered single data rate SDRAM",
115 "Double data rate SDRAM", 115 "Double data rate SDRAM",
116 "Registered Double data rate SDRAM", 116 "Registered Double data rate SDRAM",
117 "Rambus DRAM", 117 "Rambus DRAM",
118 "Unbuffered DDR2 RAM", 118 "Unbuffered DDR2 RAM",
119 "Fully buffered DDR2", 119 "Fully buffered DDR2",
120 "Registered DDR2 RAM", 120 "Registered DDR2 RAM",
121 "Rambus XDR", 121 "Rambus XDR",
122 "Unbuffered DDR3 RAM", 122 "Unbuffered DDR3 RAM",
123 "Registered DDR3 RAM", 123 "Registered DDR3 RAM",
124 }; 124 };
125 EXPORT_SYMBOL_GPL(edac_mem_types); 125 EXPORT_SYMBOL_GPL(edac_mem_types);
126 126
127 /** 127 /**
128 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation 128 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
129 * @p: pointer to a pointer with the memory offset to be used. At 129 * @p: pointer to a pointer with the memory offset to be used. At
130 * return, this will be incremented to point to the next offset 130 * return, this will be incremented to point to the next offset
131 * @size: Size of the data structure to be reserved 131 * @size: Size of the data structure to be reserved
132 * @n_elems: Number of elements that should be reserved 132 * @n_elems: Number of elements that should be reserved
133 * 133 *
134 * If 'size' is a constant, the compiler will optimize this whole function 134 * If 'size' is a constant, the compiler will optimize this whole function
135 * down to either a no-op or the addition of a constant to the value of '*p'. 135 * down to either a no-op or the addition of a constant to the value of '*p'.
136 * 136 *
137 * The 'p' pointer is needed to keep advancing to the proper 137 * The 'p' pointer is needed to keep advancing to the proper
138 * offsets in memory when allocating a struct together with its 138 * offsets in memory when allocating a struct together with its
139 * embedded structs, as edac_device_alloc_ctl_info() does, 139 * embedded structs, as edac_device_alloc_ctl_info() does,
140 * for example. 140 * for example.
141 * 141 *
142 * At return, the pointer 'p' will be incremented to be used on a next call 142 * At return, the pointer 'p' will be incremented to be used on a next call
143 * to this function. 143 * to this function.
144 */ 144 */
145 void *edac_align_ptr(void **p, unsigned size, int n_elems) 145 void *edac_align_ptr(void **p, unsigned size, int n_elems)
146 { 146 {
147 unsigned align, r; 147 unsigned align, r;
148 void *ptr = *p; 148 void *ptr = *p;
149 149
150 *p += size * n_elems; 150 *p += size * n_elems;
151 151
152 /* 152 /*
153 * 'p' can possibly be an unaligned item X such that sizeof(X) is 153 * 'p' can possibly be an unaligned item X such that sizeof(X) is
154 * 'size'. Adjust 'p' so that its alignment is at least as 154 * 'size'. Adjust 'p' so that its alignment is at least as
155 * stringent as what the compiler would provide for X and return 155 * stringent as what the compiler would provide for X and return
156 * the aligned result. 156 * the aligned result.
157 * Here we assume that the alignment of a "long long" is the most 157 * Here we assume that the alignment of a "long long" is the most
158 * stringent alignment that the compiler will ever provide by default. 158 * stringent alignment that the compiler will ever provide by default.
159 * As far as I know, this is a reasonable assumption. 159 * As far as I know, this is a reasonable assumption.
160 */ 160 */
161 if (size > sizeof(long)) 161 if (size > sizeof(long))
162 align = sizeof(long long); 162 align = sizeof(long long);
163 else if (size > sizeof(int)) 163 else if (size > sizeof(int))
164 align = sizeof(long); 164 align = sizeof(long);
165 else if (size > sizeof(short)) 165 else if (size > sizeof(short))
166 align = sizeof(int); 166 align = sizeof(int);
167 else if (size > sizeof(char)) 167 else if (size > sizeof(char))
168 align = sizeof(short); 168 align = sizeof(short);
169 else 169 else
170 return (char *)ptr; 170 return (char *)ptr;
171 171
172 r = size % align; 172 r = size % align;
173 173
174 if (r == 0) 174 if (r == 0)
175 return (char *)ptr; 175 return (char *)ptr;
176 176
177 *p += align - r; 177 *p += align - r;
178 178
179 return (void *)(((unsigned long)ptr) + align - r); 179 return (void *)(((unsigned long)ptr) + align - r);
180 } 180 }
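
For reference, a minimal sketch of the two-pass pattern this helper enables (names and the counter count are illustrative, not part of this file): offsets are first computed against a NULL base, the total is allocated once, and each offset is then rebased into the real buffer, just as edac_mc_alloc() does below.

	void *ptr = NULL, *base;
	struct mem_ctl_info *mci;
	u32 *counters;
	size_t size;

	/* first pass: 'ptr' starts at NULL, so each result is an offset */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	counters = edac_align_ptr(&ptr, sizeof(u32), 16);
	size = (unsigned long)ptr;	/* total bytes to allocate */

	base = kzalloc(size, GFP_KERNEL);
	if (!base)
		return NULL;

	/* second pass: rebase each offset into the real buffer */
	mci = (struct mem_ctl_info *)((char *)base + (unsigned long)mci);
	counters = (u32 *)((char *)base + (unsigned long)counters);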
181 181
182 /** 182 /**
183 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure 183 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
184 * @mc_num: Memory controller number 184 * @mc_num: Memory controller number
185 * @n_layers: Number of MC hierarchy layers 185 * @n_layers: Number of MC hierarchy layers
186 * @layers: Describes each layer as seen by the Memory Controller 186 * @layers: Describes each layer as seen by the Memory Controller
187 * @size_pvt: size of private storage needed 187 * @size_pvt: size of private storage needed
188 * 188 *
189 * 189 *
190 * The mci, layer and error-counter arrays are allocated in one chunk; 190 * The mci, layer and error-counter arrays are allocated in one chunk;
191 * the csrow, channel and dimm structs are kzalloc'ed individually, 191 * the csrow, channel and dimm structs are kzalloc'ed individually,
192 * since each container object must be dynamically allocated on its own. 192 * since each container object must be dynamically allocated on its own.
193 * 193 *
194 * Use edac_mc_free() to free mc structures allocated by this function. 194 * Use edac_mc_free() to free mc structures allocated by this function.
195 * 195 *
196 * NOTE: drivers handle multi-rank memories in different ways: in some 196 * NOTE: drivers handle multi-rank memories in different ways: in some
197 * drivers, one multi-rank memory stick is mapped as one entry, while, in 197 * drivers, one multi-rank memory stick is mapped as one entry, while, in
198 * others, a single multi-rank memory stick would be mapped into several 198 * others, a single multi-rank memory stick would be mapped into several
199 * entries. Currently, this function will allocate multiple struct dimm_info 199 * entries. Currently, this function will allocate multiple struct dimm_info
200 * in such scenarios, as grouping the multiple ranks requires driver changes. 200 * in such scenarios, as grouping the multiple ranks requires driver changes.
201 * 201 *
202 * Returns: 202 * Returns:
203 * On failure: NULL 203 * On failure: NULL
204 * On success: struct mem_ctl_info pointer 204 * On success: struct mem_ctl_info pointer
205 */ 205 */
206 struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, 206 struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
207 unsigned n_layers, 207 unsigned n_layers,
208 struct edac_mc_layer *layers, 208 struct edac_mc_layer *layers,
209 unsigned sz_pvt) 209 unsigned sz_pvt)
210 { 210 {
211 struct mem_ctl_info *mci; 211 struct mem_ctl_info *mci;
212 struct edac_mc_layer *layer; 212 struct edac_mc_layer *layer;
213 struct csrow_info *csi, *csr; 213 struct csrow_info *csr;
214 struct rank_info *chi, *chp, *chan; 214 struct rank_info *chan;
215 struct dimm_info *dimm; 215 struct dimm_info *dimm;
216 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; 216 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
217 unsigned pos[EDAC_MAX_LAYERS]; 217 unsigned pos[EDAC_MAX_LAYERS];
218 unsigned size, tot_dimms = 1, count = 1; 218 unsigned size, tot_dimms = 1, count = 1;
219 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; 219 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
220 void *pvt, *p, *ptr = NULL; 220 void *pvt, *p, *ptr = NULL;
221 int i, j, row, chn, n, len; 221 int i, j, row, chn, n, len, off;
222 bool per_rank = false; 222 bool per_rank = false;
223 223
224 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); 224 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
225 /* 225 /*
226 * Calculate the total amount of dimms and csrows/cschannels while 226 * Calculate the total amount of dimms and csrows/cschannels while
227 * in the old API emulation mode 227 * in the old API emulation mode
228 */ 228 */
229 for (i = 0; i < n_layers; i++) { 229 for (i = 0; i < n_layers; i++) {
230 tot_dimms *= layers[i].size; 230 tot_dimms *= layers[i].size;
231 if (layers[i].is_virt_csrow) 231 if (layers[i].is_virt_csrow)
232 tot_csrows *= layers[i].size; 232 tot_csrows *= layers[i].size;
233 else 233 else
234 tot_channels *= layers[i].size; 234 tot_channels *= layers[i].size;
235 235
236 if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT) 236 if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
237 per_rank = true; 237 per_rank = true;
238 } 238 }
239 239
240 /* Figure out the offsets of the various items from the start of an mc 240 /* Figure out the offsets of the various items from the start of an mc
241 * structure. We want the alignment of each item to be at least as 241 * structure. We want the alignment of each item to be at least as
242 * stringent as what the compiler would provide if we could simply 242 * stringent as what the compiler would provide if we could simply
243 * hardcode everything into a single struct. 243 * hardcode everything into a single struct.
244 */ 244 */
245 mci = edac_align_ptr(&ptr, sizeof(*mci), 1); 245 mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
246 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); 246 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
247 csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
248 chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
249 dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
250 for (i = 0; i < n_layers; i++) { 247 for (i = 0; i < n_layers; i++) {
251 count *= layers[i].size; 248 count *= layers[i].size;
252 debugf4("%s: errcount layer %d size %d\n", __func__, i, count); 249 debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
253 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); 250 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
254 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); 251 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
255 tot_errcount += 2 * count; 252 tot_errcount += 2 * count;
256 } 253 }
257 254
258 debugf4("%s: allocating %d error counters\n", __func__, tot_errcount); 255 debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
259 pvt = edac_align_ptr(&ptr, sz_pvt, 1); 256 pvt = edac_align_ptr(&ptr, sz_pvt, 1);
260 size = ((unsigned long)pvt) + sz_pvt; 257 size = ((unsigned long)pvt) + sz_pvt;
261 258
262 debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", 259 debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
263 __func__, size, 260 __func__, size,
264 tot_dimms, 261 tot_dimms,
265 per_rank ? "ranks" : "dimms", 262 per_rank ? "ranks" : "dimms",
266 tot_csrows * tot_channels); 263 tot_csrows * tot_channels);
264
267 mci = kzalloc(size, GFP_KERNEL); 265 mci = kzalloc(size, GFP_KERNEL);
268 if (mci == NULL) 266 if (mci == NULL)
269 return NULL; 267 return NULL;
270 268
271 /* Adjust pointers so they point within the memory we just allocated 269 /* Adjust pointers so they point within the memory we just allocated
272 * rather than an imaginary chunk of memory located at address 0. 270 * rather than an imaginary chunk of memory located at address 0.
273 */ 271 */
274 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); 272 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
275 csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
276 chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
277 dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
278 for (i = 0; i < n_layers; i++) { 273 for (i = 0; i < n_layers; i++) {
279 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); 274 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
280 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); 275 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
281 } 276 }
282 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; 277 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
283 278
284 /* setup index and various internal pointers */ 279 /* setup index and various internal pointers */
285 mci->mc_idx = mc_num; 280 mci->mc_idx = mc_num;
286 mci->csrows = csi;
287 mci->dimms = dimm;
288 mci->tot_dimms = tot_dimms; 281 mci->tot_dimms = tot_dimms;
289 mci->pvt_info = pvt; 282 mci->pvt_info = pvt;
290 mci->n_layers = n_layers; 283 mci->n_layers = n_layers;
291 mci->layers = layer; 284 mci->layers = layer;
292 memcpy(mci->layers, layers, sizeof(*layer) * n_layers); 285 memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
293 mci->nr_csrows = tot_csrows; 286 mci->nr_csrows = tot_csrows;
294 mci->num_cschannel = tot_channels; 287 mci->num_cschannel = tot_channels;
295 mci->mem_is_per_rank = per_rank; 288 mci->mem_is_per_rank = per_rank;
296 289
297 /* 290 /*
298 * Fill the csrow struct 291 * Allocate and fill the csrow/channels structs
299 */ 292 */
293 mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
294 if (!mci->csrows)
295 goto error;
300 for (row = 0; row < tot_csrows; row++) { 296 for (row = 0; row < tot_csrows; row++) {
301 csr = &csi[row]; 297 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
298 if (!csr)
299 goto error;
300 mci->csrows[row] = csr;
302 csr->csrow_idx = row; 301 csr->csrow_idx = row;
303 csr->mci = mci; 302 csr->mci = mci;
304 csr->nr_channels = tot_channels; 303 csr->nr_channels = tot_channels;
305 chp = &chi[row * tot_channels]; 304 csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
306 csr->channels = chp; 305 GFP_KERNEL);
306 if (!csr->channels)
307 goto error;
307 308
308 for (chn = 0; chn < tot_channels; chn++) { 309 for (chn = 0; chn < tot_channels; chn++) {
309 chan = &chp[chn]; 310 chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
311 if (!chan)
312 goto error;
313 csr->channels[chn] = chan;
310 chan->chan_idx = chn; 314 chan->chan_idx = chn;
311 chan->csrow = csr; 315 chan->csrow = csr;
312 } 316 }
313 } 317 }
314 318
315 /* 319 /*
316 * Fill the dimm struct 320 * Allocate and fill the dimm structs
317 */ 321 */
322 mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
323 if (!mci->dimms)
324 goto error;
325
318 memset(&pos, 0, sizeof(pos)); 326 memset(&pos, 0, sizeof(pos));
319 row = 0; 327 row = 0;
320 chn = 0; 328 chn = 0;
321 debugf4("%s: initializing %d %s\n", __func__, tot_dimms, 329 debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
322 per_rank ? "ranks" : "dimms"); 330 per_rank ? "ranks" : "dimms");
323 for (i = 0; i < tot_dimms; i++) { 331 for (i = 0; i < tot_dimms; i++) {
324 chan = &csi[row].channels[chn]; 332 chan = mci->csrows[row]->channels[chn];
325 dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers, 333 off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
326 pos[0], pos[1], pos[2]); 334 if (off < 0 || off >= tot_dimms) {
335 edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
336 goto error;
337 }
338
339 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
340 if (!dimm)
341 goto error;
342 mci->dimms[off] = dimm;
327 dimm->mci = mci; 341 dimm->mci = mci;
328 342
329 debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__, 343 debugf2("%s: %d: %s%i (%d:%d:%d): row %d, chan %d\n", __func__,
330 i, per_rank ? "rank" : "dimm", (dimm - mci->dimms), 344 i, per_rank ? "rank" : "dimm", off,
331 pos[0], pos[1], pos[2], row, chn); 345 pos[0], pos[1], pos[2], row, chn);
332 346
333 /* 347 /*
334 * Copy DIMM location and initialize it. 348 * Copy DIMM location and initialize it.
335 */ 349 */
336 len = sizeof(dimm->label); 350 len = sizeof(dimm->label);
337 p = dimm->label; 351 p = dimm->label;
338 n = snprintf(p, len, "mc#%u", mc_num); 352 n = snprintf(p, len, "mc#%u", mc_num);
339 p += n; 353 p += n;
340 len -= n; 354 len -= n;
341 for (j = 0; j < n_layers; j++) { 355 for (j = 0; j < n_layers; j++) {
342 n = snprintf(p, len, "%s#%u", 356 n = snprintf(p, len, "%s#%u",
343 edac_layer_name[layers[j].type], 357 edac_layer_name[layers[j].type],
344 pos[j]); 358 pos[j]);
345 p += n; 359 p += n;
346 len -= n; 360 len -= n;
347 dimm->location[j] = pos[j]; 361 dimm->location[j] = pos[j];
348 362
349 if (len <= 0) 363 if (len <= 0)
350 break; 364 break;
351 } 365 }
352 366
353 /* Link it to the csrows old API data */ 367 /* Link it to the csrows old API data */
354 chan->dimm = dimm; 368 chan->dimm = dimm;
355 dimm->csrow = row; 369 dimm->csrow = row;
356 dimm->cschannel = chn; 370 dimm->cschannel = chn;
357 371
358 /* Increment csrow location */ 372 /* Increment csrow location */
359 row++; 373 row++;
360 if (row == tot_csrows) { 374 if (row == tot_csrows) {
361 row = 0; 375 row = 0;
362 chn++; 376 chn++;
363 } 377 }
364 378
365 /* Increment dimm location */ 379 /* Increment dimm location */
366 for (j = n_layers - 1; j >= 0; j--) { 380 for (j = n_layers - 1; j >= 0; j--) {
367 pos[j]++; 381 pos[j]++;
368 if (pos[j] < layers[j].size) 382 if (pos[j] < layers[j].size)
369 break; 383 break;
370 pos[j] = 0; 384 pos[j] = 0;
371 } 385 }
372 } 386 }
373 387
374 mci->op_state = OP_ALLOC; 388 mci->op_state = OP_ALLOC;
375 389
376 /* at this point, the root kobj is valid; to 'free' the 390 /* at this point, the root kobj is valid; to 'free' the
377 * object, the function 391 * object, the function
378 * edac_mc_unregister_sysfs_main_kobj() must be called, 392 * edac_mc_unregister_sysfs_main_kobj() must be called,
379 * which performs the kobj unregistration; the actual free 393 * which performs the kobj unregistration; the actual free
380 * then occurs during the kobject release callback 394 * then occurs during the kobject release callback
381 */ 395 */
382 396
383 return mci; 397 return mci;
398
399 error:
400 if (mci->dimms) {
401 for (i = 0; i < tot_dimms; i++)
402 kfree(mci->dimms[i]);
403 kfree(mci->dimms);
404 }
405 if (mci->csrows) {
406 for (row = 0; row < tot_csrows; row++) {
407 csr = mci->csrows[row];
408 if (!csr)
409 continue;
410 for (chn = 0; csr->channels && chn < tot_channels; chn++)
411 kfree(csr->channels[chn]);
412 kfree(csr->channels);
413 kfree(csr);
414 }
415 kfree(mci->csrows);
416 }
417 kfree(mci);
418
419 return NULL;
384 } 420 }
385 EXPORT_SYMBOL_GPL(edac_mc_alloc); 421 EXPORT_SYMBOL_GPL(edac_mc_alloc);
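
As a worked example of the layer bookkeeping above, here is an illustrative two-layer geometry mirroring e7xxx_probe1() earlier in this diff (the sizes are made up for the example):

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 8;		/* -> tot_csrows   = 8 */
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 2;		/* -> tot_channels = 2 */
	layers[1].is_virt_csrow = false;

	/* Since a CHIP_SELECT layer is present, per_rank is true and
	 * 8 * 2 = 16 dimm_info structs are created, one per rank; each
	 * csrow_info, rank_info and dimm_info gets its own kzalloc(). */
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct e7xxx_pvt));
	if (!mci)
		return -ENOMEM;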
386 422
387 /** 423 /**
388 * edac_mc_free 424 * edac_mc_free
389 * 'Free' a previously allocated 'mci' structure 425 * 'Free' a previously allocated 'mci' structure
390 * @mci: pointer to a struct mem_ctl_info structure 426 * @mci: pointer to a struct mem_ctl_info structure
391 */ 427 */
392 void edac_mc_free(struct mem_ctl_info *mci) 428 void edac_mc_free(struct mem_ctl_info *mci)
393 { 429 {
394 debugf1("%s()\n", __func__); 430 debugf1("%s()\n", __func__);
395 431
432 /* the mci instance is freed when its sysfs object is dropped */
396 edac_unregister_sysfs(mci); 433 edac_unregister_sysfs(mci);
397
398 /* free the mci instance memory here */
399 kfree(mci);
400 } 434 }
401 EXPORT_SYMBOL_GPL(edac_mc_free); 435 EXPORT_SYMBOL_GPL(edac_mc_free);
402 436
403 437
404 /** 438 /**
405 * find_mci_by_dev 439 * find_mci_by_dev
406 * 440 *
407 * scan list of controllers looking for the one that manages 441 * scan list of controllers looking for the one that manages
408 * the 'dev' device 442 * the 'dev' device
409 * @dev: pointer to a struct device related with the MCI 443 * @dev: pointer to a struct device related with the MCI
410 */ 444 */
411 struct mem_ctl_info *find_mci_by_dev(struct device *dev) 445 struct mem_ctl_info *find_mci_by_dev(struct device *dev)
412 { 446 {
413 struct mem_ctl_info *mci; 447 struct mem_ctl_info *mci;
414 struct list_head *item; 448 struct list_head *item;
415 449
416 debugf3("%s()\n", __func__); 450 debugf3("%s()\n", __func__);
417 451
418 list_for_each(item, &mc_devices) { 452 list_for_each(item, &mc_devices) {
419 mci = list_entry(item, struct mem_ctl_info, link); 453 mci = list_entry(item, struct mem_ctl_info, link);
420 454
421 if (mci->pdev == dev) 455 if (mci->pdev == dev)
422 return mci; 456 return mci;
423 } 457 }
424 458
425 return NULL; 459 return NULL;
426 } 460 }
427 EXPORT_SYMBOL_GPL(find_mci_by_dev); 461 EXPORT_SYMBOL_GPL(find_mci_by_dev);
428 462
429 /* 463 /*
430 * handler for EDAC to check if NMI type handler has asserted interrupt 464 * handler for EDAC to check if NMI type handler has asserted interrupt
431 */ 465 */
432 static int edac_mc_assert_error_check_and_clear(void) 466 static int edac_mc_assert_error_check_and_clear(void)
433 { 467 {
434 int old_state; 468 int old_state;
435 469
436 if (edac_op_state == EDAC_OPSTATE_POLL) 470 if (edac_op_state == EDAC_OPSTATE_POLL)
437 return 1; 471 return 1;
438 472
439 old_state = edac_err_assert; 473 old_state = edac_err_assert;
440 edac_err_assert = 0; 474 edac_err_assert = 0;
441 475
442 return old_state; 476 return old_state;
443 } 477 }
444 478
445 /* 479 /*
446 * edac_mc_workq_function 480 * edac_mc_workq_function
447 * performs the operation scheduled by a workq request 481 * performs the operation scheduled by a workq request
448 */ 482 */
449 static void edac_mc_workq_function(struct work_struct *work_req) 483 static void edac_mc_workq_function(struct work_struct *work_req)
450 { 484 {
451 struct delayed_work *d_work = to_delayed_work(work_req); 485 struct delayed_work *d_work = to_delayed_work(work_req);
452 struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); 486 struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
453 487
454 mutex_lock(&mem_ctls_mutex); 488 mutex_lock(&mem_ctls_mutex);
455 489
456 /* if this control struct has moved to offline state, we are done */ 490 /* if this control struct has moved to offline state, we are done */
457 if (mci->op_state == OP_OFFLINE) { 491 if (mci->op_state == OP_OFFLINE) {
458 mutex_unlock(&mem_ctls_mutex); 492 mutex_unlock(&mem_ctls_mutex);
459 return; 493 return;
460 } 494 }
461 495
462 /* Only poll controllers that are running polled and have a check */ 496 /* Only poll controllers that are running polled and have a check */
463 if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL)) 497 if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
464 mci->edac_check(mci); 498 mci->edac_check(mci);
465 499
466 mutex_unlock(&mem_ctls_mutex); 500 mutex_unlock(&mem_ctls_mutex);
467 501
468 /* Reschedule */ 502 /* Reschedule */
469 queue_delayed_work(edac_workqueue, &mci->work, 503 queue_delayed_work(edac_workqueue, &mci->work,
470 msecs_to_jiffies(edac_mc_get_poll_msec())); 504 msecs_to_jiffies(edac_mc_get_poll_msec()));
471 } 505 }
472 506
473 /* 507 /*
474 * edac_mc_workq_setup 508 * edac_mc_workq_setup
475 * initialize a workq item for this mci 509 * initialize a workq item for this mci
476 * passing in the new delay period in msec 510 * passing in the new delay period in msec
477 * 511 *
478 * locking model: 512 * locking model:
479 * 513 *
480 * called with the mem_ctls_mutex held 514 * called with the mem_ctls_mutex held
481 */ 515 */
482 static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) 516 static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
483 { 517 {
484 debugf0("%s()\n", __func__); 518 debugf0("%s()\n", __func__);
485 519
486 /* if this instance is not in the POLL state, then simply return */ 520 /* if this instance is not in the POLL state, then simply return */
487 if (mci->op_state != OP_RUNNING_POLL) 521 if (mci->op_state != OP_RUNNING_POLL)
488 return; 522 return;
489 523
490 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); 524 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
491 queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); 525 queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
492 } 526 }
493 527
494 /* 528 /*
495 * edac_mc_workq_teardown 529 * edac_mc_workq_teardown
496 * stop the workq processing on this mci 530 * stop the workq processing on this mci
497 * 531 *
498 * locking model: 532 * locking model:
499 * 533 *
500 * called WITHOUT lock held 534 * called WITHOUT lock held
501 */ 535 */
502 static void edac_mc_workq_teardown(struct mem_ctl_info *mci) 536 static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
503 { 537 {
504 int status; 538 int status;
505 539
506 if (mci->op_state != OP_RUNNING_POLL) 540 if (mci->op_state != OP_RUNNING_POLL)
507 return; 541 return;
508 542
509 status = cancel_delayed_work(&mci->work); 543 status = cancel_delayed_work(&mci->work);
510 if (status == 0) { 544 if (status == 0) {
511 debugf0("%s() not canceled, flush the queue\n", 545 debugf0("%s() not canceled, flush the queue\n",
512 __func__); 546 __func__);
513 547
514 /* workq instance might be running, wait for it */ 548 /* workq instance might be running, wait for it */
515 flush_workqueue(edac_workqueue); 549 flush_workqueue(edac_workqueue);
516 } 550 }
517 } 551 }
518 552
519 /* 553 /*
520 * edac_mc_reset_delay_period(int value) 554 * edac_mc_reset_delay_period(int value)
521 * 555 *
522 * user space has updated our poll period value, need to 556 * user space has updated our poll period value, need to
523 * reset our workq delays 557 * reset our workq delays
524 */ 558 */
525 void edac_mc_reset_delay_period(int value) 559 void edac_mc_reset_delay_period(int value)
526 { 560 {
527 struct mem_ctl_info *mci; 561 struct mem_ctl_info *mci;
528 struct list_head *item; 562 struct list_head *item;
529 563
530 mutex_lock(&mem_ctls_mutex); 564 mutex_lock(&mem_ctls_mutex);
531 565
532 /* scan the list and turn off all workq timers, doing so under lock 566 /* scan the list and turn off all workq timers, doing so under lock
533 */ 567 */
534 list_for_each(item, &mc_devices) { 568 list_for_each(item, &mc_devices) {
535 mci = list_entry(item, struct mem_ctl_info, link); 569 mci = list_entry(item, struct mem_ctl_info, link);
536 570
537 if (mci->op_state == OP_RUNNING_POLL) 571 if (mci->op_state == OP_RUNNING_POLL)
538 cancel_delayed_work(&mci->work); 572 cancel_delayed_work(&mci->work);
539 } 573 }
540 574
541 mutex_unlock(&mem_ctls_mutex); 575 mutex_unlock(&mem_ctls_mutex);
542 576
543 577
544 /* re-walk the list, and reset the poll delay */ 578 /* re-walk the list, and reset the poll delay */
545 mutex_lock(&mem_ctls_mutex); 579 mutex_lock(&mem_ctls_mutex);
546 580
547 list_for_each(item, &mc_devices) { 581 list_for_each(item, &mc_devices) {
548 mci = list_entry(item, struct mem_ctl_info, link); 582 mci = list_entry(item, struct mem_ctl_info, link);
549 583
550 edac_mc_workq_setup(mci, (unsigned long) value); 584 edac_mc_workq_setup(mci, (unsigned long) value);
551 } 585 }
552 586
553 mutex_unlock(&mem_ctls_mutex); 587 mutex_unlock(&mem_ctls_mutex);
554 } 588 }
555 589
556 590
557 591
558 /* Return 0 on success, 1 on failure. 592 /* Return 0 on success, 1 on failure.
559 * Before calling this function, caller must 593 * Before calling this function, caller must
560 * assign a unique value to mci->mc_idx. 594 * assign a unique value to mci->mc_idx.
561 * 595 *
562 * locking model: 596 * locking model:
563 * 597 *
564 * called with the mem_ctls_mutex lock held 598 * called with the mem_ctls_mutex lock held
565 */ 599 */
566 static int add_mc_to_global_list(struct mem_ctl_info *mci) 600 static int add_mc_to_global_list(struct mem_ctl_info *mci)
567 { 601 {
568 struct list_head *item, *insert_before; 602 struct list_head *item, *insert_before;
569 struct mem_ctl_info *p; 603 struct mem_ctl_info *p;
570 604
571 insert_before = &mc_devices; 605 insert_before = &mc_devices;
572 606
573 p = find_mci_by_dev(mci->pdev); 607 p = find_mci_by_dev(mci->pdev);
574 if (unlikely(p != NULL)) 608 if (unlikely(p != NULL))
575 goto fail0; 609 goto fail0;
576 610
577 list_for_each(item, &mc_devices) { 611 list_for_each(item, &mc_devices) {
578 p = list_entry(item, struct mem_ctl_info, link); 612 p = list_entry(item, struct mem_ctl_info, link);
579 613
580 if (p->mc_idx >= mci->mc_idx) { 614 if (p->mc_idx >= mci->mc_idx) {
581 if (unlikely(p->mc_idx == mci->mc_idx)) 615 if (unlikely(p->mc_idx == mci->mc_idx))
582 goto fail1; 616 goto fail1;
583 617
584 insert_before = item; 618 insert_before = item;
585 break; 619 break;
586 } 620 }
587 } 621 }
588 622
589 list_add_tail_rcu(&mci->link, insert_before); 623 list_add_tail_rcu(&mci->link, insert_before);
590 atomic_inc(&edac_handlers); 624 atomic_inc(&edac_handlers);
591 return 0; 625 return 0;
592 626
593 fail0: 627 fail0:
594 edac_printk(KERN_WARNING, EDAC_MC, 628 edac_printk(KERN_WARNING, EDAC_MC,
595 "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev), 629 "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
596 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); 630 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
597 return 1; 631 return 1;
598 632
599 fail1: 633 fail1:
600 edac_printk(KERN_WARNING, EDAC_MC, 634 edac_printk(KERN_WARNING, EDAC_MC,
601 "bug in low-level driver: attempt to assign\n" 635 "bug in low-level driver: attempt to assign\n"
602 " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__); 636 " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
603 return 1; 637 return 1;
604 } 638 }
605 639
606 static void del_mc_from_global_list(struct mem_ctl_info *mci) 640 static void del_mc_from_global_list(struct mem_ctl_info *mci)
607 { 641 {
608 atomic_dec(&edac_handlers); 642 atomic_dec(&edac_handlers);
609 list_del_rcu(&mci->link); 643 list_del_rcu(&mci->link);
610 644
611 /* these are for safe removal of devices from global list while 645 /* these are for safe removal of devices from global list while
612 * NMI handlers may be traversing list 646 * NMI handlers may be traversing list
613 */ 647 */
614 synchronize_rcu(); 648 synchronize_rcu();
615 INIT_LIST_HEAD(&mci->link); 649 INIT_LIST_HEAD(&mci->link);
616 } 650 }
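
For illustration, a hypothetical lock-free reader of the kind the synchronize_rcu() above waits for; this is the standard RCU list-traversal pattern, not code from this file:

	struct mem_ctl_info *mci;

	rcu_read_lock();
	list_for_each_entry_rcu(mci, &mc_devices, link) {
		/* inspect mci without taking mem_ctls_mutex */
	}
	rcu_read_unlock();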
617 651
618 /** 652 /**
619 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'. 653 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
620 * 654 *
621 * If found, return a pointer to the structure. 655 * If found, return a pointer to the structure.
622 * Else return NULL. 656 * Else return NULL.
623 * 657 *
624 * Caller must hold mem_ctls_mutex. 658 * Caller must hold mem_ctls_mutex.
625 */ 659 */
626 struct mem_ctl_info *edac_mc_find(int idx) 660 struct mem_ctl_info *edac_mc_find(int idx)
627 { 661 {
628 struct list_head *item; 662 struct list_head *item;
629 struct mem_ctl_info *mci; 663 struct mem_ctl_info *mci;
630 664
631 list_for_each(item, &mc_devices) { 665 list_for_each(item, &mc_devices) {
632 mci = list_entry(item, struct mem_ctl_info, link); 666 mci = list_entry(item, struct mem_ctl_info, link);
633 667
634 if (mci->mc_idx >= idx) { 668 if (mci->mc_idx >= idx) {
635 if (mci->mc_idx == idx) 669 if (mci->mc_idx == idx)
636 return mci; 670 return mci;
637 671
638 break; 672 break;
639 } 673 }
640 } 674 }
641 675
642 return NULL; 676 return NULL;
643 } 677 }
644 EXPORT_SYMBOL(edac_mc_find); 678 EXPORT_SYMBOL(edac_mc_find);
645 679
646 /** 680 /**
647 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and 681 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
648 * create sysfs entries associated with mci structure 682 * create sysfs entries associated with mci structure
649 * @mci: pointer to the mci structure to be added to the list 683 * @mci: pointer to the mci structure to be added to the list
650 * 684 *
651 * Return: 685 * Return:
652 * 0 Success 686 * 0 Success
653 * !0 Failure 687 * !0 Failure
654 */ 688 */
655 689
656 /* FIXME - should a warning be printed if no error detection? correction? */ 690 /* FIXME - should a warning be printed if no error detection? correction? */
657 int edac_mc_add_mc(struct mem_ctl_info *mci) 691 int edac_mc_add_mc(struct mem_ctl_info *mci)
658 { 692 {
659 debugf0("%s()\n", __func__); 693 debugf0("%s()\n", __func__);
660 694
661 #ifdef CONFIG_EDAC_DEBUG 695 #ifdef CONFIG_EDAC_DEBUG
662 if (edac_debug_level >= 3) 696 if (edac_debug_level >= 3)
663 edac_mc_dump_mci(mci); 697 edac_mc_dump_mci(mci);
664 698
665 if (edac_debug_level >= 4) { 699 if (edac_debug_level >= 4) {
666 int i; 700 int i;
667 701
668 for (i = 0; i < mci->nr_csrows; i++) { 702 for (i = 0; i < mci->nr_csrows; i++) {
669 int j; 703 int j;
670 704
671 edac_mc_dump_csrow(&mci->csrows[i]); 705 edac_mc_dump_csrow(mci->csrows[i]);
672 for (j = 0; j < mci->csrows[i].nr_channels; j++) 706 for (j = 0; j < mci->csrows[i]->nr_channels; j++)
673 edac_mc_dump_channel(&mci->csrows[i]. 707 edac_mc_dump_channel(mci->csrows[i]->channels[j]);
674 channels[j]);
675 } 708 }
676 for (i = 0; i < mci->tot_dimms; i++) 709 for (i = 0; i < mci->tot_dimms; i++)
677 edac_mc_dump_dimm(&mci->dimms[i]); 710 edac_mc_dump_dimm(mci->dimms[i]);
678 } 711 }
679 #endif 712 #endif
680 mutex_lock(&mem_ctls_mutex); 713 mutex_lock(&mem_ctls_mutex);
681 714
682 if (add_mc_to_global_list(mci)) 715 if (add_mc_to_global_list(mci))
683 goto fail0; 716 goto fail0;
684 717
685 /* set load time so that error rate can be tracked */ 718 /* set load time so that error rate can be tracked */
686 mci->start_time = jiffies; 719 mci->start_time = jiffies;
687 720
688 if (edac_create_sysfs_mci_device(mci)) { 721 if (edac_create_sysfs_mci_device(mci)) {
689 edac_mc_printk(mci, KERN_WARNING, 722 edac_mc_printk(mci, KERN_WARNING,
690 "failed to create sysfs device\n"); 723 "failed to create sysfs device\n");
691 goto fail1; 724 goto fail1;
692 } 725 }
693 726
694 /* If there IS a check routine, then we are running POLLED */ 727 /* If there IS a check routine, then we are running POLLED */
695 if (mci->edac_check != NULL) { 728 if (mci->edac_check != NULL) {
696 /* This instance is NOW RUNNING */ 729 /* This instance is NOW RUNNING */
697 mci->op_state = OP_RUNNING_POLL; 730 mci->op_state = OP_RUNNING_POLL;
698 731
699 edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); 732 edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
700 } else { 733 } else {
701 mci->op_state = OP_RUNNING_INTERRUPT; 734 mci->op_state = OP_RUNNING_INTERRUPT;
702 } 735 }
703 736
704 /* Report action taken */ 737 /* Report action taken */
705 edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':" 738 edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
706 " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci)); 739 " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
707 740
708 mutex_unlock(&mem_ctls_mutex); 741 mutex_unlock(&mem_ctls_mutex);
709 return 0; 742 return 0;
710 743
711 fail1: 744 fail1:
712 del_mc_from_global_list(mci); 745 del_mc_from_global_list(mci);
713 746
714 fail0: 747 fail0:
715 mutex_unlock(&mem_ctls_mutex); 748 mutex_unlock(&mem_ctls_mutex);
716 return 1; 749 return 1;
717 } 750 }
718 EXPORT_SYMBOL_GPL(edac_mc_add_mc); 751 EXPORT_SYMBOL_GPL(edac_mc_add_mc);
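
A sketch of the expected alloc/add/del/free pairing, modeled on e7xxx_probe1() and e7xxx_remove_one() earlier in this diff; the function names are hypothetical and error handling is trimmed:

static int example_probe(struct pci_dev *pdev)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	/* ... describe the geometry in layers[], as in e7xxx_probe1() ... */
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	/* ... fill mtype_cap, ctl_name, csrows/dimms, etc. ... */

	if (edac_mc_add_mc(mci)) {	/* registers sysfs, starts polling */
		edac_mc_free(mci);
		return -ENODEV;
	}
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);

	if (mci)
		edac_mc_free(mci);	/* drops the sysfs object */
}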
719 752
720 /** 753 /**
721 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and 754 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
722 * remove mci structure from global list 755 * remove mci structure from global list
723 * @pdev: Pointer to 'struct device' representing mci structure to remove. 756 * @pdev: Pointer to 'struct device' representing mci structure to remove.
724 * 757 *
725 * Return pointer to removed mci structure, or NULL if device not found. 758 * Return pointer to removed mci structure, or NULL if device not found.
726 */ 759 */
727 struct mem_ctl_info *edac_mc_del_mc(struct device *dev) 760 struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
728 { 761 {
729 struct mem_ctl_info *mci; 762 struct mem_ctl_info *mci;
730 763
731 debugf0("%s()\n", __func__); 764 debugf0("%s()\n", __func__);
732 765
733 mutex_lock(&mem_ctls_mutex); 766 mutex_lock(&mem_ctls_mutex);
734 767
735 /* find the requested mci struct in the global list */ 768 /* find the requested mci struct in the global list */
736 mci = find_mci_by_dev(dev); 769 mci = find_mci_by_dev(dev);
737 if (mci == NULL) { 770 if (mci == NULL) {
738 mutex_unlock(&mem_ctls_mutex); 771 mutex_unlock(&mem_ctls_mutex);
739 return NULL; 772 return NULL;
740 } 773 }
741 774
742 del_mc_from_global_list(mci); 775 del_mc_from_global_list(mci);
743 mutex_unlock(&mem_ctls_mutex); 776 mutex_unlock(&mem_ctls_mutex);
744 777
745 /* flush workq processes */ 778 /* flush workq processes */
746 edac_mc_workq_teardown(mci); 779 edac_mc_workq_teardown(mci);
747 780
748 /* marking MCI offline */ 781 /* marking MCI offline */
749 mci->op_state = OP_OFFLINE; 782 mci->op_state = OP_OFFLINE;
750 783
751 /* remove from sysfs */ 784 /* remove from sysfs */
752 edac_remove_sysfs_mci_device(mci); 785 edac_remove_sysfs_mci_device(mci);
753 786
754 edac_printk(KERN_INFO, EDAC_MC, 787 edac_printk(KERN_INFO, EDAC_MC,
755 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, 788 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
756 mci->mod_name, mci->ctl_name, edac_dev_name(mci)); 789 mci->mod_name, mci->ctl_name, edac_dev_name(mci));
757 790
758 return mci; 791 return mci;
759 } 792 }
760 EXPORT_SYMBOL_GPL(edac_mc_del_mc); 793 EXPORT_SYMBOL_GPL(edac_mc_del_mc);
761 794
762 static void edac_mc_scrub_block(unsigned long page, unsigned long offset, 795 static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
763 u32 size) 796 u32 size)
764 { 797 {
765 struct page *pg; 798 struct page *pg;
766 void *virt_addr; 799 void *virt_addr;
767 unsigned long flags = 0; 800 unsigned long flags = 0;
768 801
769 debugf3("%s()\n", __func__); 802 debugf3("%s()\n", __func__);
770 803
771 /* ECC error page was not in our memory. Ignore it. */ 804 /* ECC error page was not in our memory. Ignore it. */
772 if (!pfn_valid(page)) 805 if (!pfn_valid(page))
773 return; 806 return;
774 807
775 /* Find the actual page structure then map it and fix */ 808 /* Find the actual page structure then map it and fix */
776 pg = pfn_to_page(page); 809 pg = pfn_to_page(page);
777 810
778 if (PageHighMem(pg)) 811 if (PageHighMem(pg))
779 local_irq_save(flags); 812 local_irq_save(flags);
780 813
781 virt_addr = kmap_atomic(pg); 814 virt_addr = kmap_atomic(pg);
782 815
783 /* Perform architecture specific atomic scrub operation */ 816 /* Perform architecture specific atomic scrub operation */
784 atomic_scrub(virt_addr + offset, size); 817 atomic_scrub(virt_addr + offset, size);
785 818
786 /* Unmap and complete */ 819 /* Unmap and complete */
787 kunmap_atomic(virt_addr); 820 kunmap_atomic(virt_addr);
788 821
789 if (PageHighMem(pg)) 822 if (PageHighMem(pg))
790 local_irq_restore(flags); 823 local_irq_restore(flags);
791 } 824 }
792 825
793 /* FIXME - should return -1 */ 826 /* FIXME - should return -1 */
794 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) 827 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
795 { 828 {
796 struct csrow_info *csrows = mci->csrows; 829 struct csrow_info **csrows = mci->csrows;
797 int row, i, j, n; 830 int row, i, j, n;
798 831
799 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); 832 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
800 row = -1; 833 row = -1;
801 834
802 for (i = 0; i < mci->nr_csrows; i++) { 835 for (i = 0; i < mci->nr_csrows; i++) {
803 struct csrow_info *csrow = &csrows[i]; 836 struct csrow_info *csrow = csrows[i];
804 n = 0; 837 n = 0;
805 for (j = 0; j < csrow->nr_channels; j++) { 838 for (j = 0; j < csrow->nr_channels; j++) {
806 struct dimm_info *dimm = csrow->channels[j].dimm; 839 struct dimm_info *dimm = csrow->channels[j]->dimm;
807 n += dimm->nr_pages; 840 n += dimm->nr_pages;
808 } 841 }
809 if (n == 0) 842 if (n == 0)
810 continue; 843 continue;
811 844
812 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " 845 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
813 "mask(0x%lx)\n", mci->mc_idx, __func__, 846 "mask(0x%lx)\n", mci->mc_idx, __func__,
814 csrow->first_page, page, csrow->last_page, 847 csrow->first_page, page, csrow->last_page,
815 csrow->page_mask); 848 csrow->page_mask);
816 849
817 if ((page >= csrow->first_page) && 850 if ((page >= csrow->first_page) &&
818 (page <= csrow->last_page) && 851 (page <= csrow->last_page) &&
819 ((page & csrow->page_mask) == 852 ((page & csrow->page_mask) ==
820 (csrow->first_page & csrow->page_mask))) { 853 (csrow->first_page & csrow->page_mask))) {
821 row = i; 854 row = i;
822 break; 855 break;
823 } 856 }
824 } 857 }
825 858
826 if (row == -1) 859 if (row == -1)
827 edac_mc_printk(mci, KERN_ERR, 860 edac_mc_printk(mci, KERN_ERR,
828 "could not look up page error address %lx\n", 861 "could not look up page error address %lx\n",
829 (unsigned long)page); 862 (unsigned long)page);
830 863
831 return row; 864 return row;
832 } 865 }
833 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); 866 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
834 867
835 const char *edac_layer_name[] = { 868 const char *edac_layer_name[] = {
836 [EDAC_MC_LAYER_BRANCH] = "branch", 869 [EDAC_MC_LAYER_BRANCH] = "branch",
837 [EDAC_MC_LAYER_CHANNEL] = "channel", 870 [EDAC_MC_LAYER_CHANNEL] = "channel",
838 [EDAC_MC_LAYER_SLOT] = "slot", 871 [EDAC_MC_LAYER_SLOT] = "slot",
839 [EDAC_MC_LAYER_CHIP_SELECT] = "csrow", 872 [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
840 }; 873 };
841 EXPORT_SYMBOL_GPL(edac_layer_name); 874 EXPORT_SYMBOL_GPL(edac_layer_name);
842 875
843 static void edac_inc_ce_error(struct mem_ctl_info *mci, 876 static void edac_inc_ce_error(struct mem_ctl_info *mci,
844 bool enable_per_layer_report, 877 bool enable_per_layer_report,
845 const int pos[EDAC_MAX_LAYERS]) 878 const int pos[EDAC_MAX_LAYERS])
846 { 879 {
847 int i, index = 0; 880 int i, index = 0;
848 881
849 mci->ce_mc++; 882 mci->ce_mc++;
850 883
851 if (!enable_per_layer_report) { 884 if (!enable_per_layer_report) {
852 mci->ce_noinfo_count++; 885 mci->ce_noinfo_count++;
853 return; 886 return;
854 } 887 }
855 888
856 for (i = 0; i < mci->n_layers; i++) { 889 for (i = 0; i < mci->n_layers; i++) {
857 if (pos[i] < 0) 890 if (pos[i] < 0)
858 break; 891 break;
859 index += pos[i]; 892 index += pos[i];
860 mci->ce_per_layer[i][index]++; 893 mci->ce_per_layer[i][index]++;
861 894
862 if (i < mci->n_layers - 1) 895 if (i < mci->n_layers - 1)
863 index *= mci->layers[i + 1].size; 896 index *= mci->layers[i + 1].size;
864 } 897 }
865 } 898 }
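
A worked example of the linearisation performed by the loop above:

/* For a 3-layer controller with layer sizes {2, 4, 8} and an error at
 * pos = {1, 2, 3}:
 *
 *   i = 0: index = 0 + 1 = 1,   ce_per_layer[0][1]++,  index *= 4 ->  4
 *   i = 1: index = 4 + 2 = 6,   ce_per_layer[1][6]++,  index *= 8 -> 48
 *   i = 2: index = 48 + 3 = 51, ce_per_layer[2][51]++
 *
 * i.e. index = (pos[0] * 4 + pos[1]) * 8 + pos[2], the usual row-major
 * linearisation, which is why layer i needs size[0] * ... * size[i]
 * counters, exactly as reserved in edac_mc_alloc() above.
 */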
866 899
867 static void edac_inc_ue_error(struct mem_ctl_info *mci, 900 static void edac_inc_ue_error(struct mem_ctl_info *mci,
868 bool enable_per_layer_report, 901 bool enable_per_layer_report,
869 const int pos[EDAC_MAX_LAYERS]) 902 const int pos[EDAC_MAX_LAYERS])
870 { 903 {
871 int i, index = 0; 904 int i, index = 0;
872 905
873 mci->ue_mc++; 906 mci->ue_mc++;
874 907
875 if (!enable_per_layer_report) { 908 if (!enable_per_layer_report) {
876 mci->ue_noinfo_count++; 909 mci->ue_noinfo_count++;
877 return; 910 return;
878 } 911 }
879 912
880 for (i = 0; i < mci->n_layers; i++) { 913 for (i = 0; i < mci->n_layers; i++) {
881 if (pos[i] < 0) 914 if (pos[i] < 0)
882 break; 915 break;
883 index += pos[i]; 916 index += pos[i];
884 mci->ue_per_layer[i][index]++; 917 mci->ue_per_layer[i][index]++;
885 918
886 if (i < mci->n_layers - 1) 919 if (i < mci->n_layers - 1)
887 index *= mci->layers[i + 1].size; 920 index *= mci->layers[i + 1].size;
888 } 921 }
889 } 922 }
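Both increment helpers flatten the per-layer position into one array index in row-major order: the running index is scaled by the next layer's size before the next position is added. A worked example for a hypothetical two-layer controller (4 channels, 2 slots per channel):

/*
 * Hypothetical layout: layers[0] = channel (size 4), layers[1] = slot
 * (size 2), pos = {3, 1}:
 *
 *   i = 0: index = 0 + 3 = 3, then index *= layers[1].size -> 6
 *   i = 1: index = 6 + 1 = 7
 *
 * So ce_per_layer[0][3] (channel 3) and ce_per_layer[1][7] (channel 3,
 * slot 1) are both incremented; 7 == pos[0] * 2 + pos[1], the usual
 * row-major flattening.
 */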
890 923
891 static void edac_ce_error(struct mem_ctl_info *mci, 924 static void edac_ce_error(struct mem_ctl_info *mci,
892 const int pos[EDAC_MAX_LAYERS], 925 const int pos[EDAC_MAX_LAYERS],
893 const char *msg, 926 const char *msg,
894 const char *location, 927 const char *location,
895 const char *label, 928 const char *label,
896 const char *detail, 929 const char *detail,
897 const char *other_detail, 930 const char *other_detail,
898 const bool enable_per_layer_report, 931 const bool enable_per_layer_report,
899 const unsigned long page_frame_number, 932 const unsigned long page_frame_number,
900 const unsigned long offset_in_page, 933 const unsigned long offset_in_page,
901 long grain) 934 long grain)
902 { 935 {
903 unsigned long remapped_page; 936 unsigned long remapped_page;
904 937
905 if (edac_mc_get_log_ce()) { 938 if (edac_mc_get_log_ce()) {
906 if (other_detail && *other_detail) 939 if (other_detail && *other_detail)
907 edac_mc_printk(mci, KERN_WARNING, 940 edac_mc_printk(mci, KERN_WARNING,
908 "CE %s on %s (%s %s - %s)\n", 941 "CE %s on %s (%s %s - %s)\n",
909 msg, label, location, 942 msg, label, location,
910 detail, other_detail); 943 detail, other_detail);
911 else 944 else
912 edac_mc_printk(mci, KERN_WARNING, 945 edac_mc_printk(mci, KERN_WARNING,
913 "CE %s on %s (%s %s)\n", 946 "CE %s on %s (%s %s)\n",
914 msg, label, location, 947 msg, label, location,
915 detail); 948 detail);
916 } 949 }
917 edac_inc_ce_error(mci, enable_per_layer_report, pos); 950 edac_inc_ce_error(mci, enable_per_layer_report, pos);
918 951
919 if (mci->scrub_mode & SCRUB_SW_SRC) { 952 if (mci->scrub_mode & SCRUB_SW_SRC) {
920 /* 953 /*
921 * Some memory controllers (called MCs below) can remap 954 * Some memory controllers (called MCs below) can remap
922 * memory so that it is still available at a different 955 * memory so that it is still available at a different
923 * address when PCI devices map into memory. 956 * address when PCI devices map into memory.
924 * MCs that can't do this lose the memory where PCI 957 * MCs that can't do this lose the memory where PCI
925 * devices are mapped. This mapping is MC-dependent 958 * devices are mapped. This mapping is MC-dependent
926 * and so we call back into the MC driver for it to 959 * and so we call back into the MC driver for it to
927 * map the MC page to a physical (CPU) page which can 960 * map the MC page to a physical (CPU) page which can
928 * then be mapped to a virtual page - which can then 961 * then be mapped to a virtual page - which can then
929 * be scrubbed. 962 * be scrubbed.
930 */ 963 */
931 remapped_page = mci->ctl_page_to_phys ? 964 remapped_page = mci->ctl_page_to_phys ?
932 mci->ctl_page_to_phys(mci, page_frame_number) : 965 mci->ctl_page_to_phys(mci, page_frame_number) :
933 page_frame_number; 966 page_frame_number;
934 967
935 edac_mc_scrub_block(remapped_page, 968 edac_mc_scrub_block(remapped_page,
936 offset_in_page, grain); 969 offset_in_page, grain);
937 } 970 }
938 } 971 }
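The software-scrub branch above depends on the optional ->ctl_page_to_phys hook. A minimal sketch of such a hook follows, assuming a made-up controller that relocates the memory displaced by a PCI hole upward by a fixed number of pages; HOLE_START_PFN and HOLE_NR_PAGES are assumptions, and real implementations are chipset-specific.

/*
 * Hypothetical remapping callback: memory stolen for PCI in
 * [HOLE_START_PFN, HOLE_START_PFN + HOLE_NR_PAGES) is made visible
 * again HOLE_NR_PAGES higher up.
 */
static unsigned long example_ctl_page_to_phys(struct mem_ctl_info *mci,
					      unsigned long page)
{
	if (page >= HOLE_START_PFN)
		return page + HOLE_NR_PAGES;

	return page;
}

/* wired up at probe time: mci->ctl_page_to_phys = example_ctl_page_to_phys; */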
939 972
940 static void edac_ue_error(struct mem_ctl_info *mci, 973 static void edac_ue_error(struct mem_ctl_info *mci,
941 const int pos[EDAC_MAX_LAYERS], 974 const int pos[EDAC_MAX_LAYERS],
942 const char *msg, 975 const char *msg,
943 const char *location, 976 const char *location,
944 const char *label, 977 const char *label,
945 const char *detail, 978 const char *detail,
946 const char *other_detail, 979 const char *other_detail,
947 const bool enable_per_layer_report) 980 const bool enable_per_layer_report)
948 { 981 {
949 if (edac_mc_get_log_ue()) { 982 if (edac_mc_get_log_ue()) {
950 if (other_detail && *other_detail) 983 if (other_detail && *other_detail)
951 edac_mc_printk(mci, KERN_WARNING, 984 edac_mc_printk(mci, KERN_WARNING,
952 "UE %s on %s (%s %s - %s)\n", 985 "UE %s on %s (%s %s - %s)\n",
953 msg, label, location, detail, 986 msg, label, location, detail,
954 other_detail); 987 other_detail);
955 else 988 else
956 edac_mc_printk(mci, KERN_WARNING, 989 edac_mc_printk(mci, KERN_WARNING,
957 "UE %s on %s (%s %s)\n", 990 "UE %s on %s (%s %s)\n",
958 msg, label, location, detail); 991 msg, label, location, detail);
959 } 992 }
960 993
961 if (edac_mc_get_panic_on_ue()) { 994 if (edac_mc_get_panic_on_ue()) {
962 if (other_detail && *other_detail) 995 if (other_detail && *other_detail)
963 panic("UE %s on %s (%s%s - %s)\n", 996 panic("UE %s on %s (%s%s - %s)\n",
964 msg, label, location, detail, other_detail); 997 msg, label, location, detail, other_detail);
965 else 998 else
966 panic("UE %s on %s (%s%s)\n", 999 panic("UE %s on %s (%s%s)\n",
967 msg, label, location, detail); 1000 msg, label, location, detail);
968 } 1001 }
969 1002
970 edac_inc_ue_error(mci, enable_per_layer_report, pos); 1003 edac_inc_ue_error(mci, enable_per_layer_report, pos);
971 } 1004 }
972 1005
973 #define OTHER_LABEL " or " 1006 #define OTHER_LABEL " or "
974 1007
975 /** 1008 /**
976 * edac_mc_handle_error - reports a memory event to userspace 1009 * edac_mc_handle_error - reports a memory event to userspace
977 * 1010 *
978 * @type: severity of the error (CE/UE/Fatal) 1011 * @type: severity of the error (CE/UE/Fatal)
979 * @mci: a struct mem_ctl_info pointer 1012 * @mci: a struct mem_ctl_info pointer
980 * @page_frame_number: mem page where the error occurred 1013 * @page_frame_number: mem page where the error occurred
981 * @offset_in_page: offset of the error inside the page 1014 * @offset_in_page: offset of the error inside the page
982 * @syndrome: ECC syndrome 1015 * @syndrome: ECC syndrome
983 * @top_layer: Memory layer[0] position 1016 * @top_layer: Memory layer[0] position
984 * @mid_layer: Memory layer[1] position 1017 * @mid_layer: Memory layer[1] position
985 * @low_layer: Memory layer[2] position 1018 * @low_layer: Memory layer[2] position
986 * @msg: Message meaningful to the end users that 1019 * @msg: Message meaningful to the end users that
987 * explains the event 1020 * explains the event
988 * @other_detail: Technical details about the event that 1021 * @other_detail: Technical details about the event that
989 * may help hardware manufacturers and 1022 * may help hardware manufacturers and
990 * EDAC developers to analyse the event 1023 * EDAC developers to analyse the event
991 * @arch_log: Architecture-specific struct that can 1024 * @arch_log: Architecture-specific struct that can
992 * be used to add extended information to the 1025 * be used to add extended information to the
993 * tracepoint, like dumping MCE registers. 1026 * tracepoint, like dumping MCE registers.
994 */ 1027 */
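A hedged example of calling the entry point documented above, for a corrected error on a csrow/channel-organized controller; every decoded value (page, offset, syndrome, layer positions) is made up.

static void example_report_ce(struct mem_ctl_info *mci)
{
	/* Illustrative call only; all decoded values are hypothetical. */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     0x12345,	/* page_frame_number */
			     0x40,	/* offset_in_page */
			     0xdead,	/* ECC syndrome */
			     2,		/* top layer: csrow 2 */
			     1,		/* mid layer: channel 1 */
			     -1,	/* low layer: unused */
			     "single-bit error", "", NULL);
}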
995 void edac_mc_handle_error(const enum hw_event_mc_err_type type, 1028 void edac_mc_handle_error(const enum hw_event_mc_err_type type,
996 struct mem_ctl_info *mci, 1029 struct mem_ctl_info *mci,
997 const unsigned long page_frame_number, 1030 const unsigned long page_frame_number,
998 const unsigned long offset_in_page, 1031 const unsigned long offset_in_page,
999 const unsigned long syndrome, 1032 const unsigned long syndrome,
1000 const int top_layer, 1033 const int top_layer,
1001 const int mid_layer, 1034 const int mid_layer,
1002 const int low_layer, 1035 const int low_layer,
1003 const char *msg, 1036 const char *msg,
1004 const char *other_detail, 1037 const char *other_detail,
1005 const void *arch_log) 1038 const void *arch_log)
1006 { 1039 {
1007 /* FIXME: too much for stack: move it to some pre-allocated area */ 1040 /* FIXME: too much for stack: move it to some pre-allocated area */
1008 char detail[80], location[80]; 1041 char detail[80], location[80];
1009 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms]; 1042 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
1010 char *p; 1043 char *p;
1011 int row = -1, chan = -1; 1044 int row = -1, chan = -1;
1012 int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; 1045 int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
1013 int i; 1046 int i;
1014 long grain; 1047 long grain;
1015 bool enable_per_layer_report = false; 1048 bool enable_per_layer_report = false;
1016 u16 error_count; /* FIXME: make it a parameter */ 1049 u16 error_count; /* FIXME: make it a parameter */
1017 u8 grain_bits; 1050 u8 grain_bits;
1018 1051
1019 debugf3("MC%d: %s()\n", mci->mc_idx, __func__); 1052 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1020 1053
1021 /* 1054 /*
1022 * Check if the event report is consistent and if the memory 1055 * Check if the event report is consistent and if the memory
1023 * location is known. If it is known, enable_per_layer_report will be 1056 * location is known. If it is known, enable_per_layer_report will be
1024 * true, the DIMM(s) label info will be filled and the per-layer 1057 * true, the DIMM(s) label info will be filled and the per-layer
1025 * error counters will be incremented. 1058 * error counters will be incremented.
1026 */ 1059 */
1027 for (i = 0; i < mci->n_layers; i++) { 1060 for (i = 0; i < mci->n_layers; i++) {
1028 if (pos[i] >= (int)mci->layers[i].size) { 1061 if (pos[i] >= (int)mci->layers[i].size) {
1029 if (type == HW_EVENT_ERR_CORRECTED) 1062 if (type == HW_EVENT_ERR_CORRECTED)
1030 p = "CE"; 1063 p = "CE";
1031 else 1064 else
1032 p = "UE"; 1065 p = "UE";
1033 1066
1034 edac_mc_printk(mci, KERN_ERR, 1067 edac_mc_printk(mci, KERN_ERR,
1035 "INTERNAL ERROR: %s value is out of range (%d >= %d)\n", 1068 "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
1036 edac_layer_name[mci->layers[i].type], 1069 edac_layer_name[mci->layers[i].type],
1037 pos[i], mci->layers[i].size); 1070 pos[i], mci->layers[i].size);
1038 /* 1071 /*
1039 * Instead of just returning it, let's use what's 1072 * Instead of just returning it, let's use what's
1040 * known about the error. The increment routines and 1073 * known about the error. The increment routines and
1041 * the DIMM filter logic will do the right thing by 1074 * the DIMM filter logic will do the right thing by
1042 * pointing the likely damaged DIMMs. 1075 * pointing the likely damaged DIMMs.
1043 */ 1076 */
1044 pos[i] = -1; 1077 pos[i] = -1;
1045 } 1078 }
1046 if (pos[i] >= 0) 1079 if (pos[i] >= 0)
1047 enable_per_layer_report = true; 1080 enable_per_layer_report = true;
1048 } 1081 }
1049 1082
1050 /* 1083 /*
1051 * Get the dimm label/grain that applies to the match criteria. 1084 * Get the dimm label/grain that applies to the match criteria.
1052 * As the error algorithm may not be able to point to just one memory 1085 * As the error algorithm may not be able to point to just one memory
1053 * stick, the logic here will get all possible labels that could 1086 * stick, the logic here will get all possible labels that could
1054 * potentially be affected by the error. 1087 * potentially be affected by the error.
1055 * On FB-DIMM memory controllers, for uncorrected errors, it is common 1088 * On FB-DIMM memory controllers, for uncorrected errors, it is common
1056 * to have only the MC channel and the MC dimm (also called "branch") 1089 * to have only the MC channel and the MC dimm (also called "branch")
1057 * but the channel is not known, as the memory is arranged in pairs, 1090 * but the channel is not known, as the memory is arranged in pairs,
1058 * where each memory belongs to a separate channel within the same 1091 * where each memory belongs to a separate channel within the same
1059 * branch. 1092 * branch.
1060 */ 1093 */
1061 grain = 0; 1094 grain = 0;
1062 p = label; 1095 p = label;
1063 *p = '\0'; 1096 *p = '\0';
1064 for (i = 0; i < mci->tot_dimms; i++) { 1097 for (i = 0; i < mci->tot_dimms; i++) {
1065 struct dimm_info *dimm = &mci->dimms[i]; 1098 struct dimm_info *dimm = mci->dimms[i];
1066 1099
1067 if (top_layer >= 0 && top_layer != dimm->location[0]) 1100 if (top_layer >= 0 && top_layer != dimm->location[0])
1068 continue; 1101 continue;
1069 if (mid_layer >= 0 && mid_layer != dimm->location[1]) 1102 if (mid_layer >= 0 && mid_layer != dimm->location[1])
1070 continue; 1103 continue;
1071 if (low_layer >= 0 && low_layer != dimm->location[2]) 1104 if (low_layer >= 0 && low_layer != dimm->location[2])
1072 continue; 1105 continue;
1073 1106
1074 /* get the max grain, over the error match range */ 1107 /* get the max grain, over the error match range */
1075 if (dimm->grain > grain) 1108 if (dimm->grain > grain)
1076 grain = dimm->grain; 1109 grain = dimm->grain;
1077 1110
1078 /* 1111 /*
1079 * If the error is memory-controller wide, there's no need to 1112 * If the error is memory-controller wide, there's no need to
1080 * search for the affected DIMMs because the whole 1113 * search for the affected DIMMs because the whole
1081 * channel/memory controller/... may be affected. 1114 * channel/memory controller/... may be affected.
1082 * Also, don't show errors for empty DIMM slots. 1115 * Also, don't show errors for empty DIMM slots.
1083 */ 1116 */
1084 if (enable_per_layer_report && dimm->nr_pages) { 1117 if (enable_per_layer_report && dimm->nr_pages) {
1085 if (p != label) { 1118 if (p != label) {
1086 strcpy(p, OTHER_LABEL); 1119 strcpy(p, OTHER_LABEL);
1087 p += strlen(OTHER_LABEL); 1120 p += strlen(OTHER_LABEL);
1088 } 1121 }
1089 strcpy(p, dimm->label); 1122 strcpy(p, dimm->label);
1090 p += strlen(p); 1123 p += strlen(p);
1091 *p = '\0'; 1124 *p = '\0';
1092 1125
1093 /* 1126 /*
1094 * get csrow/channel of the DIMM, in order to allow 1127 * get csrow/channel of the DIMM, in order to allow
1095 * incrementing the compat API counters 1128 * incrementing the compat API counters
1096 */ 1129 */
1097 debugf4("%s: %s csrows map: (%d,%d)\n", 1130 debugf4("%s: %s csrows map: (%d,%d)\n",
1098 __func__, 1131 __func__,
1099 mci->mem_is_per_rank ? "rank" : "dimm", 1132 mci->mem_is_per_rank ? "rank" : "dimm",
1100 dimm->csrow, dimm->cschannel); 1133 dimm->csrow, dimm->cschannel);
1101 1134
1102 if (row == -1) 1135 if (row == -1)
1103 row = dimm->csrow; 1136 row = dimm->csrow;
1104 else if (row >= 0 && row != dimm->csrow) 1137 else if (row >= 0 && row != dimm->csrow)
1105 row = -2; 1138 row = -2;
1106 1139
1107 if (chan == -1) 1140 if (chan == -1)
1108 chan = dimm->cschannel; 1141 chan = dimm->cschannel;
1109 else if (chan >= 0 && chan != dimm->cschannel) 1142 else if (chan >= 0 && chan != dimm->cschannel)
1110 chan = -2; 1143 chan = -2;
1111 } 1144 }
1112 } 1145 }
1113 1146
1114 if (!enable_per_layer_report) { 1147 if (!enable_per_layer_report) {
1115 strcpy(label, "any memory"); 1148 strcpy(label, "any memory");
1116 } else { 1149 } else {
1117 debugf4("%s: csrow/channel to increment: (%d,%d)\n", 1150 debugf4("%s: csrow/channel to increment: (%d,%d)\n",
1118 __func__, row, chan); 1151 __func__, row, chan);
1119 if (p == label) 1152 if (p == label)
1120 strcpy(label, "unknown memory"); 1153 strcpy(label, "unknown memory");
1121 if (type == HW_EVENT_ERR_CORRECTED) { 1154 if (type == HW_EVENT_ERR_CORRECTED) {
1122 if (row >= 0) { 1155 if (row >= 0) {
1123 mci->csrows[row].ce_count++; 1156 mci->csrows[row]->ce_count++;
1124 if (chan >= 0) 1157 if (chan >= 0)
1125 mci->csrows[row].channels[chan].ce_count++; 1158 mci->csrows[row]->channels[chan]->ce_count++;
1126 } 1159 }
1127 } else 1160 } else
1128 if (row >= 0) 1161 if (row >= 0)
1129 mci->csrows[row].ue_count++; 1162 mci->csrows[row]->ue_count++;
1130 } 1163 }
1131 1164
1132 /* Fill the RAM location data */ 1165 /* Fill the RAM location data */
1133 p = location; 1166 p = location;
1134 for (i = 0; i < mci->n_layers; i++) { 1167 for (i = 0; i < mci->n_layers; i++) {
1135 if (pos[i] < 0) 1168 if (pos[i] < 0)
1136 continue; 1169 continue;
1137 1170
1138 p += sprintf(p, "%s:%d ", 1171 p += sprintf(p, "%s:%d ",
1139 edac_layer_name[mci->layers[i].type], 1172 edac_layer_name[mci->layers[i].type],
1140 pos[i]); 1173 pos[i]);
1141 } 1174 }
1142 if (p > location) 1175 if (p > location)
1143 *(p - 1) = '\0'; 1176 *(p - 1) = '\0';
1144 1177
1145 /* Report the error via the trace interface */ 1178 /* Report the error via the trace interface */
1146 1179
1147 error_count = 1; /* FIXME: allow changing it */ 1180 error_count = 1; /* FIXME: allow changing it */
1148 grain_bits = fls_long(grain) + 1; 1181 grain_bits = fls_long(grain) + 1;
1149 trace_mc_event(type, msg, label, error_count, 1182 trace_mc_event(type, msg, label, error_count,
1150 mci->mc_idx, top_layer, mid_layer, low_layer, 1183 mci->mc_idx, top_layer, mid_layer, low_layer,
1151 PAGES_TO_MiB(page_frame_number) | offset_in_page, 1184 PAGES_TO_MiB(page_frame_number) | offset_in_page,
1152 grain_bits, syndrome, other_detail); 1185 grain_bits, syndrome, other_detail);
1153 1186
1154 /* Memory type dependent details about the error */ 1187 /* Memory type dependent details about the error */
1155 if (type == HW_EVENT_ERR_CORRECTED) { 1188 if (type == HW_EVENT_ERR_CORRECTED) {
1156 snprintf(detail, sizeof(detail), 1189 snprintf(detail, sizeof(detail),
1157 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx", 1190 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
1158 page_frame_number, offset_in_page, 1191 page_frame_number, offset_in_page,
1159 grain, syndrome); 1192 grain, syndrome);
1160 edac_ce_error(mci, pos, msg, location, label, detail, 1193 edac_ce_error(mci, pos, msg, location, label, detail,
1161 other_detail, enable_per_layer_report, 1194 other_detail, enable_per_layer_report,
drivers/edac/edac_mc_sysfs.c
1 /* 1 /*
2 * edac_mc kernel module 2 * edac_mc kernel module
3 * (C) 2005-2007 Linux Networx (http://lnxi.com) 3 * (C) 2005-2007 Linux Networx (http://lnxi.com)
4 * 4 *
5 * This file may be distributed under the terms of the 5 * This file may be distributed under the terms of the
6 * GNU General Public License. 6 * GNU General Public License.
7 * 7 *
8 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com 8 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
9 * 9 *
10 * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com> 10 * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com>
11 * The entire API was rewritten and ported to use struct device 11 * The entire API was rewritten and ported to use struct device
12 * 12 *
13 */ 13 */
14 14
15 #include <linux/ctype.h> 15 #include <linux/ctype.h>
16 #include <linux/slab.h> 16 #include <linux/slab.h>
17 #include <linux/edac.h> 17 #include <linux/edac.h>
18 #include <linux/bug.h> 18 #include <linux/bug.h>
19 #include <linux/pm_runtime.h> 19 #include <linux/pm_runtime.h>
20 #include <linux/uaccess.h> 20 #include <linux/uaccess.h>
21 21
22 #include "edac_core.h" 22 #include "edac_core.h"
23 #include "edac_module.h" 23 #include "edac_module.h"
24 24
25 /* MC EDAC Controls, settable by module parameter and sysfs */ 25 /* MC EDAC Controls, settable by module parameter and sysfs */
26 static int edac_mc_log_ue = 1; 26 static int edac_mc_log_ue = 1;
27 static int edac_mc_log_ce = 1; 27 static int edac_mc_log_ce = 1;
28 static int edac_mc_panic_on_ue; 28 static int edac_mc_panic_on_ue;
29 static int edac_mc_poll_msec = 1000; 29 static int edac_mc_poll_msec = 1000;
30 30
31 /* Getter functions for above */ 31 /* Getter functions for above */
32 int edac_mc_get_log_ue(void) 32 int edac_mc_get_log_ue(void)
33 { 33 {
34 return edac_mc_log_ue; 34 return edac_mc_log_ue;
35 } 35 }
36 36
37 int edac_mc_get_log_ce(void) 37 int edac_mc_get_log_ce(void)
38 { 38 {
39 return edac_mc_log_ce; 39 return edac_mc_log_ce;
40 } 40 }
41 41
42 int edac_mc_get_panic_on_ue(void) 42 int edac_mc_get_panic_on_ue(void)
43 { 43 {
44 return edac_mc_panic_on_ue; 44 return edac_mc_panic_on_ue;
45 } 45 }
46 46
47 /* this is temporary */ 47 /* this is temporary */
48 int edac_mc_get_poll_msec(void) 48 int edac_mc_get_poll_msec(void)
49 { 49 {
50 return edac_mc_poll_msec; 50 return edac_mc_poll_msec;
51 } 51 }
52 52
53 static int edac_set_poll_msec(const char *val, struct kernel_param *kp) 53 static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
54 { 54 {
55 long l; 55 long l;
56 int ret; 56 int ret;
57 57
58 if (!val) 58 if (!val)
59 return -EINVAL; 59 return -EINVAL;
60 60
61 ret = strict_strtol(val, 0, &l); 61 ret = strict_strtol(val, 0, &l);
62 if (ret == -EINVAL || ((int)l != l)) 62 if (ret == -EINVAL || ((int)l != l))
63 return -EINVAL; 63 return -EINVAL;
64 *((int *)kp->arg) = l; 64 *((int *)kp->arg) = l;
65 65
66 /* notify edac_mc engine to reset the poll period */ 66 /* notify edac_mc engine to reset the poll period */
67 edac_mc_reset_delay_period(l); 67 edac_mc_reset_delay_period(l);
68 68
69 return 0; 69 return 0;
70 } 70 }
71 71
72 /* Parameter declarations for above */ 72 /* Parameter declarations for above */
73 module_param(edac_mc_panic_on_ue, int, 0644); 73 module_param(edac_mc_panic_on_ue, int, 0644);
74 MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); 74 MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
75 module_param(edac_mc_log_ue, int, 0644); 75 module_param(edac_mc_log_ue, int, 0644);
76 MODULE_PARM_DESC(edac_mc_log_ue, 76 MODULE_PARM_DESC(edac_mc_log_ue,
77 "Log uncorrectable error to console: 0=off 1=on"); 77 "Log uncorrectable error to console: 0=off 1=on");
78 module_param(edac_mc_log_ce, int, 0644); 78 module_param(edac_mc_log_ce, int, 0644);
79 MODULE_PARM_DESC(edac_mc_log_ce, 79 MODULE_PARM_DESC(edac_mc_log_ce,
80 "Log correctable error to console: 0=off 1=on"); 80 "Log correctable error to console: 0=off 1=on");
81 module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int, 81 module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
82 &edac_mc_poll_msec, 0644); 82 &edac_mc_poll_msec, 0644);
83 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); 83 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
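For reference, the knobs declared above can be set at load time or at run time; the sketch below is illustrative and assumes these parameters are exposed under the edac_core module.

/*
 * Illustrative usage (paths assume the edac_core module):
 *
 *   modprobe edac_core edac_mc_panic_on_ue=1
 *   echo 5000 > /sys/module/edac_core/parameters/edac_mc_poll_msec
 *
 * The sysfs write is routed through edac_set_poll_msec() above, which
 * also pokes the polling engine via edac_mc_reset_delay_period().
 */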
84 84
85 static struct device mci_pdev; 85 static struct device *mci_pdev;
86 86
87 /* 87 /*
88 * various constants for Memory Controllers 88 * various constants for Memory Controllers
89 */ 89 */
90 static const char *mem_types[] = { 90 static const char *mem_types[] = {
91 [MEM_EMPTY] = "Empty", 91 [MEM_EMPTY] = "Empty",
92 [MEM_RESERVED] = "Reserved", 92 [MEM_RESERVED] = "Reserved",
93 [MEM_UNKNOWN] = "Unknown", 93 [MEM_UNKNOWN] = "Unknown",
94 [MEM_FPM] = "FPM", 94 [MEM_FPM] = "FPM",
95 [MEM_EDO] = "EDO", 95 [MEM_EDO] = "EDO",
96 [MEM_BEDO] = "BEDO", 96 [MEM_BEDO] = "BEDO",
97 [MEM_SDR] = "Unbuffered-SDR", 97 [MEM_SDR] = "Unbuffered-SDR",
98 [MEM_RDR] = "Registered-SDR", 98 [MEM_RDR] = "Registered-SDR",
99 [MEM_DDR] = "Unbuffered-DDR", 99 [MEM_DDR] = "Unbuffered-DDR",
100 [MEM_RDDR] = "Registered-DDR", 100 [MEM_RDDR] = "Registered-DDR",
101 [MEM_RMBS] = "RMBS", 101 [MEM_RMBS] = "RMBS",
102 [MEM_DDR2] = "Unbuffered-DDR2", 102 [MEM_DDR2] = "Unbuffered-DDR2",
103 [MEM_FB_DDR2] = "FullyBuffered-DDR2", 103 [MEM_FB_DDR2] = "FullyBuffered-DDR2",
104 [MEM_RDDR2] = "Registered-DDR2", 104 [MEM_RDDR2] = "Registered-DDR2",
105 [MEM_XDR] = "XDR", 105 [MEM_XDR] = "XDR",
106 [MEM_DDR3] = "Unbuffered-DDR3", 106 [MEM_DDR3] = "Unbuffered-DDR3",
107 [MEM_RDDR3] = "Registered-DDR3" 107 [MEM_RDDR3] = "Registered-DDR3"
108 }; 108 };
109 109
110 static const char *dev_types[] = { 110 static const char *dev_types[] = {
111 [DEV_UNKNOWN] = "Unknown", 111 [DEV_UNKNOWN] = "Unknown",
112 [DEV_X1] = "x1", 112 [DEV_X1] = "x1",
113 [DEV_X2] = "x2", 113 [DEV_X2] = "x2",
114 [DEV_X4] = "x4", 114 [DEV_X4] = "x4",
115 [DEV_X8] = "x8", 115 [DEV_X8] = "x8",
116 [DEV_X16] = "x16", 116 [DEV_X16] = "x16",
117 [DEV_X32] = "x32", 117 [DEV_X32] = "x32",
118 [DEV_X64] = "x64" 118 [DEV_X64] = "x64"
119 }; 119 };
120 120
121 static const char *edac_caps[] = { 121 static const char *edac_caps[] = {
122 [EDAC_UNKNOWN] = "Unknown", 122 [EDAC_UNKNOWN] = "Unknown",
123 [EDAC_NONE] = "None", 123 [EDAC_NONE] = "None",
124 [EDAC_RESERVED] = "Reserved", 124 [EDAC_RESERVED] = "Reserved",
125 [EDAC_PARITY] = "PARITY", 125 [EDAC_PARITY] = "PARITY",
126 [EDAC_EC] = "EC", 126 [EDAC_EC] = "EC",
127 [EDAC_SECDED] = "SECDED", 127 [EDAC_SECDED] = "SECDED",
128 [EDAC_S2ECD2ED] = "S2ECD2ED", 128 [EDAC_S2ECD2ED] = "S2ECD2ED",
129 [EDAC_S4ECD4ED] = "S4ECD4ED", 129 [EDAC_S4ECD4ED] = "S4ECD4ED",
130 [EDAC_S8ECD8ED] = "S8ECD8ED", 130 [EDAC_S8ECD8ED] = "S8ECD8ED",
131 [EDAC_S16ECD16ED] = "S16ECD16ED" 131 [EDAC_S16ECD16ED] = "S16ECD16ED"
132 }; 132 };
133 133
134 #ifdef CONFIG_EDAC_LEGACY_SYSFS 134 #ifdef CONFIG_EDAC_LEGACY_SYSFS
135 /* 135 /*
136 * EDAC sysfs CSROW data structures and methods 136 * EDAC sysfs CSROW data structures and methods
137 */ 137 */
138 138
139 #define to_csrow(k) container_of(k, struct csrow_info, dev) 139 #define to_csrow(k) container_of(k, struct csrow_info, dev)
140 140
141 /* 141 /*
142 * We need it to avoid namespace conflicts between the legacy API 142 * We need it to avoid namespace conflicts between the legacy API
143 * and the per-dimm/per-rank one 143 * and the per-dimm/per-rank one
144 */ 144 */
145 #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ 145 #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
146 struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) 146 struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
147 147
148 struct dev_ch_attribute { 148 struct dev_ch_attribute {
149 struct device_attribute attr; 149 struct device_attribute attr;
150 int channel; 150 int channel;
151 }; 151 };
152 152
153 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ 153 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
154 struct dev_ch_attribute dev_attr_legacy_##_name = \ 154 struct dev_ch_attribute dev_attr_legacy_##_name = \
155 { __ATTR(_name, _mode, _show, _store), (_var) } 155 { __ATTR(_name, _mode, _show, _store), (_var) }
156 156
157 #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) 157 #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
158 158
159 /* Default csrow<id> attribute show/store functions */ 159 /* Default csrow<id> attribute show/store functions */
160 static ssize_t csrow_ue_count_show(struct device *dev, 160 static ssize_t csrow_ue_count_show(struct device *dev,
161 struct device_attribute *mattr, char *data) 161 struct device_attribute *mattr, char *data)
162 { 162 {
163 struct csrow_info *csrow = to_csrow(dev); 163 struct csrow_info *csrow = to_csrow(dev);
164 164
165 return sprintf(data, "%u\n", csrow->ue_count); 165 return sprintf(data, "%u\n", csrow->ue_count);
166 } 166 }
167 167
168 static ssize_t csrow_ce_count_show(struct device *dev, 168 static ssize_t csrow_ce_count_show(struct device *dev,
169 struct device_attribute *mattr, char *data) 169 struct device_attribute *mattr, char *data)
170 { 170 {
171 struct csrow_info *csrow = to_csrow(dev); 171 struct csrow_info *csrow = to_csrow(dev);
172 172
173 return sprintf(data, "%u\n", csrow->ce_count); 173 return sprintf(data, "%u\n", csrow->ce_count);
174 } 174 }
175 175
176 static ssize_t csrow_size_show(struct device *dev, 176 static ssize_t csrow_size_show(struct device *dev,
177 struct device_attribute *mattr, char *data) 177 struct device_attribute *mattr, char *data)
178 { 178 {
179 struct csrow_info *csrow = to_csrow(dev); 179 struct csrow_info *csrow = to_csrow(dev);
180 int i; 180 int i;
181 u32 nr_pages = 0; 181 u32 nr_pages = 0;
182 182
183 for (i = 0; i < csrow->nr_channels; i++) 183 for (i = 0; i < csrow->nr_channels; i++)
184 nr_pages += csrow->channels[i].dimm->nr_pages; 184 nr_pages += csrow->channels[i]->dimm->nr_pages;
185 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); 185 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
186 } 186 }
187 187
188 static ssize_t csrow_mem_type_show(struct device *dev, 188 static ssize_t csrow_mem_type_show(struct device *dev,
189 struct device_attribute *mattr, char *data) 189 struct device_attribute *mattr, char *data)
190 { 190 {
191 struct csrow_info *csrow = to_csrow(dev); 191 struct csrow_info *csrow = to_csrow(dev);
192 192
193 return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]); 193 return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
194 } 194 }
195 195
196 static ssize_t csrow_dev_type_show(struct device *dev, 196 static ssize_t csrow_dev_type_show(struct device *dev,
197 struct device_attribute *mattr, char *data) 197 struct device_attribute *mattr, char *data)
198 { 198 {
199 struct csrow_info *csrow = to_csrow(dev); 199 struct csrow_info *csrow = to_csrow(dev);
200 200
201 return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]); 201 return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
202 } 202 }
203 203
204 static ssize_t csrow_edac_mode_show(struct device *dev, 204 static ssize_t csrow_edac_mode_show(struct device *dev,
205 struct device_attribute *mattr, 205 struct device_attribute *mattr,
206 char *data) 206 char *data)
207 { 207 {
208 struct csrow_info *csrow = to_csrow(dev); 208 struct csrow_info *csrow = to_csrow(dev);
209 209
210 return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]); 210 return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
211 } 211 }
212 212
213 /* show/store functions for DIMM Label attributes */ 213 /* show/store functions for DIMM Label attributes */
214 static ssize_t channel_dimm_label_show(struct device *dev, 214 static ssize_t channel_dimm_label_show(struct device *dev,
215 struct device_attribute *mattr, 215 struct device_attribute *mattr,
216 char *data) 216 char *data)
217 { 217 {
218 struct csrow_info *csrow = to_csrow(dev); 218 struct csrow_info *csrow = to_csrow(dev);
219 unsigned chan = to_channel(mattr); 219 unsigned chan = to_channel(mattr);
220 struct rank_info *rank = &csrow->channels[chan]; 220 struct rank_info *rank = csrow->channels[chan];
221 221
222 /* if field has not been initialized, there is nothing to send */ 222 /* if field has not been initialized, there is nothing to send */
223 if (!rank->dimm->label[0]) 223 if (!rank->dimm->label[0])
224 return 0; 224 return 0;
225 225
226 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 226 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
227 rank->dimm->label); 227 rank->dimm->label);
228 } 228 }
229 229
230 static ssize_t channel_dimm_label_store(struct device *dev, 230 static ssize_t channel_dimm_label_store(struct device *dev,
231 struct device_attribute *mattr, 231 struct device_attribute *mattr,
232 const char *data, size_t count) 232 const char *data, size_t count)
233 { 233 {
234 struct csrow_info *csrow = to_csrow(dev); 234 struct csrow_info *csrow = to_csrow(dev);
235 unsigned chan = to_channel(mattr); 235 unsigned chan = to_channel(mattr);
236 struct rank_info *rank = &csrow->channels[chan]; 236 struct rank_info *rank = csrow->channels[chan];
237 237
238 ssize_t max_size = 0; 238 ssize_t max_size = 0;
239 239
240 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 240 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
241 strncpy(rank->dimm->label, data, max_size); 241 strncpy(rank->dimm->label, data, max_size);
242 rank->dimm->label[max_size] = '\0'; 242 rank->dimm->label[max_size] = '\0';
243 243
244 return max_size; 244 return max_size;
245 } 245 }
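A hedged userspace counterpart for the store handler above; the sysfs path assumes the traditional /sys/devices/system/edac layout with a populated csrow0, channel 0.

/* Hypothetical userspace side of channel_dimm_label_store(). */
#include <fcntl.h>
#include <unistd.h>

static int example_set_label(void)
{
	int fd = open("/sys/devices/system/edac/mc/mc0/csrow0/ch0_dimm_label",
		      O_WRONLY);
	if (fd < 0)
		return -1;

	/* ends up in rank->dimm->label, truncated to EDAC_MC_LABEL_LEN - 1 */
	if (write(fd, "CPU_SrcA#0_DIMM#0", 17) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}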
246 246
247 /* show function for dynamic chX_ce_count attribute */ 247 /* show function for dynamic chX_ce_count attribute */
248 static ssize_t channel_ce_count_show(struct device *dev, 248 static ssize_t channel_ce_count_show(struct device *dev,
249 struct device_attribute *mattr, char *data) 249 struct device_attribute *mattr, char *data)
250 { 250 {
251 struct csrow_info *csrow = to_csrow(dev); 251 struct csrow_info *csrow = to_csrow(dev);
252 unsigned chan = to_channel(mattr); 252 unsigned chan = to_channel(mattr);
253 struct rank_info *rank = &csrow->channels[chan]; 253 struct rank_info *rank = csrow->channels[chan];
254 254
255 return sprintf(data, "%u\n", rank->ce_count); 255 return sprintf(data, "%u\n", rank->ce_count);
256 } 256 }
257 257
258 /* csrow<id>/attribute files */ 258 /* csrow<id>/attribute files */
259 DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL); 259 DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
260 DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL); 260 DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
261 DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL); 261 DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
262 DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL); 262 DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
263 DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL); 263 DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
264 DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL); 264 DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
265 265
266 /* default attributes of the CSROW<id> object */ 266 /* default attributes of the CSROW<id> object */
267 static struct attribute *csrow_attrs[] = { 267 static struct attribute *csrow_attrs[] = {
268 &dev_attr_legacy_dev_type.attr, 268 &dev_attr_legacy_dev_type.attr,
269 &dev_attr_legacy_mem_type.attr, 269 &dev_attr_legacy_mem_type.attr,
270 &dev_attr_legacy_edac_mode.attr, 270 &dev_attr_legacy_edac_mode.attr,
271 &dev_attr_legacy_size_mb.attr, 271 &dev_attr_legacy_size_mb.attr,
272 &dev_attr_legacy_ue_count.attr, 272 &dev_attr_legacy_ue_count.attr,
273 &dev_attr_legacy_ce_count.attr, 273 &dev_attr_legacy_ce_count.attr,
274 NULL, 274 NULL,
275 }; 275 };
276 276
277 static struct attribute_group csrow_attr_grp = { 277 static struct attribute_group csrow_attr_grp = {
278 .attrs = csrow_attrs, 278 .attrs = csrow_attrs,
279 }; 279 };
280 280
281 static const struct attribute_group *csrow_attr_groups[] = { 281 static const struct attribute_group *csrow_attr_groups[] = {
282 &csrow_attr_grp, 282 &csrow_attr_grp,
283 NULL 283 NULL
284 }; 284 };
285 285
286 static void csrow_attr_release(struct device *device) 286 static void csrow_attr_release(struct device *dev)
287 { 287 {
288 debugf1("Releasing csrow device %s\n", dev_name(device)); 288 struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
289
290 debugf1("Releasing csrow device %s\n", dev_name(dev));
291 kfree(csrow);
289 } 292 }
290 293
291 static struct device_type csrow_attr_type = { 294 static struct device_type csrow_attr_type = {
292 .groups = csrow_attr_groups, 295 .groups = csrow_attr_groups,
293 .release = csrow_attr_release, 296 .release = csrow_attr_release,
294 }; 297 };
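The release handler wired in above is the visible half of this commit's point: each csrow is now its own separately allocated kobject container, so the driver core can kfree() it when the last reference drops. A condensed sketch of the allocation side it pairs with, following the new edac_mc_alloc() scheme (field names taken from this diff; per-row error unwinding elided):

/* Condensed counterpart from the new allocation scheme (sketch only). */
static int example_alloc_csrows(struct mem_ctl_info *mci, unsigned tot_csrows)
{
	struct csrow_info *csr;
	unsigned row;

	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		return -ENOMEM;

	for (row = 0; row < tot_csrows; row++) {
		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			return -ENOMEM;	/* real code unwinds earlier rows */
		mci->csrows[row] = csr;	/* freed by csrow_attr_release() */
		csr->csrow_idx = row;
		csr->mci = mci;
	}
	return 0;
}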
295 298
296 /* 299 /*
297 * possible dynamic channel DIMM Label attribute files 300 * possible dynamic channel DIMM Label attribute files
298 * 301 *
299 */ 302 */
300 303
301 #define EDAC_NR_CHANNELS 6 304 #define EDAC_NR_CHANNELS 6
302 305
303 DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, 306 DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
304 channel_dimm_label_show, channel_dimm_label_store, 0); 307 channel_dimm_label_show, channel_dimm_label_store, 0);
305 DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, 308 DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
306 channel_dimm_label_show, channel_dimm_label_store, 1); 309 channel_dimm_label_show, channel_dimm_label_store, 1);
307 DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR, 310 DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
308 channel_dimm_label_show, channel_dimm_label_store, 2); 311 channel_dimm_label_show, channel_dimm_label_store, 2);
309 DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR, 312 DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
310 channel_dimm_label_show, channel_dimm_label_store, 3); 313 channel_dimm_label_show, channel_dimm_label_store, 3);
311 DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, 314 DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
312 channel_dimm_label_show, channel_dimm_label_store, 4); 315 channel_dimm_label_show, channel_dimm_label_store, 4);
313 DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, 316 DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
314 channel_dimm_label_show, channel_dimm_label_store, 5); 317 channel_dimm_label_show, channel_dimm_label_store, 5);
315 318
316 /* Total possible dynamic DIMM Label attribute file table */ 319 /* Total possible dynamic DIMM Label attribute file table */
317 static struct device_attribute *dynamic_csrow_dimm_attr[] = { 320 static struct device_attribute *dynamic_csrow_dimm_attr[] = {
318 &dev_attr_legacy_ch0_dimm_label.attr, 321 &dev_attr_legacy_ch0_dimm_label.attr,
319 &dev_attr_legacy_ch1_dimm_label.attr, 322 &dev_attr_legacy_ch1_dimm_label.attr,
320 &dev_attr_legacy_ch2_dimm_label.attr, 323 &dev_attr_legacy_ch2_dimm_label.attr,
321 &dev_attr_legacy_ch3_dimm_label.attr, 324 &dev_attr_legacy_ch3_dimm_label.attr,
322 &dev_attr_legacy_ch4_dimm_label.attr, 325 &dev_attr_legacy_ch4_dimm_label.attr,
323 &dev_attr_legacy_ch5_dimm_label.attr 326 &dev_attr_legacy_ch5_dimm_label.attr
324 }; 327 };
325 328
326 /* possible dynamic channel ce_count attribute files */ 329 /* possible dynamic channel ce_count attribute files */
327 DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR, 330 DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
328 channel_ce_count_show, NULL, 0); 331 channel_ce_count_show, NULL, 0);
329 DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR, 332 DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
330 channel_ce_count_show, NULL, 1); 333 channel_ce_count_show, NULL, 1);
331 DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR, 334 DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
332 channel_ce_count_show, NULL, 2); 335 channel_ce_count_show, NULL, 2);
333 DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR, 336 DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
334 channel_ce_count_show, NULL, 3); 337 channel_ce_count_show, NULL, 3);
335 DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR, 338 DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
336 channel_ce_count_show, NULL, 4); 339 channel_ce_count_show, NULL, 4);
337 DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR, 340 DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
338 channel_ce_count_show, NULL, 5); 341 channel_ce_count_show, NULL, 5);
339 342
340 /* Total possible dynamic ce_count attribute file table */ 343 /* Total possible dynamic ce_count attribute file table */
341 static struct device_attribute *dynamic_csrow_ce_count_attr[] = { 344 static struct device_attribute *dynamic_csrow_ce_count_attr[] = {
342 &dev_attr_legacy_ch0_ce_count.attr, 345 &dev_attr_legacy_ch0_ce_count.attr,
343 &dev_attr_legacy_ch1_ce_count.attr, 346 &dev_attr_legacy_ch1_ce_count.attr,
344 &dev_attr_legacy_ch2_ce_count.attr, 347 &dev_attr_legacy_ch2_ce_count.attr,
345 &dev_attr_legacy_ch3_ce_count.attr, 348 &dev_attr_legacy_ch3_ce_count.attr,
346 &dev_attr_legacy_ch4_ce_count.attr, 349 &dev_attr_legacy_ch4_ce_count.attr,
347 &dev_attr_legacy_ch5_ce_count.attr 350 &dev_attr_legacy_ch5_ce_count.attr
348 }; 351 };
349 352
350 static inline int nr_pages_per_csrow(struct csrow_info *csrow) 353 static inline int nr_pages_per_csrow(struct csrow_info *csrow)
351 { 354 {
352 int chan, nr_pages = 0; 355 int chan, nr_pages = 0;
353 356
354 for (chan = 0; chan < csrow->nr_channels; chan++) 357 for (chan = 0; chan < csrow->nr_channels; chan++)
355 nr_pages += csrow->channels[chan].dimm->nr_pages; 358 nr_pages += csrow->channels[chan]->dimm->nr_pages;
356 359
357 return nr_pages; 360 return nr_pages;
358 } 361 }
359 362
360 /* Create a CSROW object under the specified edac_mc device */ 363 /* Create a CSROW object under the specified edac_mc device */
361 static int edac_create_csrow_object(struct mem_ctl_info *mci, 364 static int edac_create_csrow_object(struct mem_ctl_info *mci,
362 struct csrow_info *csrow, int index) 365 struct csrow_info *csrow, int index)
363 { 366 {
364 int err, chan; 367 int err, chan;
365 368
366 if (csrow->nr_channels >= EDAC_NR_CHANNELS) 369 if (csrow->nr_channels >= EDAC_NR_CHANNELS)
367 return -ENODEV; 370 return -ENODEV;
368 371
369 csrow->dev.type = &csrow_attr_type; 372 csrow->dev.type = &csrow_attr_type;
370 csrow->dev.bus = &mci->bus; 373 csrow->dev.bus = &mci->bus;
371 device_initialize(&csrow->dev); 374 device_initialize(&csrow->dev);
372 csrow->dev.parent = &mci->dev; 375 csrow->dev.parent = &mci->dev;
373 dev_set_name(&csrow->dev, "csrow%d", index); 376 dev_set_name(&csrow->dev, "csrow%d", index);
374 dev_set_drvdata(&csrow->dev, csrow); 377 dev_set_drvdata(&csrow->dev, csrow);
375 378
376 debugf0("%s(): creating (virtual) csrow node %s\n", __func__, 379 debugf0("%s(): creating (virtual) csrow node %s\n", __func__,
377 dev_name(&csrow->dev)); 380 dev_name(&csrow->dev));
378 381
379 err = device_add(&csrow->dev); 382 err = device_add(&csrow->dev);
380 if (err < 0) 383 if (err < 0)
381 return err; 384 return err;
382 385
383 for (chan = 0; chan < csrow->nr_channels; chan++) { 386 for (chan = 0; chan < csrow->nr_channels; chan++) {
384 /* Only expose populated DIMMs */ 387 /* Only expose populated DIMMs */
385 if (!csrow->channels[chan].dimm->nr_pages) 388 if (!csrow->channels[chan]->dimm->nr_pages)
386 continue; 389 continue;
387 err = device_create_file(&csrow->dev, 390 err = device_create_file(&csrow->dev,
388 dynamic_csrow_dimm_attr[chan]); 391 dynamic_csrow_dimm_attr[chan]);
389 if (err < 0) 392 if (err < 0)
390 goto error; 393 goto error;
391 err = device_create_file(&csrow->dev, 394 err = device_create_file(&csrow->dev,
392 dynamic_csrow_ce_count_attr[chan]); 395 dynamic_csrow_ce_count_attr[chan]);
393 if (err < 0) { 396 if (err < 0) {
394 device_remove_file(&csrow->dev, 397 device_remove_file(&csrow->dev,
395 dynamic_csrow_dimm_attr[chan]); 398 dynamic_csrow_dimm_attr[chan]);
396 goto error; 399 goto error;
397 } 400 }
398 } 401 }
399 402
400 return 0; 403 return 0;
401 404
402 error: 405 error:
403 for (--chan; chan >= 0; chan--) { 406 for (--chan; chan >= 0; chan--) {
404 device_remove_file(&csrow->dev, 407 device_remove_file(&csrow->dev,
405 dynamic_csrow_dimm_attr[chan]); 408 dynamic_csrow_dimm_attr[chan]);
406 device_remove_file(&csrow->dev, 409 device_remove_file(&csrow->dev,
407 dynamic_csrow_ce_count_attr[chan]); 410 dynamic_csrow_ce_count_attr[chan]);
408 } 411 }
409 put_device(&csrow->dev); 412 put_device(&csrow->dev);
410 413
411 return err; 414 return err;
412 } 415 }
413 416
414 /* Create the CSROW objects under the specified edac_mc device */ 417 /* Create the CSROW objects under the specified edac_mc device */
415 static int edac_create_csrow_objects(struct mem_ctl_info *mci) 418 static int edac_create_csrow_objects(struct mem_ctl_info *mci)
416 { 419 {
417 int err, i, chan; 420 int err, i, chan;
418 struct csrow_info *csrow; 421 struct csrow_info *csrow;
419 422
420 for (i = 0; i < mci->nr_csrows; i++) { 423 for (i = 0; i < mci->nr_csrows; i++) {
421 csrow = &mci->csrows[i]; 424 csrow = mci->csrows[i];
422 if (!nr_pages_per_csrow(csrow)) 425 if (!nr_pages_per_csrow(csrow))
423 continue; 426 continue;
424 err = edac_create_csrow_object(mci, &mci->csrows[i], i); 427 err = edac_create_csrow_object(mci, mci->csrows[i], i);
425 if (err < 0) 428 if (err < 0)
426 goto error; 429 goto error;
427 } 430 }
428 return 0; 431 return 0;
429 432
430 error: 433 error:
431 for (--i; i >= 0; i--) { 434 for (--i; i >= 0; i--) {
432 csrow = &mci->csrows[i]; 435 csrow = mci->csrows[i];
433 if (!nr_pages_per_csrow(csrow)) 436 if (!nr_pages_per_csrow(csrow))
434 continue; 437 continue;
435 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { 438 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
436 if (!csrow->channels[chan].dimm->nr_pages) 439 if (!csrow->channels[chan]->dimm->nr_pages)
437 continue; 440 continue;
438 device_remove_file(&csrow->dev, 441 device_remove_file(&csrow->dev,
439 dynamic_csrow_dimm_attr[chan]); 442 dynamic_csrow_dimm_attr[chan]);
440 device_remove_file(&csrow->dev, 443 device_remove_file(&csrow->dev,
441 dynamic_csrow_ce_count_attr[chan]); 444 dynamic_csrow_ce_count_attr[chan]);
442 } 445 }
443 put_device(&mci->csrows[i].dev); 446 put_device(&mci->csrows[i]->dev);
444 } 447 }
445 448
446 return err; 449 return err;
447 } 450 }
448 451
449 static void edac_delete_csrow_objects(struct mem_ctl_info *mci) 452 static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
450 { 453 {
451 int i, chan; 454 int i, chan;
452 struct csrow_info *csrow; 455 struct csrow_info *csrow;
453 456
454 for (i = mci->nr_csrows - 1; i >= 0; i--) { 457 for (i = mci->nr_csrows - 1; i >= 0; i--) {
455 csrow = &mci->csrows[i]; 458 csrow = mci->csrows[i];
456 if (!nr_pages_per_csrow(csrow)) 459 if (!nr_pages_per_csrow(csrow))
457 continue; 460 continue;
458 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { 461 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
459 if (!csrow->channels[chan].dimm->nr_pages) 462 if (!csrow->channels[chan]->dimm->nr_pages)
460 continue; 463 continue;
461 debugf1("Removing csrow %d channel %d sysfs nodes\n", 464 debugf1("Removing csrow %d channel %d sysfs nodes\n",
462 i, chan); 465 i, chan);
463 device_remove_file(&csrow->dev, 466 device_remove_file(&csrow->dev,
464 dynamic_csrow_dimm_attr[chan]); 467 dynamic_csrow_dimm_attr[chan]);
465 device_remove_file(&csrow->dev, 468 device_remove_file(&csrow->dev,
466 dynamic_csrow_ce_count_attr[chan]); 469 dynamic_csrow_ce_count_attr[chan]);
467 } 470 }
468 put_device(&mci->csrows[i].dev); 471 put_device(&mci->csrows[i]->dev);
469 device_del(&mci->csrows[i].dev); 472 device_del(&mci->csrows[i]->dev);
470 } 473 }
471 } 474 }
472 #endif 475 #endif
473 476
474 /* 477 /*
475 * Per-dimm (or per-rank) devices 478 * Per-dimm (or per-rank) devices
476 */ 479 */
477 480
478 #define to_dimm(k) container_of(k, struct dimm_info, dev) 481 #define to_dimm(k) container_of(k, struct dimm_info, dev)
479 482
480 /* show/store functions for DIMM Label attributes */ 483 /* show/store functions for DIMM Label attributes */
481 static ssize_t dimmdev_location_show(struct device *dev, 484 static ssize_t dimmdev_location_show(struct device *dev,
482 struct device_attribute *mattr, char *data) 485 struct device_attribute *mattr, char *data)
483 { 486 {
484 struct dimm_info *dimm = to_dimm(dev); 487 struct dimm_info *dimm = to_dimm(dev);
485 struct mem_ctl_info *mci = dimm->mci; 488 struct mem_ctl_info *mci = dimm->mci;
486 int i; 489 int i;
487 char *p = data; 490 char *p = data;
488 491
489 for (i = 0; i < mci->n_layers; i++) { 492 for (i = 0; i < mci->n_layers; i++) {
490 p += sprintf(p, "%s %d ", 493 p += sprintf(p, "%s %d ",
491 edac_layer_name[mci->layers[i].type], 494 edac_layer_name[mci->layers[i].type],
492 dimm->location[i]); 495 dimm->location[i]);
493 } 496 }
494 497
495 return p - data; 498 return p - data;
496 } 499 }
497 500
498 static ssize_t dimmdev_label_show(struct device *dev, 501 static ssize_t dimmdev_label_show(struct device *dev,
499 struct device_attribute *mattr, char *data) 502 struct device_attribute *mattr, char *data)
500 { 503 {
501 struct dimm_info *dimm = to_dimm(dev); 504 struct dimm_info *dimm = to_dimm(dev);
502 505
503 /* if field has not been initialized, there is nothing to send */ 506 /* if field has not been initialized, there is nothing to send */
504 if (!dimm->label[0]) 507 if (!dimm->label[0])
505 return 0; 508 return 0;
506 509
507 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label); 510 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
508 } 511 }
509 512
510 static ssize_t dimmdev_label_store(struct device *dev, 513 static ssize_t dimmdev_label_store(struct device *dev,
511 struct device_attribute *mattr, 514 struct device_attribute *mattr,
512 const char *data, 515 const char *data,
513 size_t count) 516 size_t count)
514 { 517 {
515 struct dimm_info *dimm = to_dimm(dev); 518 struct dimm_info *dimm = to_dimm(dev);
516 519
517 ssize_t max_size = 0; 520 ssize_t max_size = 0;
518 521
519 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 522 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
520 strncpy(dimm->label, data, max_size); 523 strncpy(dimm->label, data, max_size);
521 dimm->label[max_size] = '\0'; 524 dimm->label[max_size] = '\0';
522 525
523 return max_size; 526 return max_size;
524 } 527 }
525 528
526 static ssize_t dimmdev_size_show(struct device *dev, 529 static ssize_t dimmdev_size_show(struct device *dev,
527 struct device_attribute *mattr, char *data) 530 struct device_attribute *mattr, char *data)
528 { 531 {
529 struct dimm_info *dimm = to_dimm(dev); 532 struct dimm_info *dimm = to_dimm(dev);
530 533
531 return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); 534 return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
532 } 535 }
533 536
534 static ssize_t dimmdev_mem_type_show(struct device *dev, 537 static ssize_t dimmdev_mem_type_show(struct device *dev,
535 struct device_attribute *mattr, char *data) 538 struct device_attribute *mattr, char *data)
536 { 539 {
537 struct dimm_info *dimm = to_dimm(dev); 540 struct dimm_info *dimm = to_dimm(dev);
538 541
539 return sprintf(data, "%s\n", mem_types[dimm->mtype]); 542 return sprintf(data, "%s\n", mem_types[dimm->mtype]);
540 } 543 }
541 544
542 static ssize_t dimmdev_dev_type_show(struct device *dev, 545 static ssize_t dimmdev_dev_type_show(struct device *dev,
543 struct device_attribute *mattr, char *data) 546 struct device_attribute *mattr, char *data)
544 { 547 {
545 struct dimm_info *dimm = to_dimm(dev); 548 struct dimm_info *dimm = to_dimm(dev);
546 549
547 return sprintf(data, "%s\n", dev_types[dimm->dtype]); 550 return sprintf(data, "%s\n", dev_types[dimm->dtype]);
548 } 551 }
549 552
550 static ssize_t dimmdev_edac_mode_show(struct device *dev, 553 static ssize_t dimmdev_edac_mode_show(struct device *dev,
551 struct device_attribute *mattr, 554 struct device_attribute *mattr,
552 char *data) 555 char *data)
553 { 556 {
554 struct dimm_info *dimm = to_dimm(dev); 557 struct dimm_info *dimm = to_dimm(dev);
555 558
556 return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]); 559 return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
557 } 560 }
558 561
559 /* dimm/rank attribute files */ 562 /* dimm/rank attribute files */
560 static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR, 563 static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
561 dimmdev_label_show, dimmdev_label_store); 564 dimmdev_label_show, dimmdev_label_store);
562 static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL); 565 static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
563 static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL); 566 static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
564 static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL); 567 static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
565 static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL); 568 static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
566 static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL); 569 static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
567 570
568 /* attributes of the dimm<id>/rank<id> object */ 571 /* attributes of the dimm<id>/rank<id> object */
569 static struct attribute *dimm_attrs[] = { 572 static struct attribute *dimm_attrs[] = {
570 &dev_attr_dimm_label.attr, 573 &dev_attr_dimm_label.attr,
571 &dev_attr_dimm_location.attr, 574 &dev_attr_dimm_location.attr,
572 &dev_attr_size.attr, 575 &dev_attr_size.attr,
573 &dev_attr_dimm_mem_type.attr, 576 &dev_attr_dimm_mem_type.attr,
574 &dev_attr_dimm_dev_type.attr, 577 &dev_attr_dimm_dev_type.attr,
575 &dev_attr_dimm_edac_mode.attr, 578 &dev_attr_dimm_edac_mode.attr,
576 NULL, 579 NULL,
577 }; 580 };
578 581
579 static struct attribute_group dimm_attr_grp = { 582 static struct attribute_group dimm_attr_grp = {
580 .attrs = dimm_attrs, 583 .attrs = dimm_attrs,
581 }; 584 };
582 585
583 static const struct attribute_group *dimm_attr_groups[] = { 586 static const struct attribute_group *dimm_attr_groups[] = {
584 &dimm_attr_grp, 587 &dimm_attr_grp,
585 NULL 588 NULL
586 }; 589 };
587 590
588 static void dimm_attr_release(struct device *device) 591 static void dimm_attr_release(struct device *dev)
589 { 592 {
590 debugf1("Releasing dimm device %s\n", dev_name(device)); 593 struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
594
595 debugf1("Releasing dimm device %s\n", dev_name(dev));
596 kfree(dimm);
591 } 597 }
592 598
593 static struct device_type dimm_attr_type = { 599 static struct device_type dimm_attr_type = {
594 .groups = dimm_attr_groups, 600 .groups = dimm_attr_groups,
595 .release = dimm_attr_release, 601 .release = dimm_attr_release,
596 }; 602 };
597 603
598 /* Create a DIMM object under the specified memory controller device */ 604 /* Create a DIMM object under the specified memory controller device */
599 static int edac_create_dimm_object(struct mem_ctl_info *mci, 605 static int edac_create_dimm_object(struct mem_ctl_info *mci,
600 struct dimm_info *dimm, 606 struct dimm_info *dimm,
601 int index) 607 int index)
602 { 608 {
603 int err; 609 int err;
604 dimm->mci = mci; 610 dimm->mci = mci;
605 611
606 dimm->dev.type = &dimm_attr_type; 612 dimm->dev.type = &dimm_attr_type;
607 dimm->dev.bus = &mci->bus; 613 dimm->dev.bus = &mci->bus;
608 device_initialize(&dimm->dev); 614 device_initialize(&dimm->dev);
609 615
610 dimm->dev.parent = &mci->dev; 616 dimm->dev.parent = &mci->dev;
611 if (mci->mem_is_per_rank) 617 if (mci->mem_is_per_rank)
612 dev_set_name(&dimm->dev, "rank%d", index); 618 dev_set_name(&dimm->dev, "rank%d", index);
613 else 619 else
614 dev_set_name(&dimm->dev, "dimm%d", index); 620 dev_set_name(&dimm->dev, "dimm%d", index);
615 dev_set_drvdata(&dimm->dev, dimm); 621 dev_set_drvdata(&dimm->dev, dimm);
616 pm_runtime_forbid(&mci->dev); 622 pm_runtime_forbid(&mci->dev);
617 623
618 err = device_add(&dimm->dev); 624 err = device_add(&dimm->dev);
619 625
620 debugf0("%s(): creating rank/dimm device %s\n", __func__, 626 debugf0("%s(): creating rank/dimm device %s\n", __func__,
621 dev_name(&dimm->dev)); 627 dev_name(&dimm->dev));
622 628
623 return err; 629 return err;
624 } 630 }
625 631
626 /* 632 /*
627 * Memory controller device 633 * Memory controller device
628 */ 634 */
629 635
630 #define to_mci(k) container_of(k, struct mem_ctl_info, dev) 636 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
631 637
632 static ssize_t mci_reset_counters_store(struct device *dev, 638 static ssize_t mci_reset_counters_store(struct device *dev,
633 struct device_attribute *mattr, 639 struct device_attribute *mattr,
634 const char *data, size_t count) 640 const char *data, size_t count)
635 { 641 {
636 struct mem_ctl_info *mci = to_mci(dev); 642 struct mem_ctl_info *mci = to_mci(dev);
637 int cnt, row, chan, i; 643 int cnt, row, chan, i;
638 mci->ue_mc = 0; 644 mci->ue_mc = 0;
639 mci->ce_mc = 0; 645 mci->ce_mc = 0;
640 mci->ue_noinfo_count = 0; 646 mci->ue_noinfo_count = 0;
641 mci->ce_noinfo_count = 0; 647 mci->ce_noinfo_count = 0;
642 648
643 for (row = 0; row < mci->nr_csrows; row++) { 649 for (row = 0; row < mci->nr_csrows; row++) {
644 struct csrow_info *ri = &mci->csrows[row]; 650 struct csrow_info *ri = mci->csrows[row];
645 651
646 ri->ue_count = 0; 652 ri->ue_count = 0;
647 ri->ce_count = 0; 653 ri->ce_count = 0;
648 654
649 for (chan = 0; chan < ri->nr_channels; chan++) 655 for (chan = 0; chan < ri->nr_channels; chan++)
650 ri->channels[chan].ce_count = 0; 656 ri->channels[chan]->ce_count = 0;
651 } 657 }
652 658
653 cnt = 1; 659 cnt = 1;
654 for (i = 0; i < mci->n_layers; i++) { 660 for (i = 0; i < mci->n_layers; i++) {
655 cnt *= mci->layers[i].size; 661 cnt *= mci->layers[i].size;
656 memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32)); 662 memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
657 memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32)); 663 memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
658 } 664 }
659 665
660 mci->start_time = jiffies; 666 mci->start_time = jiffies;
661 return count; 667 return count;
662 } 668 }
663 669
664 /* Memory scrubbing interface: 670 /* Memory scrubbing interface:
665 * 671 *
666 * An MC driver can limit the scrubbing bandwidth based on the CPU type. 672 * An MC driver can limit the scrubbing bandwidth based on the CPU type.
667 * Therefore, ->set_sdram_scrub_rate should be made to return the actual 673 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
668 * bandwidth that is accepted or 0 when scrubbing is to be disabled. 674 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
669 * 675 *
670 * A negative value still means that an error has occurred while setting 676 * A negative value still means that an error has occurred while setting
671 * the scrub rate. 677 * the scrub rate.
672 */ 678 */
673 static ssize_t mci_sdram_scrub_rate_store(struct device *dev, 679 static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
674 struct device_attribute *mattr, 680 struct device_attribute *mattr,
675 const char *data, size_t count) 681 const char *data, size_t count)
676 { 682 {
677 struct mem_ctl_info *mci = to_mci(dev); 683 struct mem_ctl_info *mci = to_mci(dev);
678 unsigned long bandwidth = 0; 684 unsigned long bandwidth = 0;
679 int new_bw = 0; 685 int new_bw = 0;
680 686
681 if (!mci->set_sdram_scrub_rate) 687 if (!mci->set_sdram_scrub_rate)
682 return -ENODEV; 688 return -ENODEV;
683 689
684 if (strict_strtoul(data, 10, &bandwidth) < 0) 690 if (strict_strtoul(data, 10, &bandwidth) < 0)
685 return -EINVAL; 691 return -EINVAL;
686 692
687 new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); 693 new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
688 if (new_bw < 0) { 694 if (new_bw < 0) {
689 edac_printk(KERN_WARNING, EDAC_MC, 695 edac_printk(KERN_WARNING, EDAC_MC,
690 "Error setting scrub rate to: %lu\n", bandwidth); 696 "Error setting scrub rate to: %lu\n", bandwidth);
691 return -EINVAL; 697 return -EINVAL;
692 } 698 }
693 699
694 return count; 700 return count;
695 } 701 }
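
A hypothetical driver callback that satisfies the contract described above; the function name and the EXAMPLE_MAX_BW limit are assumptions for illustration, not part of the EDAC API:

	static int example_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
	{
		if (bw == 0)
			return 0;		/* scrubbing disabled */
		if (bw > EXAMPLE_MAX_BW)	/* clamp to the hardware limit */
			bw = EXAMPLE_MAX_BW;
		/* ...program the hardware scrubber here... */
		return bw;			/* bandwidth actually accepted */
	}

Any negative return is turned into -EINVAL by the store handler above, so a driver may also report failures with a negative errno.
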
696 702
697 /* 703 /*
698 * ->get_sdram_scrub_rate() return value semantics same as above. 704 * ->get_sdram_scrub_rate() return value semantics same as above.
699 */ 705 */
700 static ssize_t mci_sdram_scrub_rate_show(struct device *dev, 706 static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
701 struct device_attribute *mattr, 707 struct device_attribute *mattr,
702 char *data) 708 char *data)
703 { 709 {
704 struct mem_ctl_info *mci = to_mci(dev); 710 struct mem_ctl_info *mci = to_mci(dev);
705 int bandwidth = 0; 711 int bandwidth = 0;
706 712
707 if (!mci->get_sdram_scrub_rate) 713 if (!mci->get_sdram_scrub_rate)
708 return -ENODEV; 714 return -ENODEV;
709 715
710 bandwidth = mci->get_sdram_scrub_rate(mci); 716 bandwidth = mci->get_sdram_scrub_rate(mci);
711 if (bandwidth < 0) { 717 if (bandwidth < 0) {
712 edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); 718 edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
713 return bandwidth; 719 return bandwidth;
714 } 720 }
715 721
716 return sprintf(data, "%d\n", bandwidth); 722 return sprintf(data, "%d\n", bandwidth);
717 } 723 }
718 724
719 /* default attribute files for the MCI object */ 725 /* default attribute files for the MCI object */
720 static ssize_t mci_ue_count_show(struct device *dev, 726 static ssize_t mci_ue_count_show(struct device *dev,
721 struct device_attribute *mattr, 727 struct device_attribute *mattr,
722 char *data) 728 char *data)
723 { 729 {
724 struct mem_ctl_info *mci = to_mci(dev); 730 struct mem_ctl_info *mci = to_mci(dev);
725 731
726 return sprintf(data, "%d\n", mci->ue_mc); 732 return sprintf(data, "%d\n", mci->ue_mc);
727 } 733 }
728 734
729 static ssize_t mci_ce_count_show(struct device *dev, 735 static ssize_t mci_ce_count_show(struct device *dev,
730 struct device_attribute *mattr, 736 struct device_attribute *mattr,
731 char *data) 737 char *data)
732 { 738 {
733 struct mem_ctl_info *mci = to_mci(dev); 739 struct mem_ctl_info *mci = to_mci(dev);
734 740
735 return sprintf(data, "%d\n", mci->ce_mc); 741 return sprintf(data, "%d\n", mci->ce_mc);
736 } 742 }
737 743
738 static ssize_t mci_ce_noinfo_show(struct device *dev, 744 static ssize_t mci_ce_noinfo_show(struct device *dev,
739 struct device_attribute *mattr, 745 struct device_attribute *mattr,
740 char *data) 746 char *data)
741 { 747 {
742 struct mem_ctl_info *mci = to_mci(dev); 748 struct mem_ctl_info *mci = to_mci(dev);
743 749
744 return sprintf(data, "%d\n", mci->ce_noinfo_count); 750 return sprintf(data, "%d\n", mci->ce_noinfo_count);
745 } 751 }
746 752
747 static ssize_t mci_ue_noinfo_show(struct device *dev, 753 static ssize_t mci_ue_noinfo_show(struct device *dev,
748 struct device_attribute *mattr, 754 struct device_attribute *mattr,
749 char *data) 755 char *data)
750 { 756 {
751 struct mem_ctl_info *mci = to_mci(dev); 757 struct mem_ctl_info *mci = to_mci(dev);
752 758
753 return sprintf(data, "%d\n", mci->ue_noinfo_count); 759 return sprintf(data, "%d\n", mci->ue_noinfo_count);
754 } 760 }
755 761
756 static ssize_t mci_seconds_show(struct device *dev, 762 static ssize_t mci_seconds_show(struct device *dev,
757 struct device_attribute *mattr, 763 struct device_attribute *mattr,
758 char *data) 764 char *data)
759 { 765 {
760 struct mem_ctl_info *mci = to_mci(dev); 766 struct mem_ctl_info *mci = to_mci(dev);
761 767
762 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); 768 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
763 } 769 }
764 770
765 static ssize_t mci_ctl_name_show(struct device *dev, 771 static ssize_t mci_ctl_name_show(struct device *dev,
766 struct device_attribute *mattr, 772 struct device_attribute *mattr,
767 char *data) 773 char *data)
768 { 774 {
769 struct mem_ctl_info *mci = to_mci(dev); 775 struct mem_ctl_info *mci = to_mci(dev);
770 776
771 return sprintf(data, "%s\n", mci->ctl_name); 777 return sprintf(data, "%s\n", mci->ctl_name);
772 } 778 }
773 779
774 static ssize_t mci_size_mb_show(struct device *dev, 780 static ssize_t mci_size_mb_show(struct device *dev,
775 struct device_attribute *mattr, 781 struct device_attribute *mattr,
776 char *data) 782 char *data)
777 { 783 {
778 struct mem_ctl_info *mci = to_mci(dev); 784 struct mem_ctl_info *mci = to_mci(dev);
779 int total_pages = 0, csrow_idx, j; 785 int total_pages = 0, csrow_idx, j;
780 786
781 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { 787 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
782 struct csrow_info *csrow = &mci->csrows[csrow_idx]; 788 struct csrow_info *csrow = mci->csrows[csrow_idx];
783 789
784 for (j = 0; j < csrow->nr_channels; j++) { 790 for (j = 0; j < csrow->nr_channels; j++) {
785 struct dimm_info *dimm = csrow->channels[j].dimm; 791 struct dimm_info *dimm = csrow->channels[j]->dimm;
786 792
787 total_pages += dimm->nr_pages; 793 total_pages += dimm->nr_pages;
788 } 794 }
789 } 795 }
790 796
791 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); 797 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
792 } 798 }
793 799
794 static ssize_t mci_max_location_show(struct device *dev, 800 static ssize_t mci_max_location_show(struct device *dev,
795 struct device_attribute *mattr, 801 struct device_attribute *mattr,
796 char *data) 802 char *data)
797 { 803 {
798 struct mem_ctl_info *mci = to_mci(dev); 804 struct mem_ctl_info *mci = to_mci(dev);
799 int i; 805 int i;
800 char *p = data; 806 char *p = data;
801 807
802 for (i = 0; i < mci->n_layers; i++) { 808 for (i = 0; i < mci->n_layers; i++) {
803 p += sprintf(p, "%s %d ", 809 p += sprintf(p, "%s %d ",
804 edac_layer_name[mci->layers[i].type], 810 edac_layer_name[mci->layers[i].type],
805 mci->layers[i].size - 1); 811 mci->layers[i].size - 1);
806 } 812 }
807 813
808 return p - data; 814 return p - data;
809 } 815 }
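
For example, on a hypothetical controller with a csrow layer of size 8 and a channel layer of size 2, reading max_location yields "csrow 7 channel 1": each layer name followed by its highest valid index.
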
810 816
811 #ifdef CONFIG_EDAC_DEBUG 817 #ifdef CONFIG_EDAC_DEBUG
812 static ssize_t edac_fake_inject_write(struct file *file, 818 static ssize_t edac_fake_inject_write(struct file *file,
813 const char __user *data, 819 const char __user *data,
814 size_t count, loff_t *ppos) 820 size_t count, loff_t *ppos)
815 { 821 {
816 struct device *dev = file->private_data; 822 struct device *dev = file->private_data;
817 struct mem_ctl_info *mci = to_mci(dev); 823 struct mem_ctl_info *mci = to_mci(dev);
818 static enum hw_event_mc_err_type type; 824 static enum hw_event_mc_err_type type;
819 825
820 type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED 826 type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
821 : HW_EVENT_ERR_CORRECTED; 827 : HW_EVENT_ERR_CORRECTED;
822 828
823 printk(KERN_DEBUG 829 printk(KERN_DEBUG
824 "Generating a %s fake error to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n", 830 "Generating a %s fake error to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
825 (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE", 831 (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
826 mci->fake_inject_layer[0], 832 mci->fake_inject_layer[0],
827 mci->fake_inject_layer[1], 833 mci->fake_inject_layer[1],
828 mci->fake_inject_layer[2] 834 mci->fake_inject_layer[2]
829 ); 835 );
830 edac_mc_handle_error(type, mci, 0, 0, 0, 836 edac_mc_handle_error(type, mci, 0, 0, 0,
831 mci->fake_inject_layer[0], 837 mci->fake_inject_layer[0],
832 mci->fake_inject_layer[1], 838 mci->fake_inject_layer[1],
833 mci->fake_inject_layer[2], 839 mci->fake_inject_layer[2],
834 "FAKE ERROR", "for EDAC testing only", NULL); 840 "FAKE ERROR", "for EDAC testing only", NULL);
835 841
836 return count; 842 return count;
837 } 843 }
838 844
839 static int debugfs_open(struct inode *inode, struct file *file) 845 static int debugfs_open(struct inode *inode, struct file *file)
840 { 846 {
841 file->private_data = inode->i_private; 847 file->private_data = inode->i_private;
842 return 0; 848 return 0;
843 } 849 }
844 850
845 static const struct file_operations debug_fake_inject_fops = { 851 static const struct file_operations debug_fake_inject_fops = {
846 .open = debugfs_open, 852 .open = debugfs_open,
847 .write = edac_fake_inject_write, 853 .write = edac_fake_inject_write,
848 .llseek = generic_file_llseek, 854 .llseek = generic_file_llseek,
849 }; 855 };
850 #endif 856 #endif
851 857
852 /* default Control file */ 858 /* default Control file */
853 DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); 859 DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
854 860
855 /* default Attribute files */ 861 /* default Attribute files */
856 DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); 862 DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
857 DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); 863 DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
858 DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); 864 DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
859 DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); 865 DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
860 DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); 866 DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
861 DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); 867 DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
862 DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); 868 DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
863 DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL); 869 DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
864 870
865 /* memory scrubber attribute file */ 871 /* memory scrubber attribute file */
866 DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, 872 DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
867 mci_sdram_scrub_rate_store); 873 mci_sdram_scrub_rate_store);
868 874
869 static struct attribute *mci_attrs[] = { 875 static struct attribute *mci_attrs[] = {
870 &dev_attr_reset_counters.attr, 876 &dev_attr_reset_counters.attr,
871 &dev_attr_mc_name.attr, 877 &dev_attr_mc_name.attr,
872 &dev_attr_size_mb.attr, 878 &dev_attr_size_mb.attr,
873 &dev_attr_seconds_since_reset.attr, 879 &dev_attr_seconds_since_reset.attr,
874 &dev_attr_ue_noinfo_count.attr, 880 &dev_attr_ue_noinfo_count.attr,
875 &dev_attr_ce_noinfo_count.attr, 881 &dev_attr_ce_noinfo_count.attr,
876 &dev_attr_ue_count.attr, 882 &dev_attr_ue_count.attr,
877 &dev_attr_ce_count.attr, 883 &dev_attr_ce_count.attr,
878 &dev_attr_sdram_scrub_rate.attr, 884 &dev_attr_sdram_scrub_rate.attr,
879 &dev_attr_max_location.attr, 885 &dev_attr_max_location.attr,
880 NULL 886 NULL
881 }; 887 };
882 888
883 static struct attribute_group mci_attr_grp = { 889 static struct attribute_group mci_attr_grp = {
884 .attrs = mci_attrs, 890 .attrs = mci_attrs,
885 }; 891 };
886 892
887 static const struct attribute_group *mci_attr_groups[] = { 893 static const struct attribute_group *mci_attr_groups[] = {
888 &mci_attr_grp, 894 &mci_attr_grp,
889 NULL 895 NULL
890 }; 896 };
891 897
892 static void mci_attr_release(struct device *device) 898 static void mci_attr_release(struct device *dev)
893 { 899 {
894 debugf1("Releasing mci device %s\n", dev_name(device)); 900 struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
901
902 debugf1("Releasing csrow device %s\n", dev_name(dev));
903 kfree(mci);
895 } 904 }
896 905
897 static struct device_type mci_attr_type = { 906 static struct device_type mci_attr_type = {
898 .groups = mci_attr_groups, 907 .groups = mci_attr_groups,
899 .release = mci_attr_release, 908 .release = mci_attr_release,
900 }; 909 };
901 910
902 #ifdef CONFIG_EDAC_DEBUG 911 #ifdef CONFIG_EDAC_DEBUG
903 int edac_create_debug_nodes(struct mem_ctl_info *mci) 912 int edac_create_debug_nodes(struct mem_ctl_info *mci)
904 { 913 {
905 struct dentry *d, *parent; 914 struct dentry *d, *parent;
906 char name[80]; 915 char name[80];
907 int i; 916 int i;
908 917
909 d = debugfs_create_dir(mci->dev.kobj.name, mci->debugfs); 918 d = debugfs_create_dir(mci->dev.kobj.name, mci->debugfs);
910 if (!d) 919 if (!d)
911 return -ENOMEM; 920 return -ENOMEM;
912 parent = d; 921 parent = d;
913 922
914 for (i = 0; i < mci->n_layers; i++) { 923 for (i = 0; i < mci->n_layers; i++) {
915 sprintf(name, "fake_inject_%s", 924 sprintf(name, "fake_inject_%s",
916 edac_layer_name[mci->layers[i].type]); 925 edac_layer_name[mci->layers[i].type]);
917 d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent, 926 d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
918 &mci->fake_inject_layer[i]); 927 &mci->fake_inject_layer[i]);
919 if (!d) 928 if (!d)
920 goto nomem; 929 goto nomem;
921 } 930 }
922 931
923 d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent, 932 d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
924 &mci->fake_inject_ue); 933 &mci->fake_inject_ue);
925 if (!d) 934 if (!d)
926 goto nomem; 935 goto nomem;
927 936
928 d = debugfs_create_file("fake_inject", S_IWUSR, parent, 937 d = debugfs_create_file("fake_inject", S_IWUSR, parent,
929 &mci->dev, 938 &mci->dev,
930 &debug_fake_inject_fops); 939 &debug_fake_inject_fops);
931 if (!d) 940 if (!d)
932 goto nomem; 941 goto nomem;
933 942
934 return 0; 943 return 0;
935 nomem: 944 nomem:
936 debugfs_remove(mci->debugfs); 945 debugfs_remove(mci->debugfs);
937 return -ENOMEM; 946 return -ENOMEM;
938 } 947 }
939 #endif 948 #endif
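
Assuming debugfs is mounted in the usual place and mci->debugfs points at an edac/mc0 directory there, a fault-injection session amounts to writing the target indices into the fake_inject_<layer> files, optionally setting fake_inject_ue, and then writing anything to fake_inject, which fires edac_mc_handle_error() via edac_fake_inject_write() above.
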
940 949
941 /* 950 /*
942 * Create a new Memory Controller kobject instance, 951 * Create a new Memory Controller kobject instance,
943 * mc<id> under the 'mc' directory 952 * mc<id> under the 'mc' directory
944 * 953 *
945 * Return: 954 * Return:
946 * 0 Success 955 * 0 Success
947 * !0 Failure 956 * !0 Failure
948 */ 957 */
949 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) 958 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
950 { 959 {
951 int i, err; 960 int i, err;
952 961
953 debugf0("%s() idx=%d\n", __func__, mci->mc_idx); 962 /*
963 * The memory controller needs its own bus, in order to avoid
964 * namespace conflicts at /sys/bus/edac.
965 */
966 mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
967 if (!mci->bus.name)
968 return -ENOMEM;
969 debugf0("creating bus %s\n",mci->bus.name);
970 err = bus_register(&mci->bus);
971 if (err < 0)
972 return err;
954 973
955 /* get the /sys/devices/system/edac subsys reference */ 974 /* get the /sys/devices/system/edac subsys reference */
956
957 mci->dev.type = &mci_attr_type; 975 mci->dev.type = &mci_attr_type;
958 device_initialize(&mci->dev); 976 device_initialize(&mci->dev);
959 977
960 mci->dev.parent = &mci_pdev; 978 mci->dev.parent = mci_pdev;
961 mci->dev.bus = &mci->bus; 979 mci->dev.bus = &mci->bus;
962 dev_set_name(&mci->dev, "mc%d", mci->mc_idx); 980 dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
963 dev_set_drvdata(&mci->dev, mci); 981 dev_set_drvdata(&mci->dev, mci);
964 pm_runtime_forbid(&mci->dev); 982 pm_runtime_forbid(&mci->dev);
965 983
966 /*
967 * The memory controller needs its own bus, in order to avoid
968 * namespace conflicts at /sys/bus/edac.
969 */
970 debugf0("creating bus %s\n",mci->bus.name);
971 mci->bus.name = kstrdup(dev_name(&mci->dev), GFP_KERNEL);
972 err = bus_register(&mci->bus);
973 if (err < 0)
974 return err;
975
976 debugf0("%s(): creating device %s\n", __func__, 984 debugf0("%s(): creating device %s\n", __func__,
977 dev_name(&mci->dev)); 985 dev_name(&mci->dev));
978 err = device_add(&mci->dev); 986 err = device_add(&mci->dev);
979 if (err < 0) { 987 if (err < 0) {
980 bus_unregister(&mci->bus); 988 bus_unregister(&mci->bus);
981 kfree(mci->bus.name); 989 kfree(mci->bus.name);
982 return err; 990 return err;
983 } 991 }
984 992
985 /* 993 /*
986 * Create the dimm/rank devices 994 * Create the dimm/rank devices
987 */ 995 */
988 for (i = 0; i < mci->tot_dimms; i++) { 996 for (i = 0; i < mci->tot_dimms; i++) {
989 struct dimm_info *dimm = &mci->dimms[i]; 997 struct dimm_info *dimm = mci->dimms[i];
990 /* Only expose populated DIMMs */ 998 /* Only expose populated DIMMs */
991 if (dimm->nr_pages == 0) 999 if (dimm->nr_pages == 0)
992 continue; 1000 continue;
993 #ifdef CONFIG_EDAC_DEBUG 1001 #ifdef CONFIG_EDAC_DEBUG
994 debugf1("%s creating dimm%d, located at ", 1002 debugf1("%s creating dimm%d, located at ",
995 __func__, i); 1003 __func__, i);
996 if (edac_debug_level >= 1) { 1004 if (edac_debug_level >= 1) {
997 int lay; 1005 int lay;
998 for (lay = 0; lay < mci->n_layers; lay++) 1006 for (lay = 0; lay < mci->n_layers; lay++)
999 printk(KERN_CONT "%s %d ", 1007 printk(KERN_CONT "%s %d ",
1000 edac_layer_name[mci->layers[lay].type], 1008 edac_layer_name[mci->layers[lay].type],
1001 dimm->location[lay]); 1009 dimm->location[lay]);
1002 printk(KERN_CONT "\n"); 1010 printk(KERN_CONT "\n");
1003 } 1011 }
1004 #endif 1012 #endif
1005 err = edac_create_dimm_object(mci, dimm, i); 1013 err = edac_create_dimm_object(mci, dimm, i);
1006 if (err) { 1014 if (err) {
1007 debugf1("%s() failure: create dimm %d obj\n", 1015 debugf1("%s() failure: create dimm %d obj\n",
1008 __func__, i); 1016 __func__, i);
1009 goto fail; 1017 goto fail;
1010 } 1018 }
1011 } 1019 }
1012 1020
1013 #ifdef CONFIG_EDAC_LEGACY_SYSFS 1021 #ifdef CONFIG_EDAC_LEGACY_SYSFS
1014 err = edac_create_csrow_objects(mci); 1022 err = edac_create_csrow_objects(mci);
1015 if (err < 0) 1023 if (err < 0)
1016 goto fail; 1024 goto fail;
1017 #endif 1025 #endif
1018 1026
1019 #ifdef CONFIG_EDAC_DEBUG 1027 #ifdef CONFIG_EDAC_DEBUG
1020 edac_create_debug_nodes(mci); 1028 edac_create_debug_nodes(mci);
1021 #endif 1029 #endif
1022 return 0; 1030 return 0;
1023 1031
1024 fail: 1032 fail:
1025 for (i--; i >= 0; i--) { 1033 for (i--; i >= 0; i--) {
1026 struct dimm_info *dimm = &mci->dimms[i]; 1034 struct dimm_info *dimm = mci->dimms[i];
1027 if (dimm->nr_pages == 0) 1035 if (dimm->nr_pages == 0)
1028 continue; 1036 continue;
1029 put_device(&dimm->dev); 1037 put_device(&dimm->dev);
1030 device_del(&dimm->dev); 1038 device_del(&dimm->dev);
1031 } 1039 }
1032 put_device(&mci->dev); 1040 put_device(&mci->dev);
1033 device_del(&mci->dev); 1041 device_del(&mci->dev);
1034 bus_unregister(&mci->bus); 1042 bus_unregister(&mci->bus);
1035 kfree(mci->bus.name); 1043 kfree(mci->bus.name);
1036 return err; 1044 return err;
1037 } 1045 }
1038 1046
1039 /* 1047 /*
1040 * remove a Memory Controller instance 1048 * remove a Memory Controller instance
1041 */ 1049 */
1042 void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) 1050 void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1043 { 1051 {
1044 int i; 1052 int i;
1045 1053
1046 debugf0("%s()\n", __func__); 1054 debugf0("%s()\n", __func__);
1047 1055
1048 #ifdef CONFIG_EDAC_DEBUG 1056 #ifdef CONFIG_EDAC_DEBUG
1049 debugfs_remove(mci->debugfs); 1057 debugfs_remove(mci->debugfs);
1050 #endif 1058 #endif
1051 #ifdef CONFIG_EDAC_LEGACY_SYSFS 1059 #ifdef CONFIG_EDAC_LEGACY_SYSFS
1052 edac_delete_csrow_objects(mci); 1060 edac_delete_csrow_objects(mci);
1053 #endif 1061 #endif
1054 1062
1055 for (i = 0; i < mci->tot_dimms; i++) { 1063 for (i = 0; i < mci->tot_dimms; i++) {
1056 struct dimm_info *dimm = &mci->dimms[i]; 1064 struct dimm_info *dimm = mci->dimms[i];
1057 if (dimm->nr_pages == 0) 1065 if (dimm->nr_pages == 0)
1058 continue; 1066 continue;
1059 debugf0("%s(): removing device %s\n", __func__, 1067 debugf0("%s(): removing device %s\n", __func__,
1060 dev_name(&dimm->dev)); 1068 dev_name(&dimm->dev));
1061 put_device(&dimm->dev); 1069 put_device(&dimm->dev);
1062 device_del(&dimm->dev); 1070 device_del(&dimm->dev);
1063 } 1071 }
1064 } 1072 }
1065 1073
1066 void edac_unregister_sysfs(struct mem_ctl_info *mci) 1074 void edac_unregister_sysfs(struct mem_ctl_info *mci)
1067 { 1075 {
1068 debugf1("Unregistering device %s\n", dev_name(&mci->dev)); 1076 debugf1("Unregistering device %s\n", dev_name(&mci->dev));
1069 put_device(&mci->dev); 1077 put_device(&mci->dev);
1070 device_del(&mci->dev); 1078 device_del(&mci->dev);
1071 bus_unregister(&mci->bus); 1079 bus_unregister(&mci->bus);
1072 kfree(mci->bus.name); 1080 kfree(mci->bus.name);
1073 } 1081 }
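
For reference, the conventional teardown of a registered struct device mirrors device_initialize()/device_add(): device_del() first, then put_device() (device_unregister() is exactly that pair). A minimal sketch:

	static void example_device_teardown(struct device *dev)
	{
		device_del(dev);	/* unhook from sysfs and the bus */
		put_device(dev);	/* drop the reference; ->release() frees */
	}
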
1074 1082
1075 static void mc_attr_release(struct device *device) 1083 static void mc_attr_release(struct device *dev)
1076 { 1084 {
1077 debugf1("Releasing device %s\n", dev_name(device)); 1085 /*
1086 * There's no container structure here, as this is just the mci
1087 * parent device, used to create the /sys/devices/mc sysfs node.
1088 * So, there are no attributes on it.
1089 */
1090 debugf1("Releasing device %s\n", dev_name(dev));
1091 kfree(dev);
1078 } 1092 }
1079 1093
1080 static struct device_type mc_attr_type = { 1094 static struct device_type mc_attr_type = {
1081 .release = mc_attr_release, 1095 .release = mc_attr_release,
1082 }; 1096 };
1083 /* 1097 /*
1084 * Init/exit code for the module. Basically, creates/removes /sys/devices/system/edac/mc 1098 * Init/exit code for the module. Basically, creates/removes /sys/devices/system/edac/mc
1085 */ 1099 */
1086 int __init edac_mc_sysfs_init(void) 1100 int __init edac_mc_sysfs_init(void)
1087 { 1101 {
1088 struct bus_type *edac_subsys; 1102 struct bus_type *edac_subsys;
1089 int err; 1103 int err;
1090 1104
1091 /* get the /sys/devices/system/edac subsys reference */ 1105 /* get the /sys/devices/system/edac subsys reference */
1092 edac_subsys = edac_get_sysfs_subsys(); 1106 edac_subsys = edac_get_sysfs_subsys();
1093 if (edac_subsys == NULL) { 1107 if (edac_subsys == NULL) {
1094 debugf1("%s() no edac_subsys\n", __func__); 1108 debugf1("%s() no edac_subsys\n", __func__);
1095 return -EINVAL; 1109 return -EINVAL;
1096 } 1110 }
1097 1111
1098 mci_pdev.bus = edac_subsys; 1112 mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
1099 mci_pdev.type = &mc_attr_type;
1100 device_initialize(&mci_pdev);
1101 dev_set_name(&mci_pdev, "mc");
1102 1113
1103 err = device_add(&mci_pdev); 1114 mci_pdev->bus = edac_subsys;
1115 mci_pdev->type = &mc_attr_type;
1116 device_initialize(mci_pdev);
1117 dev_set_name(mci_pdev, "mc");
1118
1119 err = device_add(mci_pdev);
1104 if (err < 0) 1120 if (err < 0)
drivers/edac/i3000_edac.c
1 /* 1 /*
2 * Intel 3000/3010 Memory Controller kernel module 2 * Intel 3000/3010 Memory Controller kernel module
3 * Copyright (C) 2007 Akamai Technologies, Inc. 3 * Copyright (C) 2007 Akamai Technologies, Inc.
4 * Shamelessly copied from: 4 * Shamelessly copied from:
5 * Intel D82875P Memory Controller kernel module 5 * Intel D82875P Memory Controller kernel module
6 * (C) 2003 Linux Networx (http://lnxi.com) 6 * (C) 2003 Linux Networx (http://lnxi.com)
7 * 7 *
8 * This file may be distributed under the terms of the 8 * This file may be distributed under the terms of the
9 * GNU General Public License. 9 * GNU General Public License.
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/pci.h> 14 #include <linux/pci.h>
15 #include <linux/pci_ids.h> 15 #include <linux/pci_ids.h>
16 #include <linux/edac.h> 16 #include <linux/edac.h>
17 #include "edac_core.h" 17 #include "edac_core.h"
18 18
19 #define I3000_REVISION "1.1" 19 #define I3000_REVISION "1.1"
20 20
21 #define EDAC_MOD_STR "i3000_edac" 21 #define EDAC_MOD_STR "i3000_edac"
22 22
23 #define I3000_RANKS 8 23 #define I3000_RANKS 8
24 #define I3000_RANKS_PER_CHANNEL 4 24 #define I3000_RANKS_PER_CHANNEL 4
25 #define I3000_CHANNELS 2 25 #define I3000_CHANNELS 2
26 26
27 /* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */ 27 /* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */
28 28
29 #define I3000_MCHBAR 0x44 /* MCH Memory Mapped Register BAR */ 29 #define I3000_MCHBAR 0x44 /* MCH Memory Mapped Register BAR */
30 #define I3000_MCHBAR_MASK 0xffffc000 30 #define I3000_MCHBAR_MASK 0xffffc000
31 #define I3000_MMR_WINDOW_SIZE 16384 31 #define I3000_MMR_WINDOW_SIZE 16384
32 32
33 #define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b) 33 #define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
34 * 34 *
35 * 7:1 reserved 35 * 7:1 reserved
36 * 0 bit 32 of address 36 * 0 bit 32 of address
37 */ 37 */
38 #define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b) 38 #define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
39 * 39 *
40 * 31:7 address 40 * 31:7 address
41 * 6:1 reserved 41 * 6:1 reserved
42 * 0 Error channel 0/1 42 * 0 Error channel 0/1
43 */ 43 */
44 #define I3000_DEAP_GRAIN (1 << 7) 44 #define I3000_DEAP_GRAIN (1 << 7)
45 45
46 /* 46 /*
47 * Helper functions to decode the DEAP/EDEAP hardware registers. 47 * Helper functions to decode the DEAP/EDEAP hardware registers.
48 * 48 *
49 * The type promotion here is deliberate; we're deriving an 49 * The type promotion here is deliberate; we're deriving an
50 * unsigned long pfn and offset from hardware regs which are u8/u32. 50 * unsigned long pfn and offset from hardware regs which are u8/u32.
51 */ 51 */
52 52
53 static inline unsigned long deap_pfn(u8 edeap, u32 deap) 53 static inline unsigned long deap_pfn(u8 edeap, u32 deap)
54 { 54 {
55 deap >>= PAGE_SHIFT; 55 deap >>= PAGE_SHIFT;
56 deap |= (edeap & 1) << (32 - PAGE_SHIFT); 56 deap |= (edeap & 1) << (32 - PAGE_SHIFT);
57 return deap; 57 return deap;
58 } 58 }
59 59
60 static inline unsigned long deap_offset(u32 deap) 60 static inline unsigned long deap_offset(u32 deap)
61 { 61 {
62 return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK; 62 return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
63 } 63 }
64 64
65 static inline int deap_channel(u32 deap) 65 static inline int deap_channel(u32 deap)
66 { 66 {
67 return deap & 1; 67 return deap & 1;
68 } 68 }
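
As a worked example, take PAGE_SHIFT == 12, EDEAP == 0x01 and DEAP == 0x12345680: deap_pfn() returns (0x12345680 >> 12) | (1 << 20) = 0x112345, deap_offset() keeps the bits below the page boundary but above the 128-byte grain, giving 0x680, and deap_channel() reports channel 0.
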
69 69
70 #define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b) 70 #define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
71 * 71 *
72 * 7:0 DRAM ECC Syndrome 72 * 7:0 DRAM ECC Syndrome
73 */ 73 */
74 74
75 #define I3000_ERRSTS 0xc8 /* Error Status Register (16b) 75 #define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
76 * 76 *
77 * 15:12 reserved 77 * 15:12 reserved
78 * 11 MCH Thermal Sensor Event 78 * 11 MCH Thermal Sensor Event
79 * for SMI/SCI/SERR 79 * for SMI/SCI/SERR
80 * 10 reserved 80 * 10 reserved
81 * 9 LOCK to non-DRAM Memory Flag (LCKF) 81 * 9 LOCK to non-DRAM Memory Flag (LCKF)
82 * 8 Received Refresh Timeout Flag (RRTOF) 82 * 8 Received Refresh Timeout Flag (RRTOF)
83 * 7:2 reserved 83 * 7:2 reserved
84 * 1 Multi-bit DRAM ECC Error Flag (DMERR) 84 * 1 Multi-bit DRAM ECC Error Flag (DMERR)
85 * 0 Single-bit DRAM ECC Error Flag (DSERR) 85 * 0 Single-bit DRAM ECC Error Flag (DSERR)
86 */ 86 */
87 #define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */ 87 #define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */
88 #define I3000_ERRSTS_UE 0x0002 88 #define I3000_ERRSTS_UE 0x0002
89 #define I3000_ERRSTS_CE 0x0001 89 #define I3000_ERRSTS_CE 0x0001
90 90
91 #define I3000_ERRCMD 0xca /* Error Command (16b) 91 #define I3000_ERRCMD 0xca /* Error Command (16b)
92 * 92 *
93 * 15:12 reserved 93 * 15:12 reserved
94 * 11 SERR on MCH Thermal Sensor Event 94 * 11 SERR on MCH Thermal Sensor Event
95 * (TSESERR) 95 * (TSESERR)
96 * 10 reserved 96 * 10 reserved
97 * 9 SERR on LOCK to non-DRAM Memory 97 * 9 SERR on LOCK to non-DRAM Memory
98 * (LCKERR) 98 * (LCKERR)
99 * 8 SERR on DRAM Refresh Timeout 99 * 8 SERR on DRAM Refresh Timeout
100 * (DRTOERR) 100 * (DRTOERR)
101 * 7:2 reserved 101 * 7:2 reserved
102 * 1 SERR Multi-Bit DRAM ECC Error 102 * 1 SERR Multi-Bit DRAM ECC Error
103 * (DMERR) 103 * (DMERR)
104 * 0 SERR on Single-Bit ECC Error 104 * 0 SERR on Single-Bit ECC Error
105 * (DSERR) 105 * (DSERR)
106 */ 106 */
107 107
108 /* Intel MMIO register space - device 0 function 0 - MMR space */ 108 /* Intel MMIO register space - device 0 function 0 - MMR space */
109 109
110 #define I3000_DRB_SHIFT 25 /* 32MiB grain */ 110 #define I3000_DRB_SHIFT 25 /* 32MiB grain */
111 111
112 #define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4) 112 #define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
113 * 113 *
114 * 7:0 Channel 0 DRAM Rank Boundary Address 114 * 7:0 Channel 0 DRAM Rank Boundary Address
115 */ 115 */
116 #define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4) 116 #define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
117 * 117 *
118 * 7:0 Channel 1 DRAM Rank Boundary Address 118 * 7:0 Channel 1 DRAM Rank Boundary Address
119 */ 119 */
120 120
121 #define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2) 121 #define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
122 * 122 *
123 * 7 reserved 123 * 7 reserved
124 * 6:4 DRAM odd Rank Attribute 124 * 6:4 DRAM odd Rank Attribute
125 * 3 reserved 125 * 3 reserved
126 * 2:0 DRAM even Rank Attribute 126 * 2:0 DRAM even Rank Attribute
127 * 127 *
128 * Each attribute defines the page 128 * Each attribute defines the page
129 * size of the corresponding rank: 129 * size of the corresponding rank:
130 * 000: unpopulated 130 * 000: unpopulated
131 * 001: reserved 131 * 001: reserved
132 * 010: 4 KB 132 * 010: 4 KB
133 * 011: 8 KB 133 * 011: 8 KB
134 * 100: 16 KB 134 * 100: 16 KB
135 * Others: reserved 135 * Others: reserved
136 */ 136 */
137 #define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */ 137 #define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
138 138
139 static inline unsigned char odd_rank_attrib(unsigned char dra) 139 static inline unsigned char odd_rank_attrib(unsigned char dra)
140 { 140 {
141 return (dra & 0x70) >> 4; 141 return (dra & 0x70) >> 4;
142 } 142 }
143 143
144 static inline unsigned char even_rank_attrib(unsigned char dra) 144 static inline unsigned char even_rank_attrib(unsigned char dra)
145 { 145 {
146 return dra & 0x07; 146 return dra & 0x07;
147 } 147 }
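
For instance, a DRA byte of 0x24 decodes to an odd rank attribute of 2 (4 KB pages) and an even rank attribute of 4 (16 KB pages), per the attribute table above.
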
148 148
149 #define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b) 149 #define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
150 * 150 *
151 * 31:30 reserved 151 * 31:30 reserved
152 * 29 Initialization Complete (IC) 152 * 29 Initialization Complete (IC)
153 * 28:11 reserved 153 * 28:11 reserved
154 * 10:8 Refresh Mode Select (RMS) 154 * 10:8 Refresh Mode Select (RMS)
155 * 7 reserved 155 * 7 reserved
156 * 6:4 Mode Select (SMS) 156 * 6:4 Mode Select (SMS)
157 * 3:2 reserved 157 * 3:2 reserved
158 * 1:0 DRAM Type (DT) 158 * 1:0 DRAM Type (DT)
159 */ 159 */
160 160
161 #define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b) 161 #define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
162 * 162 *
163 * 31 Enhanced Addressing Enable (ENHADE) 163 * 31 Enhanced Addressing Enable (ENHADE)
164 * 30:0 reserved 164 * 30:0 reserved
165 */ 165 */
166 166
167 enum i3000p_chips { 167 enum i3000p_chips {
168 I3000 = 0, 168 I3000 = 0,
169 }; 169 };
170 170
171 struct i3000_dev_info { 171 struct i3000_dev_info {
172 const char *ctl_name; 172 const char *ctl_name;
173 }; 173 };
174 174
175 struct i3000_error_info { 175 struct i3000_error_info {
176 u16 errsts; 176 u16 errsts;
177 u8 derrsyn; 177 u8 derrsyn;
178 u8 edeap; 178 u8 edeap;
179 u32 deap; 179 u32 deap;
180 u16 errsts2; 180 u16 errsts2;
181 }; 181 };
182 182
183 static const struct i3000_dev_info i3000_devs[] = { 183 static const struct i3000_dev_info i3000_devs[] = {
184 [I3000] = { 184 [I3000] = {
185 .ctl_name = "i3000"}, 185 .ctl_name = "i3000"},
186 }; 186 };
187 187
188 static struct pci_dev *mci_pdev; 188 static struct pci_dev *mci_pdev;
189 static int i3000_registered = 1; 189 static int i3000_registered = 1;
190 static struct edac_pci_ctl_info *i3000_pci; 190 static struct edac_pci_ctl_info *i3000_pci;
191 191
192 static void i3000_get_error_info(struct mem_ctl_info *mci, 192 static void i3000_get_error_info(struct mem_ctl_info *mci,
193 struct i3000_error_info *info) 193 struct i3000_error_info *info)
194 { 194 {
195 struct pci_dev *pdev; 195 struct pci_dev *pdev;
196 196
197 pdev = to_pci_dev(mci->pdev); 197 pdev = to_pci_dev(mci->pdev);
198 198
199 /* 199 /*
200 * This is a mess because there is no atomic way to read all the 200 * This is a mess because there is no atomic way to read all the
201 * registers at once, so a CE can be overwritten by a UE between 201 * registers at once, so a CE can be overwritten by a UE between
202 * reads. 202 * reads.
203 */ 203 */
204 pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts); 204 pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
205 if (!(info->errsts & I3000_ERRSTS_BITS)) 205 if (!(info->errsts & I3000_ERRSTS_BITS))
206 return; 206 return;
207 pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap); 207 pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
208 pci_read_config_dword(pdev, I3000_DEAP, &info->deap); 208 pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
209 pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn); 209 pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
210 pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2); 210 pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);
211 211
212 /* 212 /*
213 * If the error is the same for both reads then the first set 213 * If the error is the same for both reads then the first set
214 * of reads is valid. If there is a change then there is a CE 214 * of reads is valid. If there is a change then there is a CE
215 * with no info and the second set of reads is valid and 215 * with no info and the second set of reads is valid and
216 * should be UE info. 216 * should be UE info.
217 */ 217 */
218 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { 218 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
219 pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap); 219 pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
220 pci_read_config_dword(pdev, I3000_DEAP, &info->deap); 220 pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
221 pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn); 221 pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
222 } 222 }
223 223
224 /* 224 /*
225 * Clear any error bits. 225 * Clear any error bits.
226 * (Yes, we really clear bits by writing 1 to them.) 226 * (Yes, we really clear bits by writing 1 to them.)
227 */ 227 */
228 pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS, 228 pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
229 I3000_ERRSTS_BITS); 229 I3000_ERRSTS_BITS);
230 } 230 }
231 231
232 static int i3000_process_error_info(struct mem_ctl_info *mci, 232 static int i3000_process_error_info(struct mem_ctl_info *mci,
233 struct i3000_error_info *info, 233 struct i3000_error_info *info,
234 int handle_errors) 234 int handle_errors)
235 { 235 {
236 int row, multi_chan, channel; 236 int row, multi_chan, channel;
237 unsigned long pfn, offset; 237 unsigned long pfn, offset;
238 238
239 multi_chan = mci->csrows[0].nr_channels - 1; 239 multi_chan = mci->csrows[0]->nr_channels - 1;
240 240
241 if (!(info->errsts & I3000_ERRSTS_BITS)) 241 if (!(info->errsts & I3000_ERRSTS_BITS))
242 return 0; 242 return 0;
243 243
244 if (!handle_errors) 244 if (!handle_errors)
245 return 1; 245 return 1;
246 246
247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { 247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
248 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 248 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
249 -1, -1, -1, 249 -1, -1, -1,
250 "UE overwrote CE", "", NULL); 250 "UE overwrote CE", "", NULL);
251 info->errsts = info->errsts2; 251 info->errsts = info->errsts2;
252 } 252 }
253 253
254 pfn = deap_pfn(info->edeap, info->deap); 254 pfn = deap_pfn(info->edeap, info->deap);
255 offset = deap_offset(info->deap); 255 offset = deap_offset(info->deap);
256 channel = deap_channel(info->deap); 256 channel = deap_channel(info->deap);
257 257
258 row = edac_mc_find_csrow_by_page(mci, pfn); 258 row = edac_mc_find_csrow_by_page(mci, pfn);
259 259
260 if (info->errsts & I3000_ERRSTS_UE) 260 if (info->errsts & I3000_ERRSTS_UE)
261 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 261 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
262 pfn, offset, 0, 262 pfn, offset, 0,
263 row, -1, -1, 263 row, -1, -1,
264 "i3000 UE", "", NULL); 264 "i3000 UE", "", NULL);
265 else 265 else
266 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 266 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
267 pfn, offset, info->derrsyn, 267 pfn, offset, info->derrsyn,
268 row, multi_chan ? channel : 0, -1, 268 row, multi_chan ? channel : 0, -1,
269 "i3000 CE", "", NULL); 269 "i3000 CE", "", NULL);
270 270
271 return 1; 271 return 1;
272 } 272 }
273 273
274 static void i3000_check(struct mem_ctl_info *mci) 274 static void i3000_check(struct mem_ctl_info *mci)
275 { 275 {
276 struct i3000_error_info info; 276 struct i3000_error_info info;
277 277
278 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 278 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
279 i3000_get_error_info(mci, &info); 279 i3000_get_error_info(mci, &info);
280 i3000_process_error_info(mci, &info, 1); 280 i3000_process_error_info(mci, &info, 1);
281 } 281 }
282 282
283 static int i3000_is_interleaved(const unsigned char *c0dra, 283 static int i3000_is_interleaved(const unsigned char *c0dra,
284 const unsigned char *c1dra, 284 const unsigned char *c1dra,
285 const unsigned char *c0drb, 285 const unsigned char *c0drb,
286 const unsigned char *c1drb) 286 const unsigned char *c1drb)
287 { 287 {
288 int i; 288 int i;
289 289
290 /* 290 /*
291 * If the channels aren't populated identically then 291 * If the channels aren't populated identically then
292 * we're not interleaved. 292 * we're not interleaved.
293 */ 293 */
294 for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++) 294 for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
295 if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) || 295 if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
296 even_rank_attrib(c0dra[i]) != 296 even_rank_attrib(c0dra[i]) !=
297 even_rank_attrib(c1dra[i])) 297 even_rank_attrib(c1dra[i]))
298 return 0; 298 return 0;
299 299
300 /* 300 /*
301 * If the rank boundaries for the two channels are different 301 * If the rank boundaries for the two channels are different
302 * then we're not interleaved. 302 * then we're not interleaved.
303 */ 303 */
304 for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) 304 for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
305 if (c0drb[i] != c1drb[i]) 305 if (c0drb[i] != c1drb[i])
306 return 0; 306 return 0;
307 307
308 return 1; 308 return 1;
309 } 309 }
310 310
311 static int i3000_probe1(struct pci_dev *pdev, int dev_idx) 311 static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
312 { 312 {
313 int rc; 313 int rc;
314 int i, j; 314 int i, j;
315 struct mem_ctl_info *mci = NULL; 315 struct mem_ctl_info *mci = NULL;
316 struct edac_mc_layer layers[2]; 316 struct edac_mc_layer layers[2];
317 unsigned long last_cumul_size, nr_pages; 317 unsigned long last_cumul_size, nr_pages;
318 int interleaved, nr_channels; 318 int interleaved, nr_channels;
319 unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS]; 319 unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
320 unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2]; 320 unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
321 unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL]; 321 unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
322 unsigned long mchbar; 322 unsigned long mchbar;
323 void __iomem *window; 323 void __iomem *window;
324 324
325 debugf0("MC: %s()\n", __func__); 325 debugf0("MC: %s()\n", __func__);
326 326
327 pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *)&mchbar); 327 pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *)&mchbar);
328 mchbar &= I3000_MCHBAR_MASK; 328 mchbar &= I3000_MCHBAR_MASK;
329 window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE); 329 window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
330 if (!window) { 330 if (!window) {
331 printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n", 331 printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
332 mchbar); 332 mchbar);
333 return -ENODEV; 333 return -ENODEV;
334 } 334 }
335 335
336 c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */ 336 c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
337 c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */ 337 c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
338 c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */ 338 c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
339 c1dra[1] = readb(window + I3000_C1DRA + 1); /* ranks 2,3 */ 339 c1dra[1] = readb(window + I3000_C1DRA + 1); /* ranks 2,3 */
340 340
341 for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) { 341 for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
342 c0drb[i] = readb(window + I3000_C0DRB + i); 342 c0drb[i] = readb(window + I3000_C0DRB + i);
343 c1drb[i] = readb(window + I3000_C1DRB + i); 343 c1drb[i] = readb(window + I3000_C1DRB + i);
344 } 344 }
345 345
346 iounmap(window); 346 iounmap(window);
347 347
348 /* 348 /*
349 * Figure out how many channels we have. 349 * Figure out how many channels we have.
350 * 350 *
351 * If we have what the datasheet calls "asymmetric channels" 351 * If we have what the datasheet calls "asymmetric channels"
352 * (essentially the same as what was called "virtual single 352 * (essentially the same as what was called "virtual single
353 * channel mode" in the i82875) then it's a single channel as 353 * channel mode" in the i82875) then it's a single channel as
354 * far as EDAC is concerned. 354 * far as EDAC is concerned.
355 */ 355 */
356 interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb); 356 interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
357 nr_channels = interleaved ? 2 : 1; 357 nr_channels = interleaved ? 2 : 1;
358 358
359 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 359 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
360 layers[0].size = I3000_RANKS / nr_channels; 360 layers[0].size = I3000_RANKS / nr_channels;
361 layers[0].is_virt_csrow = true; 361 layers[0].is_virt_csrow = true;
362 layers[1].type = EDAC_MC_LAYER_CHANNEL; 362 layers[1].type = EDAC_MC_LAYER_CHANNEL;
363 layers[1].size = nr_channels; 363 layers[1].size = nr_channels;
364 layers[1].is_virt_csrow = false; 364 layers[1].is_virt_csrow = false;
365 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); 365 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
366 if (!mci) 366 if (!mci)
367 return -ENOMEM; 367 return -ENOMEM;
368 368
369 debugf3("MC: %s(): init mci\n", __func__); 369 debugf3("MC: %s(): init mci\n", __func__);
370 370
371 mci->pdev = &pdev->dev; 371 mci->pdev = &pdev->dev;
372 mci->mtype_cap = MEM_FLAG_DDR2; 372 mci->mtype_cap = MEM_FLAG_DDR2;
373 373
374 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 374 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
375 mci->edac_cap = EDAC_FLAG_SECDED; 375 mci->edac_cap = EDAC_FLAG_SECDED;
376 376
377 mci->mod_name = EDAC_MOD_STR; 377 mci->mod_name = EDAC_MOD_STR;
378 mci->mod_ver = I3000_REVISION; 378 mci->mod_ver = I3000_REVISION;
379 mci->ctl_name = i3000_devs[dev_idx].ctl_name; 379 mci->ctl_name = i3000_devs[dev_idx].ctl_name;
380 mci->dev_name = pci_name(pdev); 380 mci->dev_name = pci_name(pdev);
381 mci->edac_check = i3000_check; 381 mci->edac_check = i3000_check;
382 mci->ctl_page_to_phys = NULL; 382 mci->ctl_page_to_phys = NULL;
383 383
384 /* 384 /*
385 * The dram rank boundary (DRB) reg values are boundary addresses 385 * The dram rank boundary (DRB) reg values are boundary addresses
386 * for each DRAM rank with a granularity of 32MB. DRB regs are 386 * for each DRAM rank with a granularity of 32MB. DRB regs are
387 * cumulative; the last one will contain the total memory 387 * cumulative; the last one will contain the total memory
388 * contained in all ranks. 388 * contained in all ranks.
389 * 389 *
390 * If we're in interleaved mode then we're only walking through 390 * If we're in interleaved mode then we're only walking through
391 * the ranks of controller 0, so we double all the values we see. 391 * the ranks of controller 0, so we double all the values we see.
392 */ 392 */
393 for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) { 393 for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
394 u8 value; 394 u8 value;
395 u32 cumul_size; 395 u32 cumul_size;
396 struct csrow_info *csrow = &mci->csrows[i]; 396 struct csrow_info *csrow = mci->csrows[i];
397 397
398 value = drb[i]; 398 value = drb[i];
399 cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT); 399 cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
400 if (interleaved) 400 if (interleaved)
401 cumul_size <<= 1; 401 cumul_size <<= 1;
402 debugf3("MC: %s(): (%d) cumul_size 0x%x\n", 402 debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
403 __func__, i, cumul_size); 403 __func__, i, cumul_size);
404 if (cumul_size == last_cumul_size) 404 if (cumul_size == last_cumul_size)
405 continue; 405 continue;
406 406
407 csrow->first_page = last_cumul_size; 407 csrow->first_page = last_cumul_size;
408 csrow->last_page = cumul_size - 1; 408 csrow->last_page = cumul_size - 1;
409 nr_pages = cumul_size - last_cumul_size; 409 nr_pages = cumul_size - last_cumul_size;
410 last_cumul_size = cumul_size; 410 last_cumul_size = cumul_size;
411 411
412 for (j = 0; j < nr_channels; j++) { 412 for (j = 0; j < nr_channels; j++) {
413 struct dimm_info *dimm = csrow->channels[j].dimm; 413 struct dimm_info *dimm = csrow->channels[j]->dimm;
414 414
415 dimm->nr_pages = nr_pages / nr_channels; 415 dimm->nr_pages = nr_pages / nr_channels;
416 dimm->grain = I3000_DEAP_GRAIN; 416 dimm->grain = I3000_DEAP_GRAIN;
417 dimm->mtype = MEM_DDR2; 417 dimm->mtype = MEM_DDR2;
418 dimm->dtype = DEV_UNKNOWN; 418 dimm->dtype = DEV_UNKNOWN;
419 dimm->edac_mode = EDAC_UNKNOWN; 419 dimm->edac_mode = EDAC_UNKNOWN;
420 } 420 }
421 } 421 }
422 422
423 /* 423 /*
424 * Clear any error bits. 424 * Clear any error bits.
425 * (Yes, we really clear bits by writing 1 to them.) 425 * (Yes, we really clear bits by writing 1 to them.)
426 */ 426 */
427 pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS, 427 pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
428 I3000_ERRSTS_BITS); 428 I3000_ERRSTS_BITS);
429 429
430 rc = -ENODEV; 430 rc = -ENODEV;
431 if (edac_mc_add_mc(mci)) { 431 if (edac_mc_add_mc(mci)) {
432 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); 432 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
433 goto fail; 433 goto fail;
434 } 434 }
435 435
436 /* allocating generic PCI control info */ 436 /* allocating generic PCI control info */
437 i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 437 i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
438 if (!i3000_pci) { 438 if (!i3000_pci) {
439 printk(KERN_WARNING 439 printk(KERN_WARNING
440 "%s(): Unable to create PCI control\n", 440 "%s(): Unable to create PCI control\n",
441 __func__); 441 __func__);
442 printk(KERN_WARNING 442 printk(KERN_WARNING
443 "%s(): PCI error report via EDAC not setup\n", 443 "%s(): PCI error report via EDAC not setup\n",
444 __func__); 444 __func__);
445 } 445 }
446 446
447 /* get this far and it's successful */ 447 /* get this far and it's successful */
448 debugf3("MC: %s(): success\n", __func__); 448 debugf3("MC: %s(): success\n", __func__);
449 return 0; 449 return 0;
450 450
451 fail: 451 fail:
452 if (mci) 452 if (mci)
453 edac_mc_free(mci); 453 edac_mc_free(mci);
454 454
455 return rc; 455 return rc;
456 } 456 }
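
To make the DRB arithmetic concrete: with I3000_DRB_SHIFT == 25 and PAGE_SHIFT == 12, a DRB byte of 8 (8 x 32 MiB = 256 MiB cumulative) gives cumul_size = 8 << 13 = 65536 pages; a rank whose cumul_size equals the previous boundary is unpopulated and skipped, and in interleaved mode the doubled value accounts for the matching ranks on channel 1.
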
457 457
458 /* returns count (>= 0), or negative on error */ 458 /* returns count (>= 0), or negative on error */
459 static int __devinit i3000_init_one(struct pci_dev *pdev, 459 static int __devinit i3000_init_one(struct pci_dev *pdev,
460 const struct pci_device_id *ent) 460 const struct pci_device_id *ent)
461 { 461 {
462 int rc; 462 int rc;
463 463
464 debugf0("MC: %s()\n", __func__); 464 debugf0("MC: %s()\n", __func__);
465 465
466 if (pci_enable_device(pdev) < 0) 466 if (pci_enable_device(pdev) < 0)
467 return -EIO; 467 return -EIO;
468 468
469 rc = i3000_probe1(pdev, ent->driver_data); 469 rc = i3000_probe1(pdev, ent->driver_data);
470 if (!mci_pdev) 470 if (!mci_pdev)
471 mci_pdev = pci_dev_get(pdev); 471 mci_pdev = pci_dev_get(pdev);
472 472
473 return rc; 473 return rc;
474 } 474 }
475 475
476 static void __devexit i3000_remove_one(struct pci_dev *pdev) 476 static void __devexit i3000_remove_one(struct pci_dev *pdev)
477 { 477 {
478 struct mem_ctl_info *mci; 478 struct mem_ctl_info *mci;
479 479
480 debugf0("%s()\n", __func__); 480 debugf0("%s()\n", __func__);
481 481
482 if (i3000_pci) 482 if (i3000_pci)
483 edac_pci_release_generic_ctl(i3000_pci); 483 edac_pci_release_generic_ctl(i3000_pci);
484 484
485 mci = edac_mc_del_mc(&pdev->dev); 485 mci = edac_mc_del_mc(&pdev->dev);
486 if (!mci) 486 if (!mci)
487 return; 487 return;
488 488
489 edac_mc_free(mci); 489 edac_mc_free(mci);
490 } 490 }
491 491
492 static DEFINE_PCI_DEVICE_TABLE(i3000_pci_tbl) = { 492 static DEFINE_PCI_DEVICE_TABLE(i3000_pci_tbl) = {
493 { 493 {
494 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 494 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
495 I3000}, 495 I3000},
496 { 496 {
497 0, 497 0,
498 } /* 0 terminated list. */ 498 } /* 0 terminated list. */
499 }; 499 };
500 500
501 MODULE_DEVICE_TABLE(pci, i3000_pci_tbl); 501 MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
502 502
503 static struct pci_driver i3000_driver = { 503 static struct pci_driver i3000_driver = {
504 .name = EDAC_MOD_STR, 504 .name = EDAC_MOD_STR,
505 .probe = i3000_init_one, 505 .probe = i3000_init_one,
506 .remove = __devexit_p(i3000_remove_one), 506 .remove = __devexit_p(i3000_remove_one),
507 .id_table = i3000_pci_tbl, 507 .id_table = i3000_pci_tbl,
508 }; 508 };
509 509
510 static int __init i3000_init(void) 510 static int __init i3000_init(void)
511 { 511 {
512 int pci_rc; 512 int pci_rc;
513 513
514 debugf3("MC: %s()\n", __func__); 514 debugf3("MC: %s()\n", __func__);
515 515
516 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 516 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
517 opstate_init(); 517 opstate_init();
518 518
519 pci_rc = pci_register_driver(&i3000_driver); 519 pci_rc = pci_register_driver(&i3000_driver);
520 if (pci_rc < 0) 520 if (pci_rc < 0)
521 goto fail0; 521 goto fail0;
522 522
523 if (!mci_pdev) { 523 if (!mci_pdev) {
524 i3000_registered = 0; 524 i3000_registered = 0;
525 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 525 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
526 PCI_DEVICE_ID_INTEL_3000_HB, NULL); 526 PCI_DEVICE_ID_INTEL_3000_HB, NULL);
527 if (!mci_pdev) { 527 if (!mci_pdev) {
528 debugf0("i3000 pci_get_device fail\n"); 528 debugf0("i3000 pci_get_device fail\n");
529 pci_rc = -ENODEV; 529 pci_rc = -ENODEV;
530 goto fail1; 530 goto fail1;
531 } 531 }
532 532
533 pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl); 533 pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
534 if (pci_rc < 0) { 534 if (pci_rc < 0) {
535 debugf0("i3000 init fail\n"); 535 debugf0("i3000 init fail\n");
536 pci_rc = -ENODEV; 536 pci_rc = -ENODEV;
537 goto fail1; 537 goto fail1;
538 } 538 }
539 } 539 }
540 540
541 return 0; 541 return 0;
542 542
543 fail1: 543 fail1:
544 pci_unregister_driver(&i3000_driver); 544 pci_unregister_driver(&i3000_driver);
545 545
546 fail0: 546 fail0:
547 if (mci_pdev) 547 if (mci_pdev)
548 pci_dev_put(mci_pdev); 548 pci_dev_put(mci_pdev);
549 549
550 return pci_rc; 550 return pci_rc;
551 } 551 }
552 552
553 static void __exit i3000_exit(void) 553 static void __exit i3000_exit(void)
554 { 554 {
555 debugf3("MC: %s()\n", __func__); 555 debugf3("MC: %s()\n", __func__);
556 556
557 pci_unregister_driver(&i3000_driver); 557 pci_unregister_driver(&i3000_driver);
558 if (!i3000_registered) { 558 if (!i3000_registered) {
559 i3000_remove_one(mci_pdev); 559 i3000_remove_one(mci_pdev);
560 pci_dev_put(mci_pdev); 560 pci_dev_put(mci_pdev);
561 } 561 }
562 } 562 }
563 563
564 module_init(i3000_init); 564 module_init(i3000_init);
565 module_exit(i3000_exit); 565 module_exit(i3000_exit);
566 566
567 MODULE_LICENSE("GPL"); 567 MODULE_LICENSE("GPL");
568 MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott"); 568 MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
569 MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers"); 569 MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
570 570
571 module_param(edac_op_state, int, 0444); 571 module_param(edac_op_state, int, 0444);
572 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 572 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
573 573
drivers/edac/i3200_edac.c
1 /* 1 /*
2 * Intel 3200/3210 Memory Controller kernel module 2 * Intel 3200/3210 Memory Controller kernel module
3 * Copyright (C) 2008-2009 Akamai Technologies, Inc. 3 * Copyright (C) 2008-2009 Akamai Technologies, Inc.
4 * Portions by Hitoshi Mitake <h.mitake@gmail.com>. 4 * Portions by Hitoshi Mitake <h.mitake@gmail.com>.
5 * 5 *
6 * This file may be distributed under the terms of the 6 * This file may be distributed under the terms of the
7 * GNU General Public License. 7 * GNU General Public License.
8 */ 8 */
9 9
10 #include <linux/module.h> 10 #include <linux/module.h>
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/pci.h> 12 #include <linux/pci.h>
13 #include <linux/pci_ids.h> 13 #include <linux/pci_ids.h>
14 #include <linux/edac.h> 14 #include <linux/edac.h>
15 #include <linux/io.h> 15 #include <linux/io.h>
16 #include "edac_core.h" 16 #include "edac_core.h"
17 17
18 #include <asm-generic/io-64-nonatomic-lo-hi.h> 18 #include <asm-generic/io-64-nonatomic-lo-hi.h>
19 19
20 #define I3200_REVISION "1.1" 20 #define I3200_REVISION "1.1"
21 21
22 #define EDAC_MOD_STR "i3200_edac" 22 #define EDAC_MOD_STR "i3200_edac"
23 23
24 #define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 24 #define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
25 25
26 #define I3200_DIMMS 4 26 #define I3200_DIMMS 4
27 #define I3200_RANKS 8 27 #define I3200_RANKS 8
28 #define I3200_RANKS_PER_CHANNEL 4 28 #define I3200_RANKS_PER_CHANNEL 4
29 #define I3200_CHANNELS 2 29 #define I3200_CHANNELS 2
30 30
31 /* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */ 31 /* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */
32 32
33 #define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ 33 #define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
34 #define I3200_MCHBAR_HIGH 0x4c 34 #define I3200_MCHBAR_HIGH 0x4c
35 #define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ 35 #define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
36 #define I3200_MMR_WINDOW_SIZE 16384 36 #define I3200_MMR_WINDOW_SIZE 16384
37 37
38 #define I3200_TOM 0xa0 /* Top of Memory (16b) 38 #define I3200_TOM 0xa0 /* Top of Memory (16b)
39 * 39 *
40 * 15:10 reserved 40 * 15:10 reserved
41 * 9:0 total populated physical memory 41 * 9:0 total populated physical memory
42 */ 42 */
43 #define I3200_TOM_MASK 0x3ff /* bits 9:0 */ 43 #define I3200_TOM_MASK 0x3ff /* bits 9:0 */
44 #define I3200_TOM_SHIFT 26 /* 64MiB grain */ 44 #define I3200_TOM_SHIFT 26 /* 64MiB grain */
45 45
46 #define I3200_ERRSTS 0xc8 /* Error Status Register (16b) 46 #define I3200_ERRSTS 0xc8 /* Error Status Register (16b)
47 * 47 *
48 * 15 reserved 48 * 15 reserved
49 * 14 Isochronous TBWRR Run Behind FIFO Full 49 * 14 Isochronous TBWRR Run Behind FIFO Full
50 * (ITCV) 50 * (ITCV)
51 * 13 Isochronous TBWRR Run Behind FIFO Put 51 * 13 Isochronous TBWRR Run Behind FIFO Put
52 * (ITSTV) 52 * (ITSTV)
53 * 12 reserved 53 * 12 reserved
54 * 11 MCH Thermal Sensor Event 54 * 11 MCH Thermal Sensor Event
55 * for SMI/SCI/SERR (GTSE) 55 * for SMI/SCI/SERR (GTSE)
56 * 10 reserved 56 * 10 reserved
57 * 9 LOCK to non-DRAM Memory Flag (LCKF) 57 * 9 LOCK to non-DRAM Memory Flag (LCKF)
58 * 8 reserved 58 * 8 reserved
59 * 7 DRAM Throttle Flag (DTF) 59 * 7 DRAM Throttle Flag (DTF)
60 * 6:2 reserved 60 * 6:2 reserved
61 * 1 Multi-bit DRAM ECC Error Flag (DMERR) 61 * 1 Multi-bit DRAM ECC Error Flag (DMERR)
62 * 0 Single-bit DRAM ECC Error Flag (DSERR) 62 * 0 Single-bit DRAM ECC Error Flag (DSERR)
63 */ 63 */
64 #define I3200_ERRSTS_UE 0x0002 64 #define I3200_ERRSTS_UE 0x0002
65 #define I3200_ERRSTS_CE 0x0001 65 #define I3200_ERRSTS_CE 0x0001
66 #define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE) 66 #define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE)
67 67
68 68
69 /* Intel MMIO register space - device 0 function 0 - MMR space */ 69 /* Intel MMIO register space - device 0 function 0 - MMR space */
70 70
71 #define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4) 71 #define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
72 * 72 *
73 * 15:10 reserved 73 * 15:10 reserved
74 * 9:0 Channel 0 DRAM Rank Boundary Address 74 * 9:0 Channel 0 DRAM Rank Boundary Address
75 */ 75 */
76 #define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */ 76 #define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
77 #define I3200_DRB_MASK 0x3ff /* bits 9:0 */ 77 #define I3200_DRB_MASK 0x3ff /* bits 9:0 */
78 #define I3200_DRB_SHIFT 26 /* 64MiB grain */ 78 #define I3200_DRB_SHIFT 26 /* 64MiB grain */
79 79
80 #define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b) 80 #define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
81 * 81 *
82 * 63:48 Error Column Address (ERRCOL) 82 * 63:48 Error Column Address (ERRCOL)
83 * 47:32 Error Row Address (ERRROW) 83 * 47:32 Error Row Address (ERRROW)
84 * 31:29 Error Bank Address (ERRBANK) 84 * 31:29 Error Bank Address (ERRBANK)
85 * 28:27 Error Rank Address (ERRRANK) 85 * 28:27 Error Rank Address (ERRRANK)
86 * 26:24 reserved 86 * 26:24 reserved
87 * 23:16 Error Syndrome (ERRSYND) 87 * 23:16 Error Syndrome (ERRSYND)
88 * 15: 2 reserved 88 * 15: 2 reserved
89 * 1 Multiple Bit Error Status (MERRSTS) 89 * 1 Multiple Bit Error Status (MERRSTS)
90 * 0 Correctable Error Status (CERRSTS) 90 * 0 Correctable Error Status (CERRSTS)
91 */ 91 */
92 #define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */ 92 #define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */
93 #define I3200_ECCERRLOG_CE 0x1 93 #define I3200_ECCERRLOG_CE 0x1
94 #define I3200_ECCERRLOG_UE 0x2 94 #define I3200_ECCERRLOG_UE 0x2
95 #define I3200_ECCERRLOG_RANK_BITS 0x18000000 95 #define I3200_ECCERRLOG_RANK_BITS 0x18000000
96 #define I3200_ECCERRLOG_RANK_SHIFT 27 96 #define I3200_ECCERRLOG_RANK_SHIFT 27
97 #define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000 97 #define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000
98 #define I3200_ECCERRLOG_SYNDROME_SHIFT 16 98 #define I3200_ECCERRLOG_SYNDROME_SHIFT 16
99 #define I3200_CAPID0 0xe0 /* P.95 of spec for details */ 99 #define I3200_CAPID0 0xe0 /* P.95 of spec for details */
100 100
101 struct i3200_priv { 101 struct i3200_priv {
102 void __iomem *window; 102 void __iomem *window;
103 }; 103 };
104 104
105 static int nr_channels; 105 static int nr_channels;
106 106
107 static int how_many_channels(struct pci_dev *pdev) 107 static int how_many_channels(struct pci_dev *pdev)
108 { 108 {
109 unsigned char capid0_8b; /* 8th byte of CAPID0 */ 109 unsigned char capid0_8b; /* 8th byte of CAPID0 */
110 110
111 pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); 111 pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
112 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ 112 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
113 debugf0("In single channel mode.\n"); 113 debugf0("In single channel mode.\n");
114 return 1; 114 return 1;
115 } else { 115 } else {
116 debugf0("In dual channel mode.\n"); 116 debugf0("In dual channel mode.\n");
117 return 2; 117 return 2;
118 } 118 }
119 } 119 }
120 120
121 static unsigned long eccerrlog_syndrome(u64 log) 121 static unsigned long eccerrlog_syndrome(u64 log)
122 { 122 {
123 return (log & I3200_ECCERRLOG_SYNDROME_BITS) >> 123 return (log & I3200_ECCERRLOG_SYNDROME_BITS) >>
124 I3200_ECCERRLOG_SYNDROME_SHIFT; 124 I3200_ECCERRLOG_SYNDROME_SHIFT;
125 } 125 }
126 126
127 static int eccerrlog_row(int channel, u64 log) 127 static int eccerrlog_row(int channel, u64 log)
128 { 128 {
129 u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >> 129 u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >>
130 I3200_ECCERRLOG_RANK_SHIFT); 130 I3200_ECCERRLOG_RANK_SHIFT);
131 return rank | (channel * I3200_RANKS_PER_CHANNEL); 131 return rank | (channel * I3200_RANKS_PER_CHANNEL);
132 } 132 }
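eccerrlog_row() folds a (channel, rank) pair into a single csrow index. Because I3200_RANKS_PER_CHANNEL is 4 and the rank field is always below 4, the multiply leaves the low two bits free, so the OR behaves like an addition. A quick standalone check of the mapping (illustrative only):

	#include <assert.h>

	#define RANKS_PER_CHANNEL 4	/* mirrors I3200_RANKS_PER_CHANNEL */

	int main(void)
	{
		/* channel 1, rank 2 -> csrow 6; channel 0, rank 3 -> csrow 3 */
		assert((2 | (1 * RANKS_PER_CHANNEL)) == 6);
		assert((3 | (0 * RANKS_PER_CHANNEL)) == 3);
		return 0;
	}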
133 133
134 enum i3200_chips { 134 enum i3200_chips {
135 I3200 = 0, 135 I3200 = 0,
136 }; 136 };
137 137
138 struct i3200_dev_info { 138 struct i3200_dev_info {
139 const char *ctl_name; 139 const char *ctl_name;
140 }; 140 };
141 141
142 struct i3200_error_info { 142 struct i3200_error_info {
143 u16 errsts; 143 u16 errsts;
144 u16 errsts2; 144 u16 errsts2;
145 u64 eccerrlog[I3200_CHANNELS]; 145 u64 eccerrlog[I3200_CHANNELS];
146 }; 146 };
147 147
148 static const struct i3200_dev_info i3200_devs[] = { 148 static const struct i3200_dev_info i3200_devs[] = {
149 [I3200] = { 149 [I3200] = {
150 .ctl_name = "i3200" 150 .ctl_name = "i3200"
151 }, 151 },
152 }; 152 };
153 153
154 static struct pci_dev *mci_pdev; 154 static struct pci_dev *mci_pdev;
155 static int i3200_registered = 1; 155 static int i3200_registered = 1;
156 156
157 157
158 static void i3200_clear_error_info(struct mem_ctl_info *mci) 158 static void i3200_clear_error_info(struct mem_ctl_info *mci)
159 { 159 {
160 struct pci_dev *pdev; 160 struct pci_dev *pdev;
161 161
162 pdev = to_pci_dev(mci->pdev); 162 pdev = to_pci_dev(mci->pdev);
163 163
164 /* 164 /*
165 * Clear any error bits. 165 * Clear any error bits.
166 * (Yes, we really clear bits by writing 1 to them.) 166 * (Yes, we really clear bits by writing 1 to them.)
167 */ 167 */
168 pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS, 168 pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
169 I3200_ERRSTS_BITS); 169 I3200_ERRSTS_BITS);
170 } 170 }
171 171
172 static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci, 172 static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
173 struct i3200_error_info *info) 173 struct i3200_error_info *info)
174 { 174 {
175 struct pci_dev *pdev; 175 struct pci_dev *pdev;
176 struct i3200_priv *priv = mci->pvt_info; 176 struct i3200_priv *priv = mci->pvt_info;
177 void __iomem *window = priv->window; 177 void __iomem *window = priv->window;
178 178
179 pdev = to_pci_dev(mci->pdev); 179 pdev = to_pci_dev(mci->pdev);
180 180
181 /* 181 /*
182 * This is a mess because there is no atomic way to read all the 182 * This is a mess because there is no atomic way to read all the
183 * registers at once and the registers can transition from CE being 183 * registers at once and the registers can transition from CE being
184 * overwritten by UE. 184 * overwritten by UE.
185 */ 185 */
186 pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts); 186 pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
187 if (!(info->errsts & I3200_ERRSTS_BITS)) 187 if (!(info->errsts & I3200_ERRSTS_BITS))
188 return; 188 return;
189 189
190 info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); 190 info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
191 if (nr_channels == 2) 191 if (nr_channels == 2)
192 info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); 192 info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
193 193
194 pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2); 194 pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);
195 195
196 /* 196 /*
197 * If the error is the same for both reads then the first set 197 * If the error is the same for both reads then the first set
198 * of reads is valid. If there is a change then there is a CE 198 * of reads is valid. If there is a change then there is a CE
199 * with no info and the second set of reads is valid and 199 * with no info and the second set of reads is valid and
200 * should be UE info. 200 * should be UE info.
201 */ 201 */
202 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { 202 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
203 info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); 203 info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
204 if (nr_channels == 2) 204 if (nr_channels == 2)
205 info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); 205 info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
206 } 206 }
207 207
208 i3200_clear_error_info(mci); 208 i3200_clear_error_info(mci);
209 } 209 }
210 210
211 static void i3200_process_error_info(struct mem_ctl_info *mci, 211 static void i3200_process_error_info(struct mem_ctl_info *mci,
212 struct i3200_error_info *info) 212 struct i3200_error_info *info)
213 { 213 {
214 int channel; 214 int channel;
215 u64 log; 215 u64 log;
216 216
217 if (!(info->errsts & I3200_ERRSTS_BITS)) 217 if (!(info->errsts & I3200_ERRSTS_BITS))
218 return; 218 return;
219 219
220 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { 220 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
221 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 221 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
222 -1, -1, -1, "UE overwrote CE", "", NULL); 222 -1, -1, -1, "UE overwrote CE", "", NULL);
223 info->errsts = info->errsts2; 223 info->errsts = info->errsts2;
224 } 224 }
225 225
226 for (channel = 0; channel < nr_channels; channel++) { 226 for (channel = 0; channel < nr_channels; channel++) {
227 log = info->eccerrlog[channel]; 227 log = info->eccerrlog[channel];
228 if (log & I3200_ECCERRLOG_UE) { 228 if (log & I3200_ECCERRLOG_UE) {
229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
230 0, 0, 0, 230 0, 0, 0,
231 eccerrlog_row(channel, log), 231 eccerrlog_row(channel, log),
232 -1, -1, 232 -1, -1,
233 "i3000 UE", "", NULL); 233 "i3000 UE", "", NULL);
234 } else if (log & I3200_ECCERRLOG_CE) { 234 } else if (log & I3200_ECCERRLOG_CE) {
235 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 235 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
236 0, 0, eccerrlog_syndrome(log), 236 0, 0, eccerrlog_syndrome(log),
237 eccerrlog_row(channel, log), 237 eccerrlog_row(channel, log),
238 -1, -1, 238 -1, -1,
239 "i3000 UE", "", NULL); 239 "i3000 UE", "", NULL);
240 } 240 }
241 } 241 }
242 } 242 }
243 243
244 static void i3200_check(struct mem_ctl_info *mci) 244 static void i3200_check(struct mem_ctl_info *mci)
245 { 245 {
246 struct i3200_error_info info; 246 struct i3200_error_info info;
247 247
248 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 248 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
249 i3200_get_and_clear_error_info(mci, &info); 249 i3200_get_and_clear_error_info(mci, &info);
250 i3200_process_error_info(mci, &info); 250 i3200_process_error_info(mci, &info);
251 } 251 }
252 252
253 253
254 static void __iomem *i3200_map_mchbar(struct pci_dev *pdev) 254 static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
255 { 255 {
256 union { 256 union {
257 u64 mchbar; 257 u64 mchbar;
258 struct { 258 struct {
259 u32 mchbar_low; 259 u32 mchbar_low;
260 u32 mchbar_high; 260 u32 mchbar_high;
261 }; 261 };
262 } u; 262 } u;
263 void __iomem *window; 263 void __iomem *window;
264 264
265 pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low); 265 pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
266 pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high); 266 pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
267 u.mchbar &= I3200_MCHBAR_MASK; 267 u.mchbar &= I3200_MCHBAR_MASK;
268 268
269 if (u.mchbar != (resource_size_t)u.mchbar) { 269 if (u.mchbar != (resource_size_t)u.mchbar) {
270 printk(KERN_ERR 270 printk(KERN_ERR
271 "i3200: mmio space beyond accessible range (0x%llx)\n", 271 "i3200: mmio space beyond accessible range (0x%llx)\n",
272 (unsigned long long)u.mchbar); 272 (unsigned long long)u.mchbar);
273 return NULL; 273 return NULL;
274 } 274 }
275 275
276 window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE); 276 window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
277 if (!window) 277 if (!window)
278 printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n", 278 printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
279 (unsigned long long)u.mchbar); 279 (unsigned long long)u.mchbar);
280 280
281 return window; 281 return window;
282 } 282 }
283 283
284 284
285 static void i3200_get_drbs(void __iomem *window, 285 static void i3200_get_drbs(void __iomem *window,
286 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) 286 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
287 { 287 {
288 int i; 288 int i;
289 289
290 for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) { 290 for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
291 drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK; 291 drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
292 drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK; 292 drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
293 } 293 }
294 } 294 }
295 295
296 static bool i3200_is_stacked(struct pci_dev *pdev, 296 static bool i3200_is_stacked(struct pci_dev *pdev,
297 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) 297 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
298 { 298 {
299 u16 tom; 299 u16 tom;
300 300
301 pci_read_config_word(pdev, I3200_TOM, &tom); 301 pci_read_config_word(pdev, I3200_TOM, &tom);
302 tom &= I3200_TOM_MASK; 302 tom &= I3200_TOM_MASK;
303 303
304 return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom; 304 return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
305 } 305 }
306 306
307 static unsigned long drb_to_nr_pages( 307 static unsigned long drb_to_nr_pages(
308 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked, 308 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
309 int channel, int rank) 309 int channel, int rank)
310 { 310 {
311 int n; 311 int n;
312 312
313 n = drbs[channel][rank]; 313 n = drbs[channel][rank];
314 if (rank > 0) 314 if (rank > 0)
315 n -= drbs[channel][rank - 1]; 315 n -= drbs[channel][rank - 1];
316 if (stacked && (channel == 1) && 316 if (stacked && (channel == 1) &&
317 drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1]) 317 drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
318 n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1]; 318 n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];
319 319
320 n <<= (I3200_DRB_SHIFT - PAGE_SHIFT); 320 n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
321 return n; 321 return n;
322 } 322 }
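Because the DRBs are cumulative 64 MiB boundaries, a rank's size is the delta to the previous boundary, rescaled from granules to pages by the shift above. A worked standalone example with invented boundary values (assuming 4 KiB pages):

	#include <assert.h>

	#define DRB_SHIFT	26	/* 64 MiB granule, as I3200_DRB_SHIFT */
	#define PAGE_SHIFT	12	/* assuming 4 KiB pages */

	int main(void)
	{
		/* cumulative granule counts: ranks 0 and 1 hold 16 each, 2-3 empty */
		unsigned short drb[4] = { 16, 32, 32, 32 };
		unsigned long rank1 = (unsigned long)(drb[1] - drb[0])
					<< (DRB_SHIFT - PAGE_SHIFT);

		/* 16 granules * 64 MiB = 1 GiB = 262144 pages of 4 KiB */
		assert(rank1 == 262144);
		return 0;
	}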
323 323
324 static int i3200_probe1(struct pci_dev *pdev, int dev_idx) 324 static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
325 { 325 {
326 int rc; 326 int rc;
327 int i, j; 327 int i, j;
328 struct mem_ctl_info *mci = NULL; 328 struct mem_ctl_info *mci = NULL;
329 struct edac_mc_layer layers[2]; 329 struct edac_mc_layer layers[2];
330 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; 330 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
331 bool stacked; 331 bool stacked;
332 void __iomem *window; 332 void __iomem *window;
333 struct i3200_priv *priv; 333 struct i3200_priv *priv;
334 334
335 debugf0("MC: %s()\n", __func__); 335 debugf0("MC: %s()\n", __func__);
336 336
337 window = i3200_map_mchbar(pdev); 337 window = i3200_map_mchbar(pdev);
338 if (!window) 338 if (!window)
339 return -ENODEV; 339 return -ENODEV;
340 340
341 i3200_get_drbs(window, drbs); 341 i3200_get_drbs(window, drbs);
342 nr_channels = how_many_channels(pdev); 342 nr_channels = how_many_channels(pdev);
343 343
344 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 344 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
345 layers[0].size = I3200_DIMMS; 345 layers[0].size = I3200_DIMMS;
346 layers[0].is_virt_csrow = true; 346 layers[0].is_virt_csrow = true;
347 layers[1].type = EDAC_MC_LAYER_CHANNEL; 347 layers[1].type = EDAC_MC_LAYER_CHANNEL;
348 layers[1].size = nr_channels; 348 layers[1].size = nr_channels;
349 layers[1].is_virt_csrow = false; 349 layers[1].is_virt_csrow = false;
350 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 350 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
351 sizeof(struct i3200_priv)); 351 sizeof(struct i3200_priv));
352 if (!mci) 352 if (!mci)
353 return -ENOMEM; 353 return -ENOMEM;
354 354
355 debugf3("MC: %s(): init mci\n", __func__); 355 debugf3("MC: %s(): init mci\n", __func__);
356 356
357 mci->pdev = &pdev->dev; 357 mci->pdev = &pdev->dev;
358 mci->mtype_cap = MEM_FLAG_DDR2; 358 mci->mtype_cap = MEM_FLAG_DDR2;
359 359
360 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 360 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
361 mci->edac_cap = EDAC_FLAG_SECDED; 361 mci->edac_cap = EDAC_FLAG_SECDED;
362 362
363 mci->mod_name = EDAC_MOD_STR; 363 mci->mod_name = EDAC_MOD_STR;
364 mci->mod_ver = I3200_REVISION; 364 mci->mod_ver = I3200_REVISION;
365 mci->ctl_name = i3200_devs[dev_idx].ctl_name; 365 mci->ctl_name = i3200_devs[dev_idx].ctl_name;
366 mci->dev_name = pci_name(pdev); 366 mci->dev_name = pci_name(pdev);
367 mci->edac_check = i3200_check; 367 mci->edac_check = i3200_check;
368 mci->ctl_page_to_phys = NULL; 368 mci->ctl_page_to_phys = NULL;
369 priv = mci->pvt_info; 369 priv = mci->pvt_info;
370 priv->window = window; 370 priv->window = window;
371 371
372 stacked = i3200_is_stacked(pdev, drbs); 372 stacked = i3200_is_stacked(pdev, drbs);
373 373
374 /* 374 /*
375 * The dram rank boundary (DRB) reg values are boundary addresses 375 * The dram rank boundary (DRB) reg values are boundary addresses
376 * for each DRAM rank with a granularity of 64MB. DRB regs are 376 * for each DRAM rank with a granularity of 64MB. DRB regs are
377 * cumulative; the last one will contain the total memory 377 * cumulative; the last one will contain the total memory
378 * contained in all ranks. 378 * contained in all ranks.
379 */ 379 */
380 for (i = 0; i < mci->nr_csrows; i++) { 380 for (i = 0; i < mci->nr_csrows; i++) {
381 unsigned long nr_pages; 381 unsigned long nr_pages;
382 struct csrow_info *csrow = &mci->csrows[i]; 382 struct csrow_info *csrow = mci->csrows[i];
383 383
384 nr_pages = drb_to_nr_pages(drbs, stacked, 384 nr_pages = drb_to_nr_pages(drbs, stacked,
385 i / I3200_RANKS_PER_CHANNEL, 385 i / I3200_RANKS_PER_CHANNEL,
386 i % I3200_RANKS_PER_CHANNEL); 386 i % I3200_RANKS_PER_CHANNEL);
387 387
388 if (nr_pages == 0) 388 if (nr_pages == 0)
389 continue; 389 continue;
390 390
391 for (j = 0; j < nr_channels; j++) { 391 for (j = 0; j < nr_channels; j++) {
392 struct dimm_info *dimm = csrow->channels[j].dimm; 392 struct dimm_info *dimm = csrow->channels[j]->dimm;
393 393
394 dimm->nr_pages = nr_pages / nr_channels; 394 dimm->nr_pages = nr_pages / nr_channels;
395 dimm->grain = nr_pages << PAGE_SHIFT; 395 dimm->grain = nr_pages << PAGE_SHIFT;
396 dimm->mtype = MEM_DDR2; 396 dimm->mtype = MEM_DDR2;
397 dimm->dtype = DEV_UNKNOWN; 397 dimm->dtype = DEV_UNKNOWN;
398 dimm->edac_mode = EDAC_UNKNOWN; 398 dimm->edac_mode = EDAC_UNKNOWN;
399 } 399 }
400 } 400 }
401 401
402 i3200_clear_error_info(mci); 402 i3200_clear_error_info(mci);
403 403
404 rc = -ENODEV; 404 rc = -ENODEV;
405 if (edac_mc_add_mc(mci)) { 405 if (edac_mc_add_mc(mci)) {
406 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); 406 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
407 goto fail; 407 goto fail;
408 } 408 }
409 409
410 /* get this far and it's successful */ 410 /* get this far and it's successful */
411 debugf3("MC: %s(): success\n", __func__); 411 debugf3("MC: %s(): success\n", __func__);
412 return 0; 412 return 0;
413 413
414 fail: 414 fail:
415 iounmap(window); 415 iounmap(window);
416 if (mci) 416 if (mci)
417 edac_mc_free(mci); 417 edac_mc_free(mci);
418 418
419 return rc; 419 return rc;
420 } 420 }
421 421
422 static int __devinit i3200_init_one(struct pci_dev *pdev, 422 static int __devinit i3200_init_one(struct pci_dev *pdev,
423 const struct pci_device_id *ent) 423 const struct pci_device_id *ent)
424 { 424 {
425 int rc; 425 int rc;
426 426
427 debugf0("MC: %s()\n", __func__); 427 debugf0("MC: %s()\n", __func__);
428 428
429 if (pci_enable_device(pdev) < 0) 429 if (pci_enable_device(pdev) < 0)
430 return -EIO; 430 return -EIO;
431 431
432 rc = i3200_probe1(pdev, ent->driver_data); 432 rc = i3200_probe1(pdev, ent->driver_data);
433 if (!mci_pdev) 433 if (!mci_pdev)
434 mci_pdev = pci_dev_get(pdev); 434 mci_pdev = pci_dev_get(pdev);
435 435
436 return rc; 436 return rc;
437 } 437 }
438 438
439 static void __devexit i3200_remove_one(struct pci_dev *pdev) 439 static void __devexit i3200_remove_one(struct pci_dev *pdev)
440 { 440 {
441 struct mem_ctl_info *mci; 441 struct mem_ctl_info *mci;
442 struct i3200_priv *priv; 442 struct i3200_priv *priv;
443 443
444 debugf0("%s()\n", __func__); 444 debugf0("%s()\n", __func__);
445 445
446 mci = edac_mc_del_mc(&pdev->dev); 446 mci = edac_mc_del_mc(&pdev->dev);
447 if (!mci) 447 if (!mci)
448 return; 448 return;
449 449
450 priv = mci->pvt_info; 450 priv = mci->pvt_info;
451 iounmap(priv->window); 451 iounmap(priv->window);
452 452
453 edac_mc_free(mci); 453 edac_mc_free(mci);
454 } 454 }
455 455
456 static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = { 456 static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = {
457 { 457 {
458 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 458 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
459 I3200}, 459 I3200},
460 { 460 {
461 0, 461 0,
462 } /* 0 terminated list. */ 462 } /* 0 terminated list. */
463 }; 463 };
464 464
465 MODULE_DEVICE_TABLE(pci, i3200_pci_tbl); 465 MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);
466 466
467 static struct pci_driver i3200_driver = { 467 static struct pci_driver i3200_driver = {
468 .name = EDAC_MOD_STR, 468 .name = EDAC_MOD_STR,
469 .probe = i3200_init_one, 469 .probe = i3200_init_one,
470 .remove = __devexit_p(i3200_remove_one), 470 .remove = __devexit_p(i3200_remove_one),
471 .id_table = i3200_pci_tbl, 471 .id_table = i3200_pci_tbl,
472 }; 472 };
473 473
474 static int __init i3200_init(void) 474 static int __init i3200_init(void)
475 { 475 {
476 int pci_rc; 476 int pci_rc;
477 477
478 debugf3("MC: %s()\n", __func__); 478 debugf3("MC: %s()\n", __func__);
479 479
480 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 480 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
481 opstate_init(); 481 opstate_init();
482 482
483 pci_rc = pci_register_driver(&i3200_driver); 483 pci_rc = pci_register_driver(&i3200_driver);
484 if (pci_rc < 0) 484 if (pci_rc < 0)
485 goto fail0; 485 goto fail0;
486 486
487 if (!mci_pdev) { 487 if (!mci_pdev) {
488 i3200_registered = 0; 488 i3200_registered = 0;
489 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 489 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
490 PCI_DEVICE_ID_INTEL_3200_HB, NULL); 490 PCI_DEVICE_ID_INTEL_3200_HB, NULL);
491 if (!mci_pdev) { 491 if (!mci_pdev) {
492 debugf0("i3200 pci_get_device fail\n"); 492 debugf0("i3200 pci_get_device fail\n");
493 pci_rc = -ENODEV; 493 pci_rc = -ENODEV;
494 goto fail1; 494 goto fail1;
495 } 495 }
496 496
497 pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); 497 pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
498 if (pci_rc < 0) { 498 if (pci_rc < 0) {
499 debugf0("i3200 init fail\n"); 499 debugf0("i3200 init fail\n");
500 pci_rc = -ENODEV; 500 pci_rc = -ENODEV;
501 goto fail1; 501 goto fail1;
502 } 502 }
503 } 503 }
504 504
505 return 0; 505 return 0;
506 506
507 fail1: 507 fail1:
508 pci_unregister_driver(&i3200_driver); 508 pci_unregister_driver(&i3200_driver);
509 509
510 fail0: 510 fail0:
511 if (mci_pdev) 511 if (mci_pdev)
512 pci_dev_put(mci_pdev); 512 pci_dev_put(mci_pdev);
513 513
514 return pci_rc; 514 return pci_rc;
515 } 515 }
516 516
517 static void __exit i3200_exit(void) 517 static void __exit i3200_exit(void)
518 { 518 {
519 debugf3("MC: %s()\n", __func__); 519 debugf3("MC: %s()\n", __func__);
520 520
521 pci_unregister_driver(&i3200_driver); 521 pci_unregister_driver(&i3200_driver);
522 if (!i3200_registered) { 522 if (!i3200_registered) {
523 i3200_remove_one(mci_pdev); 523 i3200_remove_one(mci_pdev);
524 pci_dev_put(mci_pdev); 524 pci_dev_put(mci_pdev);
525 } 525 }
526 } 526 }
527 527
528 module_init(i3200_init); 528 module_init(i3200_init);
529 module_exit(i3200_exit); 529 module_exit(i3200_exit);
530 530
531 MODULE_LICENSE("GPL"); 531 MODULE_LICENSE("GPL");
532 MODULE_AUTHOR("Akamai Technologies, Inc."); 532 MODULE_AUTHOR("Akamai Technologies, Inc.");
533 MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers"); 533 MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");
534 534
535 module_param(edac_op_state, int, 0444); 535 module_param(edac_op_state, int, 0444);
536 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 536 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
537 537
drivers/edac/i5400_edac.c
1 /* 1 /*
2 * Intel 5400 class Memory Controllers kernel module (Seaburg) 2 * Intel 5400 class Memory Controllers kernel module (Seaburg)
3 * 3 *
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * Copyright (c) 2008 by: 7 * Copyright (c) 2008 by:
8 * Ben Woodard <woodard@redhat.com> 8 * Ben Woodard <woodard@redhat.com>
9 * Mauro Carvalho Chehab <mchehab@redhat.com> 9 * Mauro Carvalho Chehab <mchehab@redhat.com>
10 * 10 *
11 * Red Hat Inc. http://www.redhat.com 11 * Red Hat Inc. http://www.redhat.com
12 * 12 *
13 * Forked and adapted from the i5000_edac driver which was 13 * Forked and adapted from the i5000_edac driver which was
14 * written by Douglas Thompson Linux Networx <norsk5@xmission.com> 14 * written by Douglas Thompson Linux Networx <norsk5@xmission.com>
15 * 15 *
16 * This module is based on the following document: 16 * This module is based on the following document:
17 * 17 *
18 * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet 18 * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
19 * http://developer.intel.com/design/chipsets/datashts/313070.htm 19 * http://developer.intel.com/design/chipsets/datashts/313070.htm
20 * 20 *
21 * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with 21 * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
22 * 2 channels operating in lockstep no-mirror mode. Each channel can have up to 22 * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
23 * 4 DIMMs, each with up to 8 GB. 23 * 4 DIMMs, each with up to 8 GB.
24 * 24 *
25 */ 25 */
26 26
27 #include <linux/module.h> 27 #include <linux/module.h>
28 #include <linux/init.h> 28 #include <linux/init.h>
29 #include <linux/pci.h> 29 #include <linux/pci.h>
30 #include <linux/pci_ids.h> 30 #include <linux/pci_ids.h>
31 #include <linux/slab.h> 31 #include <linux/slab.h>
32 #include <linux/edac.h> 32 #include <linux/edac.h>
33 #include <linux/mmzone.h> 33 #include <linux/mmzone.h>
34 34
35 #include "edac_core.h" 35 #include "edac_core.h"
36 36
37 /* 37 /*
38 * Alter this version for the I5400 module when modifications are made 38 * Alter this version for the I5400 module when modifications are made
39 */ 39 */
40 #define I5400_REVISION " Ver: 1.0.0" 40 #define I5400_REVISION " Ver: 1.0.0"
41 41
42 #define EDAC_MOD_STR "i5400_edac" 42 #define EDAC_MOD_STR "i5400_edac"
43 43
44 #define i5400_printk(level, fmt, arg...) \ 44 #define i5400_printk(level, fmt, arg...) \
45 edac_printk(level, "i5400", fmt, ##arg) 45 edac_printk(level, "i5400", fmt, ##arg)
46 46
47 #define i5400_mc_printk(mci, level, fmt, arg...) \ 47 #define i5400_mc_printk(mci, level, fmt, arg...) \
48 edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg) 48 edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
49 49
50 /* Limits for i5400 */ 50 /* Limits for i5400 */
51 #define MAX_BRANCHES 2 51 #define MAX_BRANCHES 2
52 #define CHANNELS_PER_BRANCH 2 52 #define CHANNELS_PER_BRANCH 2
53 #define DIMMS_PER_CHANNEL 4 53 #define DIMMS_PER_CHANNEL 4
54 #define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH) 54 #define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH)
55 55
56 /* Device 16, 56 /* Device 16,
57 * Function 0: System Address 57 * Function 0: System Address
58 * Function 1: Memory Branch Map, Control, Errors Register 58 * Function 1: Memory Branch Map, Control, Errors Register
59 * Function 2: FSB Error Registers 59 * Function 2: FSB Error Registers
60 * 60 *
61 * All 3 functions of Device 16 (0,1,2) share the SAME DID and 61 * All 3 functions of Device 16 (0,1,2) share the SAME DID and
62 * use PCI_DEVICE_ID_INTEL_5400_ERR for device 16 (0,1,2), 62 * use PCI_DEVICE_ID_INTEL_5400_ERR for device 16 (0,1,2),
63 * PCI_DEVICE_ID_INTEL_5400_FBD0 and PCI_DEVICE_ID_INTEL_5400_FBD1 63 * PCI_DEVICE_ID_INTEL_5400_FBD0 and PCI_DEVICE_ID_INTEL_5400_FBD1
64 * for device 21 (0,1). 64 * for device 21 (0,1).
65 */ 65 */
66 66
67 /* OFFSETS for Function 0 */ 67 /* OFFSETS for Function 0 */
68 #define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */ 68 #define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
69 #define MAXCH 0x56 /* Max Channel Number */ 69 #define MAXCH 0x56 /* Max Channel Number */
70 #define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */ 70 #define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
71 71
72 /* OFFSETS for Function 1 */ 72 /* OFFSETS for Function 1 */
73 #define TOLM 0x6C 73 #define TOLM 0x6C
74 #define REDMEMB 0x7C 74 #define REDMEMB 0x7C
75 #define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0] indicate EVEN */ 75 #define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0] indicate EVEN */
76 #define MIR0 0x80 76 #define MIR0 0x80
77 #define MIR1 0x84 77 #define MIR1 0x84
78 #define AMIR0 0x8c 78 #define AMIR0 0x8c
79 #define AMIR1 0x90 79 #define AMIR1 0x90
80 80
81 /* Fatal error registers */ 81 /* Fatal error registers */
82 #define FERR_FAT_FBD 0x98 /* also called FERR_FAT_FB_DIMM in the datasheet */ 82 #define FERR_FAT_FBD 0x98 /* also called FERR_FAT_FB_DIMM in the datasheet */
83 #define FERR_FAT_FBDCHAN (3<<28) /* channel index where the highest-order error occurred */ 83 #define FERR_FAT_FBDCHAN (3<<28) /* channel index where the highest-order error occurred */
84 84
85 #define NERR_FAT_FBD 0x9c 85 #define NERR_FAT_FBD 0x9c
86 #define FERR_NF_FBD 0xa0 /* also called FERR_NFAT_FB_DIMM in the datasheet */ 86 #define FERR_NF_FBD 0xa0 /* also called FERR_NFAT_FB_DIMM in the datasheet */
87 87
88 /* Non-fatal error register */ 88 /* Non-fatal error register */
89 #define NERR_NF_FBD 0xa4 89 #define NERR_NF_FBD 0xa4
90 90
91 /* Enable error mask */ 91 /* Enable error mask */
92 #define EMASK_FBD 0xa8 92 #define EMASK_FBD 0xa8
93 93
94 #define ERR0_FBD 0xac 94 #define ERR0_FBD 0xac
95 #define ERR1_FBD 0xb0 95 #define ERR1_FBD 0xb0
96 #define ERR2_FBD 0xb4 96 #define ERR2_FBD 0xb4
97 #define MCERR_FBD 0xb8 97 #define MCERR_FBD 0xb8
98 98
99 /* No OFFSETS for Device 16 Function 2 */ 99 /* No OFFSETS for Device 16 Function 2 */
100 100
101 /* 101 /*
102 * Device 21, 102 * Device 21,
103 * Function 0: Memory Map Branch 0 103 * Function 0: Memory Map Branch 0
104 * 104 *
105 * Device 22, 105 * Device 22,
106 * Function 0: Memory Map Branch 1 106 * Function 0: Memory Map Branch 1
107 */ 107 */
108 108
109 /* OFFSETS for Function 0 */ 109 /* OFFSETS for Function 0 */
110 #define AMBPRESENT_0 0x64 110 #define AMBPRESENT_0 0x64
111 #define AMBPRESENT_1 0x66 111 #define AMBPRESENT_1 0x66
112 #define MTR0 0x80 112 #define MTR0 0x80
113 #define MTR1 0x82 113 #define MTR1 0x82
114 #define MTR2 0x84 114 #define MTR2 0x84
115 #define MTR3 0x86 115 #define MTR3 0x86
116 116
117 /* OFFSETS for Function 1 */ 117 /* OFFSETS for Function 1 */
118 #define NRECFGLOG 0x74 118 #define NRECFGLOG 0x74
119 #define RECFGLOG 0x78 119 #define RECFGLOG 0x78
120 #define NRECMEMA 0xbe 120 #define NRECMEMA 0xbe
121 #define NRECMEMB 0xc0 121 #define NRECMEMB 0xc0
122 #define NRECFB_DIMMA 0xc4 122 #define NRECFB_DIMMA 0xc4
123 #define NRECFB_DIMMB 0xc8 123 #define NRECFB_DIMMB 0xc8
124 #define NRECFB_DIMMC 0xcc 124 #define NRECFB_DIMMC 0xcc
125 #define NRECFB_DIMMD 0xd0 125 #define NRECFB_DIMMD 0xd0
126 #define NRECFB_DIMME 0xd4 126 #define NRECFB_DIMME 0xd4
127 #define NRECFB_DIMMF 0xd8 127 #define NRECFB_DIMMF 0xd8
128 #define REDMEMA 0xdC 128 #define REDMEMA 0xdC
129 #define RECMEMA 0xf0 129 #define RECMEMA 0xf0
130 #define RECMEMB 0xf4 130 #define RECMEMB 0xf4
131 #define RECFB_DIMMA 0xf8 131 #define RECFB_DIMMA 0xf8
132 #define RECFB_DIMMB 0xec 132 #define RECFB_DIMMB 0xec
133 #define RECFB_DIMMC 0xf0 133 #define RECFB_DIMMC 0xf0
134 #define RECFB_DIMMD 0xf4 134 #define RECFB_DIMMD 0xf4
135 #define RECFB_DIMME 0xf8 135 #define RECFB_DIMME 0xf8
136 #define RECFB_DIMMF 0xfC 136 #define RECFB_DIMMF 0xfC
137 137
138 /* 138 /*
139 * Error indicator bits and masks 139 * Error indicator bits and masks
140 * Error masks follow Table 5-17 of the i5400 datasheet 140 * Error masks follow Table 5-17 of the i5400 datasheet
141 */ 141 */
142 142
143 enum error_mask { 143 enum error_mask {
144 EMASK_M1 = 1<<0, /* Memory Write error on non-redundant retry */ 144 EMASK_M1 = 1<<0, /* Memory Write error on non-redundant retry */
145 EMASK_M2 = 1<<1, /* Memory or FB-DIMM configuration CRC read error */ 145 EMASK_M2 = 1<<1, /* Memory or FB-DIMM configuration CRC read error */
146 EMASK_M3 = 1<<2, /* Reserved */ 146 EMASK_M3 = 1<<2, /* Reserved */
147 EMASK_M4 = 1<<3, /* Uncorrectable Data ECC on Replay */ 147 EMASK_M4 = 1<<3, /* Uncorrectable Data ECC on Replay */
148 EMASK_M5 = 1<<4, /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */ 148 EMASK_M5 = 1<<4, /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */
149 EMASK_M6 = 1<<5, /* Unsupported on i5400 */ 149 EMASK_M6 = 1<<5, /* Unsupported on i5400 */
150 EMASK_M7 = 1<<6, /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */ 150 EMASK_M7 = 1<<6, /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
151 EMASK_M8 = 1<<7, /* Aliased Uncorrectable Patrol Data ECC */ 151 EMASK_M8 = 1<<7, /* Aliased Uncorrectable Patrol Data ECC */
152 EMASK_M9 = 1<<8, /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */ 152 EMASK_M9 = 1<<8, /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */
153 EMASK_M10 = 1<<9, /* Unsupported on i5400 */ 153 EMASK_M10 = 1<<9, /* Unsupported on i5400 */
154 EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */ 154 EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
155 EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */ 155 EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */
156 EMASK_M13 = 1<<12, /* Memory Write error on first attempt */ 156 EMASK_M13 = 1<<12, /* Memory Write error on first attempt */
157 EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */ 157 EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */
158 EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */ 158 EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */
159 EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */ 159 EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */
160 EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */ 160 EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */
161 EMASK_M18 = 1<<17, /* Unsupported on i5400 */ 161 EMASK_M18 = 1<<17, /* Unsupported on i5400 */
162 EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */ 162 EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */
163 EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */ 163 EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */
164 EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */ 164 EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */
165 EMASK_M22 = 1<<21, /* SPD protocol Error */ 165 EMASK_M22 = 1<<21, /* SPD protocol Error */
166 EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */ 166 EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */
167 EMASK_M24 = 1<<23, /* Refresh error */ 167 EMASK_M24 = 1<<23, /* Refresh error */
168 EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */ 168 EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */
169 EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */ 169 EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */
170 EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */ 170 EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */
171 EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */ 171 EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */
172 EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */ 172 EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */
173 }; 173 };
174 174
175 /* 175 /*
176 * Names to translate bit error into something useful 176 * Names to translate bit error into something useful
177 */ 177 */
178 static const char *error_name[] = { 178 static const char *error_name[] = {
179 [0] = "Memory Write error on non-redundant retry", 179 [0] = "Memory Write error on non-redundant retry",
180 [1] = "Memory or FB-DIMM configuration CRC read error", 180 [1] = "Memory or FB-DIMM configuration CRC read error",
181 /* Reserved */ 181 /* Reserved */
182 [3] = "Uncorrectable Data ECC on Replay", 182 [3] = "Uncorrectable Data ECC on Replay",
183 [4] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", 183 [4] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
184 /* M6 Unsupported on i5400 */ 184 /* M6 Unsupported on i5400 */
185 [6] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", 185 [6] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
186 [7] = "Aliased Uncorrectable Patrol Data ECC", 186 [7] = "Aliased Uncorrectable Patrol Data ECC",
187 [8] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC", 187 [8] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
188 /* M10 Unsupported on i5400 */ 188 /* M10 Unsupported on i5400 */
189 [10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", 189 [10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
190 [11] = "Non-Aliased Uncorrectable Patrol Data ECC", 190 [11] = "Non-Aliased Uncorrectable Patrol Data ECC",
191 [12] = "Memory Write error on first attempt", 191 [12] = "Memory Write error on first attempt",
192 [13] = "FB-DIMM Configuration Write error on first attempt", 192 [13] = "FB-DIMM Configuration Write error on first attempt",
193 [14] = "Memory or FB-DIMM configuration CRC read error", 193 [14] = "Memory or FB-DIMM configuration CRC read error",
194 [15] = "Channel Failed-Over Occurred", 194 [15] = "Channel Failed-Over Occurred",
195 [16] = "Correctable Non-Mirrored Demand Data ECC", 195 [16] = "Correctable Non-Mirrored Demand Data ECC",
196 /* M18 Unsupported on i5400 */ 196 /* M18 Unsupported on i5400 */
197 [18] = "Correctable Resilver- or Spare-Copy Data ECC", 197 [18] = "Correctable Resilver- or Spare-Copy Data ECC",
198 [19] = "Correctable Patrol Data ECC", 198 [19] = "Correctable Patrol Data ECC",
199 [20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status", 199 [20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status",
200 [21] = "SPD protocol Error", 200 [21] = "SPD protocol Error",
201 [22] = "Non-Redundant Fast Reset Timeout", 201 [22] = "Non-Redundant Fast Reset Timeout",
202 [23] = "Refresh error", 202 [23] = "Refresh error",
203 [24] = "Memory Write error on redundant retry", 203 [24] = "Memory Write error on redundant retry",
204 [25] = "Redundant Fast Reset Timeout", 204 [25] = "Redundant Fast Reset Timeout",
205 [26] = "Correctable Counter Threshold Exceeded", 205 [26] = "Correctable Counter Threshold Exceeded",
206 [27] = "DIMM-Spare Copy Completed", 206 [27] = "DIMM-Spare Copy Completed",
207 [28] = "DIMM-Isolation Completed", 207 [28] = "DIMM-Isolation Completed",
208 }; 208 };
209 209
210 /* Fatal errors */ 210 /* Fatal errors */
211 #define ERROR_FAT_MASK (EMASK_M1 | \ 211 #define ERROR_FAT_MASK (EMASK_M1 | \
212 EMASK_M2 | \ 212 EMASK_M2 | \
213 EMASK_M23) 213 EMASK_M23)
214 214
215 /* Correctable errors */ 215 /* Correctable errors */
216 #define ERROR_NF_CORRECTABLE (EMASK_M27 | \ 216 #define ERROR_NF_CORRECTABLE (EMASK_M27 | \
217 EMASK_M20 | \ 217 EMASK_M20 | \
218 EMASK_M19 | \ 218 EMASK_M19 | \
219 EMASK_M18 | \ 219 EMASK_M18 | \
220 EMASK_M17 | \ 220 EMASK_M17 | \
221 EMASK_M16) 221 EMASK_M16)
222 #define ERROR_NF_DIMM_SPARE (EMASK_M29 | \ 222 #define ERROR_NF_DIMM_SPARE (EMASK_M29 | \
223 EMASK_M28) 223 EMASK_M28)
224 #define ERROR_NF_SPD_PROTOCOL (EMASK_M22) 224 #define ERROR_NF_SPD_PROTOCOL (EMASK_M22)
225 #define ERROR_NF_NORTH_CRC (EMASK_M21) 225 #define ERROR_NF_NORTH_CRC (EMASK_M21)
226 226
227 /* Recoverable errors */ 227 /* Recoverable errors */
228 #define ERROR_NF_RECOVERABLE (EMASK_M26 | \ 228 #define ERROR_NF_RECOVERABLE (EMASK_M26 | \
229 EMASK_M25 | \ 229 EMASK_M25 | \
230 EMASK_M24 | \ 230 EMASK_M24 | \
231 EMASK_M15 | \ 231 EMASK_M15 | \
232 EMASK_M14 | \ 232 EMASK_M14 | \
233 EMASK_M13 | \ 233 EMASK_M13 | \
234 EMASK_M12 | \ 234 EMASK_M12 | \
235 EMASK_M11 | \ 235 EMASK_M11 | \
236 EMASK_M9 | \ 236 EMASK_M9 | \
237 EMASK_M8 | \ 237 EMASK_M8 | \
238 EMASK_M7 | \ 238 EMASK_M7 | \
239 EMASK_M5) 239 EMASK_M5)
240 240
241 /* uncorrectable errors */ 241 /* uncorrectable errors */
242 #define ERROR_NF_UNCORRECTABLE (EMASK_M4) 242 #define ERROR_NF_UNCORRECTABLE (EMASK_M4)
243 243
244 /* mask to all non-fatal errors */ 244 /* mask to all non-fatal errors */
245 #define ERROR_NF_MASK (ERROR_NF_CORRECTABLE | \ 245 #define ERROR_NF_MASK (ERROR_NF_CORRECTABLE | \
246 ERROR_NF_UNCORRECTABLE | \ 246 ERROR_NF_UNCORRECTABLE | \
247 ERROR_NF_RECOVERABLE | \ 247 ERROR_NF_RECOVERABLE | \
248 ERROR_NF_DIMM_SPARE | \ 248 ERROR_NF_DIMM_SPARE | \
249 ERROR_NF_SPD_PROTOCOL | \ 249 ERROR_NF_SPD_PROTOCOL | \
250 ERROR_NF_NORTH_CRC) 250 ERROR_NF_NORTH_CRC)
251 251
252 /* 252 /*
253 * Define error masks for the several registers 253 * Define error masks for the several registers
254 */ 254 */
255 255
256 /* Enable all fatal and non fatal errors */ 256 /* Enable all fatal and non fatal errors */
257 #define ENABLE_EMASK_ALL (ERROR_FAT_MASK | ERROR_NF_MASK) 257 #define ENABLE_EMASK_ALL (ERROR_FAT_MASK | ERROR_NF_MASK)
258 258
259 /* mask for fatal error registers */ 259 /* mask for fatal error registers */
260 #define FERR_FAT_MASK ERROR_FAT_MASK 260 #define FERR_FAT_MASK ERROR_FAT_MASK
261 261
262 /* masks for non-fatal error register */ 262 /* masks for non-fatal error register */
263 static inline int to_nf_mask(unsigned int mask) 263 static inline int to_nf_mask(unsigned int mask)
264 { 264 {
265 return (mask & EMASK_M29) | (mask >> 3); 265 return (mask & EMASK_M29) | (mask >> 3);
266 }; 266 };
267 267
268 static inline int from_nf_ferr(unsigned int mask) 268 static inline int from_nf_ferr(unsigned int mask)
269 { 269 {
270 return (mask & EMASK_M29) | /* Bit 28 */ 270 return (mask & EMASK_M29) | /* Bit 28 */
271 (mask & ((1 << 28) - 1) << 3); /* Bits 0 to 27 */ 271 (mask & ((1 << 28) - 1) << 3); /* Bits 0 to 27 */
272 }; 272 };
273 273
274 #define FERR_NF_MASK to_nf_mask(ERROR_NF_MASK) 274 #define FERR_NF_MASK to_nf_mask(ERROR_NF_MASK)
275 #define FERR_NF_CORRECTABLE to_nf_mask(ERROR_NF_CORRECTABLE) 275 #define FERR_NF_CORRECTABLE to_nf_mask(ERROR_NF_CORRECTABLE)
276 #define FERR_NF_DIMM_SPARE to_nf_mask(ERROR_NF_DIMM_SPARE) 276 #define FERR_NF_DIMM_SPARE to_nf_mask(ERROR_NF_DIMM_SPARE)
277 #define FERR_NF_SPD_PROTOCOL to_nf_mask(ERROR_NF_SPD_PROTOCOL) 277 #define FERR_NF_SPD_PROTOCOL to_nf_mask(ERROR_NF_SPD_PROTOCOL)
278 #define FERR_NF_NORTH_CRC to_nf_mask(ERROR_NF_NORTH_CRC) 278 #define FERR_NF_NORTH_CRC to_nf_mask(ERROR_NF_NORTH_CRC)
279 #define FERR_NF_RECOVERABLE to_nf_mask(ERROR_NF_RECOVERABLE) 279 #define FERR_NF_RECOVERABLE to_nf_mask(ERROR_NF_RECOVERABLE)
280 #define FERR_NF_UNCORRECTABLE to_nf_mask(ERROR_NF_UNCORRECTABLE) 280 #define FERR_NF_UNCORRECTABLE to_nf_mask(ERROR_NF_UNCORRECTABLE)
281 281
282 /* Defines to extract the various fields from the 282 /* Defines to extract the various fields from the
283 * MTRx - Memory Technology Registers 283 * MTRx - Memory Technology Registers
284 */ 284 */
285 #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 10)) 285 #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 10))
286 #define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 9)) 286 #define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 9))
287 #define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 8)) ? 8 : 4) 287 #define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 8)) ? 8 : 4)
288 #define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 6)) ? 8 : 4) 288 #define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 6)) ? 8 : 4)
289 #define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2) 289 #define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
290 #define MTR_DIMM_RANK(mtr) (((mtr) >> 5) & 0x1) 290 #define MTR_DIMM_RANK(mtr) (((mtr) >> 5) & 0x1)
291 #define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1) 291 #define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
292 #define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3) 292 #define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
293 #define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13) 293 #define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
294 #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) 294 #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
295 #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) 295 #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
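The ADDR_BITS macros turn the packed MTR fields into address-bit counts (e.g. a rows field of 2 means 2^15 rows). A standalone decode of a made-up MTR value, with the two macros copied locally so the snippet compiles on its own:

	#include <assert.h>

	#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
	#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
	#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
	#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)

	int main(void)
	{
		int mtr = 0x045a;	/* hypothetical register value */

		assert(MTR_DIMM_ROWS_ADDR_BITS(mtr) == 15);	/* rows field = 2 */
		assert(MTR_DIMM_COLS_ADDR_BITS(mtr) == 12);	/* cols field = 2 */
		return 0;
	}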
296 296
297 /* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */ 297 /* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */
298 static inline int extract_fbdchan_indx(u32 x) 298 static inline int extract_fbdchan_indx(u32 x)
299 { 299 {
300 return (x>>28) & 0x3; 300 return (x>>28) & 0x3;
301 } 301 }
302 302
303 #ifdef CONFIG_EDAC_DEBUG 303 #ifdef CONFIG_EDAC_DEBUG
304 /* MTR NUMROW */ 304 /* MTR NUMROW */
305 static const char *numrow_toString[] = { 305 static const char *numrow_toString[] = {
306 "8,192 - 13 rows", 306 "8,192 - 13 rows",
307 "16,384 - 14 rows", 307 "16,384 - 14 rows",
308 "32,768 - 15 rows", 308 "32,768 - 15 rows",
309 "65,536 - 16 rows" 309 "65,536 - 16 rows"
310 }; 310 };
311 311
312 /* MTR NUMCOL */ 312 /* MTR NUMCOL */
313 static const char *numcol_toString[] = { 313 static const char *numcol_toString[] = {
314 "1,024 - 10 columns", 314 "1,024 - 10 columns",
315 "2,048 - 11 columns", 315 "2,048 - 11 columns",
316 "4,096 - 12 columns", 316 "4,096 - 12 columns",
317 "reserved" 317 "reserved"
318 }; 318 };
319 #endif 319 #endif
320 320
321 /* Device name and register DID (Device ID) */ 321 /* Device name and register DID (Device ID) */
322 struct i5400_dev_info { 322 struct i5400_dev_info {
323 const char *ctl_name; /* name for this device */ 323 const char *ctl_name; /* name for this device */
324 u16 fsb_mapping_errors; /* DID for the branchmap,control */ 324 u16 fsb_mapping_errors; /* DID for the branchmap,control */
325 }; 325 };
326 326
327 /* Table of devices attributes supported by this driver */ 327 /* Table of devices attributes supported by this driver */
328 static const struct i5400_dev_info i5400_devs[] = { 328 static const struct i5400_dev_info i5400_devs[] = {
329 { 329 {
330 .ctl_name = "I5400", 330 .ctl_name = "I5400",
331 .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR, 331 .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
332 }, 332 },
333 }; 333 };
334 334
335 struct i5400_dimm_info { 335 struct i5400_dimm_info {
336 int megabytes; /* size, 0 means not present */ 336 int megabytes; /* size, 0 means not present */
337 }; 337 };
338 338
339 /* driver private data structure */ 339 /* driver private data structure */
340 struct i5400_pvt { 340 struct i5400_pvt {
341 struct pci_dev *system_address; /* 16.0 */ 341 struct pci_dev *system_address; /* 16.0 */
342 struct pci_dev *branchmap_werrors; /* 16.1 */ 342 struct pci_dev *branchmap_werrors; /* 16.1 */
343 struct pci_dev *fsb_error_regs; /* 16.2 */ 343 struct pci_dev *fsb_error_regs; /* 16.2 */
344 struct pci_dev *branch_0; /* 21.0 */ 344 struct pci_dev *branch_0; /* 21.0 */
345 struct pci_dev *branch_1; /* 22.0 */ 345 struct pci_dev *branch_1; /* 22.0 */
346 346
347 u16 tolm; /* top of low memory */ 347 u16 tolm; /* top of low memory */
348 u64 ambase; /* AMB BAR */ 348 u64 ambase; /* AMB BAR */
349 349
350 u16 mir0, mir1; 350 u16 mir0, mir1;
351 351
352 u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */ 352 u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
353 u16 b0_ambpresent0; /* Branch 0, Channel 0 */ 353 u16 b0_ambpresent0; /* Branch 0, Channel 0 */
354 u16 b0_ambpresent1; /* Branch 0, Channel 1 */ 354 u16 b0_ambpresent1; /* Branch 0, Channel 1 */
355 355
356 u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */ 356 u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
357 u16 b1_ambpresent0; /* Branch 1, Channel 0 */ 357 u16 b1_ambpresent0; /* Branch 1, Channel 0 */
358 u16 b1_ambpresent1; /* Branch 1, Channel 1 */ 358 u16 b1_ambpresent1; /* Branch 1, Channel 1 */
359 359
360 /* DIMM information matrix, allocating architecture maximums */ 360 /* DIMM information matrix, allocating architecture maximums */
361 struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS]; 361 struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
362 362
363 /* Actual values for this controller */ 363 /* Actual values for this controller */
364 int maxch; /* Max channels */ 364 int maxch; /* Max channels */
365 int maxdimmperch; /* Max DIMMs per channel */ 365 int maxdimmperch; /* Max DIMMs per channel */
366 }; 366 };
367 367
368 /* I5400 MCH error information retrieved from Hardware */ 368 /* I5400 MCH error information retrieved from Hardware */
369 struct i5400_error_info { 369 struct i5400_error_info {
370 /* These registers are always read from the MC */ 370 /* These registers are always read from the MC */
371 u32 ferr_fat_fbd; /* First Errors Fatal */ 371 u32 ferr_fat_fbd; /* First Errors Fatal */
372 u32 nerr_fat_fbd; /* Next Errors Fatal */ 372 u32 nerr_fat_fbd; /* Next Errors Fatal */
373 u32 ferr_nf_fbd; /* First Errors Non-Fatal */ 373 u32 ferr_nf_fbd; /* First Errors Non-Fatal */
374 u32 nerr_nf_fbd; /* Next Errors Non-Fatal */ 374 u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
375 375
376 /* These registers are input ONLY if there was a Recoverable Error */ 376 /* These registers are input ONLY if there was a Recoverable Error */
377 u32 redmemb; /* Recoverable Mem Data Error log B */ 377 u32 redmemb; /* Recoverable Mem Data Error log B */
378 u16 recmema; /* Recoverable Mem Error log A */ 378 u16 recmema; /* Recoverable Mem Error log A */
379 u32 recmemb; /* Recoverable Mem Error log B */ 379 u32 recmemb; /* Recoverable Mem Error log B */
380 380
381 /* These registers are input ONLY if there was a Non-Rec Error */ 381 /* These registers are input ONLY if there was a Non-Rec Error */
382 u16 nrecmema; /* Non-Recoverable Mem log A */ 382 u16 nrecmema; /* Non-Recoverable Mem log A */
383 u16 nrecmemb; /* Non-Recoverable Mem log B */ 383 u16 nrecmemb; /* Non-Recoverable Mem log B */
384 384
385 }; 385 };
386 386
387 /* Note that nrec_rdwr changed from NRECMEMA to NRECMEMB between the 5000 and 387 /* Note that nrec_rdwr changed from NRECMEMA to NRECMEMB between the 5000 and
388 the 5400; better to use an inline function than a macro in this case */ 388 the 5400; better to use an inline function than a macro in this case */
389 static inline int nrec_bank(struct i5400_error_info *info) 389 static inline int nrec_bank(struct i5400_error_info *info)
390 { 390 {
391 return ((info->nrecmema) >> 12) & 0x7; 391 return ((info->nrecmema) >> 12) & 0x7;
392 } 392 }
393 static inline int nrec_rank(struct i5400_error_info *info) 393 static inline int nrec_rank(struct i5400_error_info *info)
394 { 394 {
395 return ((info->nrecmema) >> 8) & 0xf; 395 return ((info->nrecmema) >> 8) & 0xf;
396 } 396 }
397 static inline int nrec_buf_id(struct i5400_error_info *info) 397 static inline int nrec_buf_id(struct i5400_error_info *info)
398 { 398 {
399 return ((info->nrecmema)) & 0xff; 399 return ((info->nrecmema)) & 0xff;
400 } 400 }
401 static inline int nrec_rdwr(struct i5400_error_info *info) 401 static inline int nrec_rdwr(struct i5400_error_info *info)
402 { 402 {
403 return (info->nrecmemb) >> 31; 403 return (info->nrecmemb) >> 31;
404 } 404 }
405 /* This applies to both the NREC and REC strings, so it can be used with nrec_rdwr 405 /* This applies to both the NREC and REC strings, so it can be used with nrec_rdwr
406 and rec_rdwr */ 406 and rec_rdwr */
407 static inline const char *rdwr_str(int rdwr) 407 static inline const char *rdwr_str(int rdwr)
408 { 408 {
409 return rdwr ? "Write" : "Read"; 409 return rdwr ? "Write" : "Read";
410 } 410 }
411 static inline int nrec_cas(struct i5400_error_info *info) 411 static inline int nrec_cas(struct i5400_error_info *info)
412 { 412 {
413 return ((info->nrecmemb) >> 16) & 0x1fff; 413 return ((info->nrecmemb) >> 16) & 0x1fff;
414 } 414 }
415 static inline int nrec_ras(struct i5400_error_info *info) 415 static inline int nrec_ras(struct i5400_error_info *info)
416 { 416 {
417 return (info->nrecmemb) & 0xffff; 417 return (info->nrecmemb) & 0xffff;
418 } 418 }
419 static inline int rec_bank(struct i5400_error_info *info) 419 static inline int rec_bank(struct i5400_error_info *info)
420 { 420 {
421 return ((info->recmema) >> 12) & 0x7; 421 return ((info->recmema) >> 12) & 0x7;
422 } 422 }
423 static inline int rec_rank(struct i5400_error_info *info) 423 static inline int rec_rank(struct i5400_error_info *info)
424 { 424 {
425 return ((info->recmema) >> 8) & 0xf; 425 return ((info->recmema) >> 8) & 0xf;
426 } 426 }
427 static inline int rec_rdwr(struct i5400_error_info *info) 427 static inline int rec_rdwr(struct i5400_error_info *info)
428 { 428 {
429 return (info->recmemb) >> 31; 429 return (info->recmemb) >> 31;
430 } 430 }
431 static inline int rec_cas(struct i5400_error_info *info) 431 static inline int rec_cas(struct i5400_error_info *info)
432 { 432 {
433 return ((info->recmemb) >> 16) & 0x1fff; 433 return ((info->recmemb) >> 16) & 0x1fff;
434 } 434 }
435 static inline int rec_ras(struct i5400_error_info *info) 435 static inline int rec_ras(struct i5400_error_info *info)
436 { 436 {
437 return (info->recmemb) & 0xffff; 437 return (info->recmemb) & 0xffff;
438 } 438 }
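These accessors slice the RECMEMB/NRECMEMB logs into fields: bit 31 is the read/write flag, bits 28:16 the CAS, bits 15:0 the RAS. A standalone decode of an invented log value (illustrative only):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t recmemb = 0x80120034;	/* made-up log value */

		assert((recmemb >> 31) == 1);			/* a write */
		assert(((recmemb >> 16) & 0x1fff) == 0x12);	/* CAS = 0x12 */
		assert((recmemb & 0xffff) == 0x34);		/* RAS = 0x34 */
		return 0;
	}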
439 439
440 static struct edac_pci_ctl_info *i5400_pci; 440 static struct edac_pci_ctl_info *i5400_pci;
441 441
442 /* 442 /*
443 * i5400_get_error_info Retrieve the hardware error information from 443 * i5400_get_error_info Retrieve the hardware error information from
444 * the hardware and cache it in the 'info' 444 * the hardware and cache it in the 'info'
445 * structure 445 * structure
446 */ 446 */
447 static void i5400_get_error_info(struct mem_ctl_info *mci, 447 static void i5400_get_error_info(struct mem_ctl_info *mci,
448 struct i5400_error_info *info) 448 struct i5400_error_info *info)
449 { 449 {
450 struct i5400_pvt *pvt; 450 struct i5400_pvt *pvt;
451 u32 value; 451 u32 value;
452 452
453 pvt = mci->pvt_info; 453 pvt = mci->pvt_info;
454 454
455 /* read in the 1st FATAL error register */ 455 /* read in the 1st FATAL error register */
456 pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value); 456 pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
457 457
458 /* Mask only the bits that the doc says are valid 458 /* Mask only the bits that the doc says are valid
459 */ 459 */
460 value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK); 460 value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
461 461
462 /* If there is an error, then read in the 462 /* If there is an error, then read in the
463 NEXT FATAL error register and the Memory Error Log Registers A and B 463 NEXT FATAL error register and the Memory Error Log Registers A and B
464 */ 464 */
465 if (value & FERR_FAT_MASK) { 465 if (value & FERR_FAT_MASK) {
466 info->ferr_fat_fbd = value; 466 info->ferr_fat_fbd = value;
467 467
468 /* harvest the various error data we need */ 468 /* harvest the various error data we need */
469 pci_read_config_dword(pvt->branchmap_werrors, 469 pci_read_config_dword(pvt->branchmap_werrors,
470 NERR_FAT_FBD, &info->nerr_fat_fbd); 470 NERR_FAT_FBD, &info->nerr_fat_fbd);
471 pci_read_config_word(pvt->branchmap_werrors, 471 pci_read_config_word(pvt->branchmap_werrors,
472 NRECMEMA, &info->nrecmema); 472 NRECMEMA, &info->nrecmema);
473 pci_read_config_word(pvt->branchmap_werrors, 473 pci_read_config_word(pvt->branchmap_werrors,
474 NRECMEMB, &info->nrecmemb); 474 NRECMEMB, &info->nrecmemb);
475 475
476 /* Clear the error bits, by writing them back */ 476 /* Clear the error bits, by writing them back */
477 pci_write_config_dword(pvt->branchmap_werrors, 477 pci_write_config_dword(pvt->branchmap_werrors,
478 FERR_FAT_FBD, value); 478 FERR_FAT_FBD, value);
479 } else { 479 } else {
480 info->ferr_fat_fbd = 0; 480 info->ferr_fat_fbd = 0;
481 info->nerr_fat_fbd = 0; 481 info->nerr_fat_fbd = 0;
482 info->nrecmema = 0; 482 info->nrecmema = 0;
483 info->nrecmemb = 0; 483 info->nrecmemb = 0;
484 } 484 }
485 485
486 /* read in the 1st NON-FATAL error register */ 486 /* read in the 1st NON-FATAL error register */
487 pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value); 487 pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
488 488
 489 /* If there is an error, then read in the NEXT NON-FATAL error 489 /* If there is an error, then read in the NEXT NON-FATAL error
 490 * register and the recoverable-error log registers */ 490 * register and the recoverable-error log registers */
491 if (value & FERR_NF_MASK) { 491 if (value & FERR_NF_MASK) {
492 info->ferr_nf_fbd = value; 492 info->ferr_nf_fbd = value;
493 493
494 /* harvest the various error data we need */ 494 /* harvest the various error data we need */
495 pci_read_config_dword(pvt->branchmap_werrors, 495 pci_read_config_dword(pvt->branchmap_werrors,
496 NERR_NF_FBD, &info->nerr_nf_fbd); 496 NERR_NF_FBD, &info->nerr_nf_fbd);
497 pci_read_config_word(pvt->branchmap_werrors, 497 pci_read_config_word(pvt->branchmap_werrors,
498 RECMEMA, &info->recmema); 498 RECMEMA, &info->recmema);
499 pci_read_config_dword(pvt->branchmap_werrors, 499 pci_read_config_dword(pvt->branchmap_werrors,
500 RECMEMB, &info->recmemb); 500 RECMEMB, &info->recmemb);
501 pci_read_config_dword(pvt->branchmap_werrors, 501 pci_read_config_dword(pvt->branchmap_werrors,
502 REDMEMB, &info->redmemb); 502 REDMEMB, &info->redmemb);
503 503
504 /* Clear the error bits, by writing them back */ 504 /* Clear the error bits, by writing them back */
505 pci_write_config_dword(pvt->branchmap_werrors, 505 pci_write_config_dword(pvt->branchmap_werrors,
506 FERR_NF_FBD, value); 506 FERR_NF_FBD, value);
507 } else { 507 } else {
508 info->ferr_nf_fbd = 0; 508 info->ferr_nf_fbd = 0;
509 info->nerr_nf_fbd = 0; 509 info->nerr_nf_fbd = 0;
510 info->recmema = 0; 510 info->recmema = 0;
511 info->recmemb = 0; 511 info->recmemb = 0;
512 info->redmemb = 0; 512 info->redmemb = 0;
513 } 513 }
514 } 514 }
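Note the clearing idiom used twice above: the value just read from the first-error register is written straight back, because these status bits are write-1-to-clear (W1C). A hedged userspace model of that behavior:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t hw_status = 0x0005;	/* toy register: two error bits set */

	static uint32_t reg_read(void) { return hw_status; }
	static void reg_write(uint32_t v) { hw_status &= ~v; }	/* W1C semantics */

	int main(void)
	{
		uint32_t value = reg_read() & 0x00ff;	/* mask only the valid bits */

		if (value) {
			/* ...harvest the detailed error logs here... */
			reg_write(value);	/* writing the bits back clears them */
		}
		printf("status after clear: 0x%x\n", (unsigned)reg_read());	/* 0x0 */
		return 0;
	}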
515 515
516 /* 516 /*
517 * i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, 517 * i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
518 * struct i5400_error_info *info, 518 * struct i5400_error_info *info,
 519 * unsigned long allErrors); 519 * unsigned long allErrors);
520 * 520 *
521 * handle the Intel FATAL and unrecoverable errors, if any 521 * handle the Intel FATAL and unrecoverable errors, if any
522 */ 522 */
523 static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, 523 static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
524 struct i5400_error_info *info, 524 struct i5400_error_info *info,
525 unsigned long allErrors) 525 unsigned long allErrors)
526 { 526 {
527 char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80]; 527 char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
528 int branch; 528 int branch;
529 int channel; 529 int channel;
530 int bank; 530 int bank;
531 int buf_id; 531 int buf_id;
532 int rank; 532 int rank;
533 int rdwr; 533 int rdwr;
534 int ras, cas; 534 int ras, cas;
535 int errnum; 535 int errnum;
536 char *type = NULL; 536 char *type = NULL;
537 enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED; 537 enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
538 538
539 if (!allErrors) 539 if (!allErrors)
540 return; /* if no error, return now */ 540 return; /* if no error, return now */
541 541
542 if (allErrors & ERROR_FAT_MASK) { 542 if (allErrors & ERROR_FAT_MASK) {
543 type = "FATAL"; 543 type = "FATAL";
544 tp_event = HW_EVENT_ERR_FATAL; 544 tp_event = HW_EVENT_ERR_FATAL;
545 } else if (allErrors & FERR_NF_UNCORRECTABLE) 545 } else if (allErrors & FERR_NF_UNCORRECTABLE)
546 type = "NON-FATAL uncorrected"; 546 type = "NON-FATAL uncorrected";
547 else 547 else
548 type = "NON-FATAL recoverable"; 548 type = "NON-FATAL recoverable";
549 549
550 /* ONLY ONE of the possible error bits will be set, as per the docs */ 550 /* ONLY ONE of the possible error bits will be set, as per the docs */
551 551
552 branch = extract_fbdchan_indx(info->ferr_fat_fbd); 552 branch = extract_fbdchan_indx(info->ferr_fat_fbd);
553 channel = branch; 553 channel = branch;
554 554
555 /* Use the NON-Recoverable macros to extract data */ 555 /* Use the NON-Recoverable macros to extract data */
556 bank = nrec_bank(info); 556 bank = nrec_bank(info);
557 rank = nrec_rank(info); 557 rank = nrec_rank(info);
558 buf_id = nrec_buf_id(info); 558 buf_id = nrec_buf_id(info);
559 rdwr = nrec_rdwr(info); 559 rdwr = nrec_rdwr(info);
560 ras = nrec_ras(info); 560 ras = nrec_ras(info);
561 cas = nrec_cas(info); 561 cas = nrec_cas(info);
562 562
563 debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d " 563 debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
564 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", 564 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
565 rank, channel, channel + 1, branch >> 1, bank, 565 rank, channel, channel + 1, branch >> 1, bank,
566 buf_id, rdwr_str(rdwr), ras, cas); 566 buf_id, rdwr_str(rdwr), ras, cas);
567 567
568 /* Only 1 bit will be on */ 568 /* Only 1 bit will be on */
569 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 569 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
570 570
 571 /* Form the output message */ 571 /* Form the output message */
572 snprintf(msg, sizeof(msg), 572 snprintf(msg, sizeof(msg),
573 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)", 573 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
574 bank, buf_id, ras, cas, allErrors, error_name[errnum]); 574 bank, buf_id, ras, cas, allErrors, error_name[errnum]);
575 575
576 edac_mc_handle_error(tp_event, mci, 0, 0, 0, 576 edac_mc_handle_error(tp_event, mci, 0, 0, 0,
577 branch >> 1, -1, rank, 577 branch >> 1, -1, rank,
578 rdwr ? "Write error" : "Read error", 578 rdwr ? "Write error" : "Read error",
579 msg, NULL); 579 msg, NULL);
580 } 580 }
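Since the docs guarantee a one-hot mask, find_first_bit() turns allErrors directly into an index into error_name[]. Outside the kernel the same decode can be sketched with the GCC/Clang count-trailing-zeros builtin (the error names below are invented for illustration):

	#include <stdio.h>

	static const char * const demo_error_name[] = {
		"ECC uncorrectable", "CRC error", "alert frame", "thermal trip",
	};

	int main(void)
	{
		unsigned long all_errors = 0x4;		/* one-hot: only bit 2 set */

		if (all_errors) {
			/* __builtin_ctzl(): index of the first set bit (GCC/Clang) */
			int errnum = __builtin_ctzl(all_errors);

			if (errnum < 4)
				printf("Err=0x%lx (%s)\n",
				       all_errors, demo_error_name[errnum]);
		}
		return 0;
	}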
581 581
582 /* 582 /*
 583 * i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, 583 * i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
 584 * struct i5400_error_info *info); 584 * struct i5400_error_info *info);
586 * 586 *
587 * handle the Intel NON-FATAL errors, if any 587 * handle the Intel NON-FATAL errors, if any
588 */ 588 */
589 static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, 589 static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
590 struct i5400_error_info *info) 590 struct i5400_error_info *info)
591 { 591 {
592 char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80]; 592 char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
593 unsigned long allErrors; 593 unsigned long allErrors;
594 int branch; 594 int branch;
595 int channel; 595 int channel;
596 int bank; 596 int bank;
597 int rank; 597 int rank;
598 int rdwr; 598 int rdwr;
599 int ras, cas; 599 int ras, cas;
600 int errnum; 600 int errnum;
601 601
602 /* mask off the Error bits that are possible */ 602 /* mask off the Error bits that are possible */
603 allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK); 603 allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK);
604 if (!allErrors) 604 if (!allErrors)
605 return; /* if no error, return now */ 605 return; /* if no error, return now */
606 606
607 /* ONLY ONE of the possible error bits will be set, as per the docs */ 607 /* ONLY ONE of the possible error bits will be set, as per the docs */
608 608
609 if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) { 609 if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) {
610 i5400_proccess_non_recoverable_info(mci, info, allErrors); 610 i5400_proccess_non_recoverable_info(mci, info, allErrors);
611 return; 611 return;
612 } 612 }
613 613
614 /* Correctable errors */ 614 /* Correctable errors */
615 if (allErrors & ERROR_NF_CORRECTABLE) { 615 if (allErrors & ERROR_NF_CORRECTABLE) {
616 debugf0("\tCorrected bits= 0x%lx\n", allErrors); 616 debugf0("\tCorrected bits= 0x%lx\n", allErrors);
617 617
618 branch = extract_fbdchan_indx(info->ferr_nf_fbd); 618 branch = extract_fbdchan_indx(info->ferr_nf_fbd);
619 619
620 channel = 0; 620 channel = 0;
621 if (REC_ECC_LOCATOR_ODD(info->redmemb)) 621 if (REC_ECC_LOCATOR_ODD(info->redmemb))
622 channel = 1; 622 channel = 1;
623 623
 624 /* Convert the within-branch channel number to a 624 /* Convert the within-branch channel number to a
 625 * global one by adding the branch base */ 625 * global one by adding the branch base */
626 channel += branch; 626 channel += branch;
627 627
628 bank = rec_bank(info); 628 bank = rec_bank(info);
629 rank = rec_rank(info); 629 rank = rec_rank(info);
630 rdwr = rec_rdwr(info); 630 rdwr = rec_rdwr(info);
631 ras = rec_ras(info); 631 ras = rec_ras(info);
632 cas = rec_cas(info); 632 cas = rec_cas(info);
633 633
634 /* Only 1 bit will be on */ 634 /* Only 1 bit will be on */
635 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 635 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
636 636
637 debugf0("\t\tDIMM= %d Channel= %d (Branch %d " 637 debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
638 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 638 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
639 rank, channel, branch >> 1, bank, 639 rank, channel, branch >> 1, bank,
640 rdwr_str(rdwr), ras, cas); 640 rdwr_str(rdwr), ras, cas);
641 641
 642 /* Form the output message */ 642 /* Form the output message */
643 snprintf(msg, sizeof(msg), 643 snprintf(msg, sizeof(msg),
644 "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s " 644 "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s "
645 "RAS=%d CAS=%d, CE Err=0x%lx (%s))", 645 "RAS=%d CAS=%d, CE Err=0x%lx (%s))",
646 branch >> 1, bank, rdwr_str(rdwr), ras, cas, 646 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
647 allErrors, error_name[errnum]); 647 allErrors, error_name[errnum]);
648 648
649 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, 649 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
650 branch >> 1, channel % 2, rank, 650 branch >> 1, channel % 2, rank,
651 rdwr ? "Write error" : "Read error", 651 rdwr ? "Write error" : "Read error",
652 msg, NULL); 652 msg, NULL);
653 653
654 return; 654 return;
655 } 655 }
656 656
657 /* Miscellaneous errors */ 657 /* Miscellaneous errors */
658 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 658 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
659 659
660 branch = extract_fbdchan_indx(info->ferr_nf_fbd); 660 branch = extract_fbdchan_indx(info->ferr_nf_fbd);
661 661
662 i5400_mc_printk(mci, KERN_EMERG, 662 i5400_mc_printk(mci, KERN_EMERG,
663 "Non-Fatal misc error (Branch=%d Err=%#lx (%s))", 663 "Non-Fatal misc error (Branch=%d Err=%#lx (%s))",
664 branch >> 1, allErrors, error_name[errnum]); 664 branch >> 1, allErrors, error_name[errnum]);
665 } 665 }
666 666
667 /* 667 /*
668 * i5400_process_error_info Process the error info that is 668 * i5400_process_error_info Process the error info that is
669 * in the 'info' structure, previously retrieved from hardware 669 * in the 'info' structure, previously retrieved from hardware
670 */ 670 */
671 static void i5400_process_error_info(struct mem_ctl_info *mci, 671 static void i5400_process_error_info(struct mem_ctl_info *mci,
672 struct i5400_error_info *info) 672 struct i5400_error_info *info)
673 { u32 allErrors; 673 { u32 allErrors;
674 674
675 /* First handle any fatal errors that occurred */ 675 /* First handle any fatal errors that occurred */
676 allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK); 676 allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
677 i5400_proccess_non_recoverable_info(mci, info, allErrors); 677 i5400_proccess_non_recoverable_info(mci, info, allErrors);
678 678
679 /* now handle any non-fatal errors that occurred */ 679 /* now handle any non-fatal errors that occurred */
680 i5400_process_nonfatal_error_info(mci, info); 680 i5400_process_nonfatal_error_info(mci, info);
681 } 681 }
682 682
683 /* 683 /*
684 * i5400_clear_error Retrieve any error from the hardware 684 * i5400_clear_error Retrieve any error from the hardware
685 * but do NOT process that error. 685 * but do NOT process that error.
 686 * Used for 'clearing' out previous errors 686 * Used for 'clearing' out previous errors
687 * Called by the Core module. 687 * Called by the Core module.
688 */ 688 */
689 static void i5400_clear_error(struct mem_ctl_info *mci) 689 static void i5400_clear_error(struct mem_ctl_info *mci)
690 { 690 {
691 struct i5400_error_info info; 691 struct i5400_error_info info;
692 692
693 i5400_get_error_info(mci, &info); 693 i5400_get_error_info(mci, &info);
694 } 694 }
695 695
696 /* 696 /*
697 * i5400_check_error Retrieve and process errors reported by the 697 * i5400_check_error Retrieve and process errors reported by the
698 * hardware. Called by the Core module. 698 * hardware. Called by the Core module.
699 */ 699 */
700 static void i5400_check_error(struct mem_ctl_info *mci) 700 static void i5400_check_error(struct mem_ctl_info *mci)
701 { 701 {
702 struct i5400_error_info info; 702 struct i5400_error_info info;
703 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); 703 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
704 i5400_get_error_info(mci, &info); 704 i5400_get_error_info(mci, &info);
705 i5400_process_error_info(mci, &info); 705 i5400_process_error_info(mci, &info);
706 } 706 }
707 707
708 /* 708 /*
709 * i5400_put_devices 'put' all the devices that we have 709 * i5400_put_devices 'put' all the devices that we have
710 * reserved via 'get' 710 * reserved via 'get'
711 */ 711 */
712 static void i5400_put_devices(struct mem_ctl_info *mci) 712 static void i5400_put_devices(struct mem_ctl_info *mci)
713 { 713 {
714 struct i5400_pvt *pvt; 714 struct i5400_pvt *pvt;
715 715
716 pvt = mci->pvt_info; 716 pvt = mci->pvt_info;
717 717
718 /* Decrement usage count for devices */ 718 /* Decrement usage count for devices */
719 pci_dev_put(pvt->branch_1); 719 pci_dev_put(pvt->branch_1);
720 pci_dev_put(pvt->branch_0); 720 pci_dev_put(pvt->branch_0);
721 pci_dev_put(pvt->fsb_error_regs); 721 pci_dev_put(pvt->fsb_error_regs);
722 pci_dev_put(pvt->branchmap_werrors); 722 pci_dev_put(pvt->branchmap_werrors);
723 } 723 }
724 724
725 /* 725 /*
726 * i5400_get_devices Find and perform 'get' operation on the MCH's 726 * i5400_get_devices Find and perform 'get' operation on the MCH's
727 * device/functions we want to reference for this driver 727 * device/functions we want to reference for this driver
728 * 728 *
729 * Need to 'get' device 16 func 1 and func 2 729 * Need to 'get' device 16 func 1 and func 2
730 */ 730 */
731 static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx) 731 static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
732 { 732 {
733 struct i5400_pvt *pvt; 733 struct i5400_pvt *pvt;
734 struct pci_dev *pdev; 734 struct pci_dev *pdev;
735 735
736 pvt = mci->pvt_info; 736 pvt = mci->pvt_info;
737 pvt->branchmap_werrors = NULL; 737 pvt->branchmap_werrors = NULL;
738 pvt->fsb_error_regs = NULL; 738 pvt->fsb_error_regs = NULL;
739 pvt->branch_0 = NULL; 739 pvt->branch_0 = NULL;
740 pvt->branch_1 = NULL; 740 pvt->branch_1 = NULL;
741 741
742 /* Attempt to 'get' the MCH register we want */ 742 /* Attempt to 'get' the MCH register we want */
743 pdev = NULL; 743 pdev = NULL;
744 while (1) { 744 while (1) {
745 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 745 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
746 PCI_DEVICE_ID_INTEL_5400_ERR, pdev); 746 PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
747 if (!pdev) { 747 if (!pdev) {
748 /* End of list, leave */ 748 /* End of list, leave */
749 i5400_printk(KERN_ERR, 749 i5400_printk(KERN_ERR,
750 "'system address,Process Bus' " 750 "'system address,Process Bus' "
751 "device not found:" 751 "device not found:"
752 "vendor 0x%x device 0x%x ERR func 1 " 752 "vendor 0x%x device 0x%x ERR func 1 "
753 "(broken BIOS?)\n", 753 "(broken BIOS?)\n",
754 PCI_VENDOR_ID_INTEL, 754 PCI_VENDOR_ID_INTEL,
755 PCI_DEVICE_ID_INTEL_5400_ERR); 755 PCI_DEVICE_ID_INTEL_5400_ERR);
756 return -ENODEV; 756 return -ENODEV;
757 } 757 }
758 758
759 /* Store device 16 func 1 */ 759 /* Store device 16 func 1 */
760 if (PCI_FUNC(pdev->devfn) == 1) 760 if (PCI_FUNC(pdev->devfn) == 1)
761 break; 761 break;
762 } 762 }
763 pvt->branchmap_werrors = pdev; 763 pvt->branchmap_werrors = pdev;
764 764
765 pdev = NULL; 765 pdev = NULL;
766 while (1) { 766 while (1) {
767 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 767 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
768 PCI_DEVICE_ID_INTEL_5400_ERR, pdev); 768 PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
769 if (!pdev) { 769 if (!pdev) {
770 /* End of list, leave */ 770 /* End of list, leave */
771 i5400_printk(KERN_ERR, 771 i5400_printk(KERN_ERR,
772 "'system address,Process Bus' " 772 "'system address,Process Bus' "
773 "device not found:" 773 "device not found:"
774 "vendor 0x%x device 0x%x ERR func 2 " 774 "vendor 0x%x device 0x%x ERR func 2 "
775 "(broken BIOS?)\n", 775 "(broken BIOS?)\n",
776 PCI_VENDOR_ID_INTEL, 776 PCI_VENDOR_ID_INTEL,
777 PCI_DEVICE_ID_INTEL_5400_ERR); 777 PCI_DEVICE_ID_INTEL_5400_ERR);
778 778
779 pci_dev_put(pvt->branchmap_werrors); 779 pci_dev_put(pvt->branchmap_werrors);
780 return -ENODEV; 780 return -ENODEV;
781 } 781 }
782 782
783 /* Store device 16 func 2 */ 783 /* Store device 16 func 2 */
784 if (PCI_FUNC(pdev->devfn) == 2) 784 if (PCI_FUNC(pdev->devfn) == 2)
785 break; 785 break;
786 } 786 }
787 pvt->fsb_error_regs = pdev; 787 pvt->fsb_error_regs = pdev;
788 788
789 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", 789 debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
790 pci_name(pvt->system_address), 790 pci_name(pvt->system_address),
791 pvt->system_address->vendor, pvt->system_address->device); 791 pvt->system_address->vendor, pvt->system_address->device);
792 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 792 debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
793 pci_name(pvt->branchmap_werrors), 793 pci_name(pvt->branchmap_werrors),
794 pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); 794 pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
795 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", 795 debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
796 pci_name(pvt->fsb_error_regs), 796 pci_name(pvt->fsb_error_regs),
797 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); 797 pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
798 798
799 pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL, 799 pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
800 PCI_DEVICE_ID_INTEL_5400_FBD0, NULL); 800 PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
801 if (!pvt->branch_0) { 801 if (!pvt->branch_0) {
802 i5400_printk(KERN_ERR, 802 i5400_printk(KERN_ERR,
803 "MC: 'BRANCH 0' device not found:" 803 "MC: 'BRANCH 0' device not found:"
804 "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", 804 "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
805 PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0); 805 PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0);
806 806
807 pci_dev_put(pvt->fsb_error_regs); 807 pci_dev_put(pvt->fsb_error_regs);
808 pci_dev_put(pvt->branchmap_werrors); 808 pci_dev_put(pvt->branchmap_werrors);
809 return -ENODEV; 809 return -ENODEV;
810 } 810 }
811 811
812 /* If this device claims to have more than 2 channels then 812 /* If this device claims to have more than 2 channels then
813 * fetch Branch 1's information 813 * fetch Branch 1's information
814 */ 814 */
815 if (pvt->maxch < CHANNELS_PER_BRANCH) 815 if (pvt->maxch < CHANNELS_PER_BRANCH)
816 return 0; 816 return 0;
817 817
818 pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL, 818 pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL,
819 PCI_DEVICE_ID_INTEL_5400_FBD1, NULL); 819 PCI_DEVICE_ID_INTEL_5400_FBD1, NULL);
820 if (!pvt->branch_1) { 820 if (!pvt->branch_1) {
821 i5400_printk(KERN_ERR, 821 i5400_printk(KERN_ERR,
822 "MC: 'BRANCH 1' device not found:" 822 "MC: 'BRANCH 1' device not found:"
823 "vendor 0x%x device 0x%x Func 0 " 823 "vendor 0x%x device 0x%x Func 0 "
824 "(broken BIOS?)\n", 824 "(broken BIOS?)\n",
825 PCI_VENDOR_ID_INTEL, 825 PCI_VENDOR_ID_INTEL,
826 PCI_DEVICE_ID_INTEL_5400_FBD1); 826 PCI_DEVICE_ID_INTEL_5400_FBD1);
827 827
828 pci_dev_put(pvt->branch_0); 828 pci_dev_put(pvt->branch_0);
829 pci_dev_put(pvt->fsb_error_regs); 829 pci_dev_put(pvt->fsb_error_regs);
830 pci_dev_put(pvt->branchmap_werrors); 830 pci_dev_put(pvt->branchmap_werrors);
831 return -ENODEV; 831 return -ENODEV;
832 } 832 }
833 833
834 return 0; 834 return 0;
835 } 835 }
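Each failure path above releases, in reverse order, every device acquired so far; i5400_probe1() later does the same unwinding with goto labels. A compact standalone sketch of that acquire/rollback discipline (resources and names are invented):

	#include <stdlib.h>

	struct res { int dummy; };

	static struct res *acquire(void) { return malloc(sizeof(struct res)); }
	static void release(struct res *r) { free(r); }

	static int demo_get_devices(struct res **a, struct res **b, struct res **c)
	{
		*a = acquire();
		if (!*a)
			return -1;
		*b = acquire();
		if (!*b)
			goto fail_a;
		*c = acquire();
		if (!*c)
			goto fail_b;
		return 0;

	fail_b:			/* unwind in reverse order of acquisition */
		release(*b);
	fail_a:
		release(*a);
		return -1;
	}

	int main(void)
	{
		struct res *a, *b, *c;

		if (demo_get_devices(&a, &b, &c))
			return 1;
		release(c);
		release(b);
		release(a);
		return 0;
	}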
836 836
837 /* 837 /*
 838 * determine_amb_present_reg 838 * determine_amb_present_reg
839 * 839 *
 840 * the AMB-present information lives in one register per 840 * the AMB-present information lives in one register per
 841 * channel, so selecting the right register requires 841 * channel, so selecting the right register requires
 842 * knowing which channel is in question 842 * knowing which channel is in question
843 * 843 *
844 * 2 branches, each with 2 channels 844 * 2 branches, each with 2 channels
845 * b0_ambpresent0 for channel '0' 845 * b0_ambpresent0 for channel '0'
846 * b0_ambpresent1 for channel '1' 846 * b0_ambpresent1 for channel '1'
847 * b1_ambpresent0 for channel '2' 847 * b1_ambpresent0 for channel '2'
848 * b1_ambpresent1 for channel '3' 848 * b1_ambpresent1 for channel '3'
849 */ 849 */
850 static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel) 850 static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
851 { 851 {
852 int amb_present; 852 int amb_present;
853 853
854 if (channel < CHANNELS_PER_BRANCH) { 854 if (channel < CHANNELS_PER_BRANCH) {
855 if (channel & 0x1) 855 if (channel & 0x1)
856 amb_present = pvt->b0_ambpresent1; 856 amb_present = pvt->b0_ambpresent1;
857 else 857 else
858 amb_present = pvt->b0_ambpresent0; 858 amb_present = pvt->b0_ambpresent0;
859 } else { 859 } else {
860 if (channel & 0x1) 860 if (channel & 0x1)
861 amb_present = pvt->b1_ambpresent1; 861 amb_present = pvt->b1_ambpresent1;
862 else 862 else
863 amb_present = pvt->b1_ambpresent0; 863 amb_present = pvt->b1_ambpresent0;
864 } 864 }
865 865
866 return amb_present; 866 return amb_present;
867 } 867 }
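The nested if/else above is really a 2x2 selection: branch index (channel / CHANNELS_PER_BRANCH) by channel parity (channel & 1). A standalone sketch of the equivalent table-driven form, with fake register values mirroring the b{0,1}_ambpresent{0,1} layout:

	#include <stdio.h>

	#define DEMO_CHANNELS_PER_BRANCH 2

	struct demo_pvt {
		int b0_ambpresent0, b0_ambpresent1;
		int b1_ambpresent0, b1_ambpresent1;
	};

	static int demo_amb_present(const struct demo_pvt *pvt, int channel)
	{
		const int regs[2][2] = {
			{ pvt->b0_ambpresent0, pvt->b0_ambpresent1 },
			{ pvt->b1_ambpresent0, pvt->b1_ambpresent1 },
		};

		return regs[channel / DEMO_CHANNELS_PER_BRANCH][channel & 1];
	}

	int main(void)
	{
		struct demo_pvt pvt = { 0x1, 0x2, 0x4, 0x8 };
		int ch;

		for (ch = 0; ch < 4; ch++)	/* channels 0..3 -> the four regs */
			printf("channel %d -> 0x%x\n", ch, demo_amb_present(&pvt, ch));
		return 0;
	}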
868 868
869 /* 869 /*
870 * determine_mtr(pvt, dimm, channel) 870 * determine_mtr(pvt, dimm, channel)
871 * 871 *
 872 * return the proper MTR register as determined by the dimm and desired channel 872 * return the proper MTR register as determined by the dimm and desired channel
873 */ 873 */
874 static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel) 874 static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
875 { 875 {
876 int mtr; 876 int mtr;
877 int n; 877 int n;
878 878
 879 /* There is one MTR for each slot pair of FB-DIMMs; 879 /* There is one MTR for each slot pair of FB-DIMMs;
 880 each slot pair may be at branch 0 or branch 1. 880 each slot pair may be at branch 0 or branch 1.
881 */ 881 */
882 n = dimm; 882 n = dimm;
883 883
884 if (n >= DIMMS_PER_CHANNEL) { 884 if (n >= DIMMS_PER_CHANNEL) {
885 debugf0("ERROR: trying to access an invalid dimm: %d\n", 885 debugf0("ERROR: trying to access an invalid dimm: %d\n",
886 dimm); 886 dimm);
887 return 0; 887 return 0;
888 } 888 }
889 889
890 if (channel < CHANNELS_PER_BRANCH) 890 if (channel < CHANNELS_PER_BRANCH)
891 mtr = pvt->b0_mtr[n]; 891 mtr = pvt->b0_mtr[n];
892 else 892 else
893 mtr = pvt->b1_mtr[n]; 893 mtr = pvt->b1_mtr[n];
894 894
895 return mtr; 895 return mtr;
896 } 896 }
897 897
 898 /* decode_mtr	Decode and dump the fields of one MTR register */ 898 /* decode_mtr	Decode and dump the fields of one MTR register */
900 static void decode_mtr(int slot_row, u16 mtr) 900 static void decode_mtr(int slot_row, u16 mtr)
901 { 901 {
902 int ans; 902 int ans;
903 903
904 ans = MTR_DIMMS_PRESENT(mtr); 904 ans = MTR_DIMMS_PRESENT(mtr);
905 905
906 debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, 906 debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
907 ans ? "Present" : "NOT Present"); 907 ans ? "Present" : "NOT Present");
908 if (!ans) 908 if (!ans)
909 return; 909 return;
910 910
911 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 911 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
912 912
913 debugf2("\t\tELECTRICAL THROTTLING is %s\n", 913 debugf2("\t\tELECTRICAL THROTTLING is %s\n",
914 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); 914 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
915 915
916 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); 916 debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
917 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); 917 debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
918 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); 918 debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
919 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 919 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
920 } 920 }
921 921
922 static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel, 922 static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
923 struct i5400_dimm_info *dinfo) 923 struct i5400_dimm_info *dinfo)
924 { 924 {
925 int mtr; 925 int mtr;
926 int amb_present_reg; 926 int amb_present_reg;
927 int addrBits; 927 int addrBits;
928 928
929 mtr = determine_mtr(pvt, dimm, channel); 929 mtr = determine_mtr(pvt, dimm, channel);
930 if (MTR_DIMMS_PRESENT(mtr)) { 930 if (MTR_DIMMS_PRESENT(mtr)) {
931 amb_present_reg = determine_amb_present_reg(pvt, channel); 931 amb_present_reg = determine_amb_present_reg(pvt, channel);
932 932
933 /* Determine if there is a DIMM present in this DIMM slot */ 933 /* Determine if there is a DIMM present in this DIMM slot */
934 if (amb_present_reg & (1 << dimm)) { 934 if (amb_present_reg & (1 << dimm)) {
935 /* Start with the number of bits for a Bank 935 /* Start with the number of bits for a Bank
936 * on the DRAM */ 936 * on the DRAM */
937 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); 937 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
 938 /* Add the number of ROW bits */ 938 /* Add the number of ROW bits */
939 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); 939 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
940 /* add the number of COLUMN bits */ 940 /* add the number of COLUMN bits */
941 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); 941 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
942 /* add the number of RANK bits */ 942 /* add the number of RANK bits */
943 addrBits += MTR_DIMM_RANK(mtr); 943 addrBits += MTR_DIMM_RANK(mtr);
944 944
945 addrBits += 6; /* add 64 bits per DIMM */ 945 addrBits += 6; /* add 64 bits per DIMM */
 946 addrBits -= 20; /* divide by 2^20 */ 946 addrBits -= 20; /* divide by 2^20 */
 947 addrBits -= 3; /* 8 bits per byte */ 947 addrBits -= 3; /* 8 bits per byte */
948 948
949 dinfo->megabytes = 1 << addrBits; 949 dinfo->megabytes = 1 << addrBits;
950 } 950 }
951 } 951 }
952 } 952 }
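The size computation above is pure exponent bookkeeping: 2^(bank + row + column + rank address bits) locations, times 64 data bits (+6), converted to megabytes (-20) and from bits to bytes (-3). A worked standalone example with illustrative DIMM parameters:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative FB-DIMM: 4 banks (2 bits), 2^14 rows,
		 * 2^10 columns, dual rank (1 bit). */
		int addr_bits = 2 + 14 + 10 + 1;

		addr_bits += 6;		/* 64 data bits per location */
		addr_bits -= 20;	/* express the result in MB (2^20) */
		addr_bits -= 3;		/* 8 bits per byte */

		printf("%d MB\n", 1 << addr_bits);	/* prints 1024 MB */
		return 0;
	}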
953 953
954 /* 954 /*
955 * calculate_dimm_size 955 * calculate_dimm_size
956 * 956 *
 957 * will also output a DIMM matrix map, if debug is enabled, showing 957 * will also output a DIMM matrix map, if debug is enabled, showing
958 * how the DIMMs are populated 958 * how the DIMMs are populated
959 */ 959 */
960 static void calculate_dimm_size(struct i5400_pvt *pvt) 960 static void calculate_dimm_size(struct i5400_pvt *pvt)
961 { 961 {
962 struct i5400_dimm_info *dinfo; 962 struct i5400_dimm_info *dinfo;
963 int dimm, max_dimms; 963 int dimm, max_dimms;
964 char *p, *mem_buffer; 964 char *p, *mem_buffer;
965 int space, n; 965 int space, n;
966 int channel, branch; 966 int channel, branch;
967 967
968 /* ================= Generate some debug output ================= */ 968 /* ================= Generate some debug output ================= */
969 space = PAGE_SIZE; 969 space = PAGE_SIZE;
970 mem_buffer = p = kmalloc(space, GFP_KERNEL); 970 mem_buffer = p = kmalloc(space, GFP_KERNEL);
971 if (p == NULL) { 971 if (p == NULL) {
972 i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n", 972 i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
973 __FILE__, __func__); 973 __FILE__, __func__);
974 return; 974 return;
975 } 975 }
976 976
977 /* Scan all the actual DIMMS 977 /* Scan all the actual DIMMS
978 * and calculate the information for each DIMM 978 * and calculate the information for each DIMM
979 * Start with the highest dimm first, to display it first 979 * Start with the highest dimm first, to display it first
980 * and work toward the 0th dimm 980 * and work toward the 0th dimm
981 */ 981 */
982 max_dimms = pvt->maxdimmperch; 982 max_dimms = pvt->maxdimmperch;
983 for (dimm = max_dimms - 1; dimm >= 0; dimm--) { 983 for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
984 984
985 /* on an odd dimm, first output a 'boundary' marker, 985 /* on an odd dimm, first output a 'boundary' marker,
986 * then reset the message buffer */ 986 * then reset the message buffer */
987 if (dimm & 0x1) { 987 if (dimm & 0x1) {
988 n = snprintf(p, space, "---------------------------" 988 n = snprintf(p, space, "---------------------------"
989 "-------------------------------"); 989 "-------------------------------");
990 p += n; 990 p += n;
991 space -= n; 991 space -= n;
992 debugf2("%s\n", mem_buffer); 992 debugf2("%s\n", mem_buffer);
993 p = mem_buffer; 993 p = mem_buffer;
994 space = PAGE_SIZE; 994 space = PAGE_SIZE;
995 } 995 }
996 n = snprintf(p, space, "dimm %2d ", dimm); 996 n = snprintf(p, space, "dimm %2d ", dimm);
997 p += n; 997 p += n;
998 space -= n; 998 space -= n;
999 999
1000 for (channel = 0; channel < pvt->maxch; channel++) { 1000 for (channel = 0; channel < pvt->maxch; channel++) {
1001 dinfo = &pvt->dimm_info[dimm][channel]; 1001 dinfo = &pvt->dimm_info[dimm][channel];
1002 handle_channel(pvt, dimm, channel, dinfo); 1002 handle_channel(pvt, dimm, channel, dinfo);
1003 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); 1003 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
1004 p += n; 1004 p += n;
1005 space -= n; 1005 space -= n;
1006 } 1006 }
1007 debugf2("%s\n", mem_buffer); 1007 debugf2("%s\n", mem_buffer);
1008 p = mem_buffer; 1008 p = mem_buffer;
1009 space = PAGE_SIZE; 1009 space = PAGE_SIZE;
1010 } 1010 }
1011 1011
1012 /* Output the last bottom 'boundary' marker */ 1012 /* Output the last bottom 'boundary' marker */
1013 n = snprintf(p, space, "---------------------------" 1013 n = snprintf(p, space, "---------------------------"
1014 "-------------------------------"); 1014 "-------------------------------");
1015 p += n; 1015 p += n;
1016 space -= n; 1016 space -= n;
1017 debugf2("%s\n", mem_buffer); 1017 debugf2("%s\n", mem_buffer);
1018 p = mem_buffer; 1018 p = mem_buffer;
1019 space = PAGE_SIZE; 1019 space = PAGE_SIZE;
1020 1020
1021 /* now output the 'channel' labels */ 1021 /* now output the 'channel' labels */
1022 n = snprintf(p, space, " "); 1022 n = snprintf(p, space, " ");
1023 p += n; 1023 p += n;
1024 space -= n; 1024 space -= n;
1025 for (channel = 0; channel < pvt->maxch; channel++) { 1025 for (channel = 0; channel < pvt->maxch; channel++) {
1026 n = snprintf(p, space, "channel %d | ", channel); 1026 n = snprintf(p, space, "channel %d | ", channel);
1027 p += n; 1027 p += n;
1028 space -= n; 1028 space -= n;
1029 } 1029 }
1030 1030
 1031 1031
1032 debugf2("%s\n", mem_buffer); 1032 debugf2("%s\n", mem_buffer);
1033 p = mem_buffer; 1033 p = mem_buffer;
1034 space = PAGE_SIZE; 1034 space = PAGE_SIZE;
1035 1035
1036 n = snprintf(p, space, " "); 1036 n = snprintf(p, space, " ");
1037 p += n; 1037 p += n;
1038 for (branch = 0; branch < MAX_BRANCHES; branch++) { 1038 for (branch = 0; branch < MAX_BRANCHES; branch++) {
1039 n = snprintf(p, space, " branch %d | ", branch); 1039 n = snprintf(p, space, " branch %d | ", branch);
1040 p += n; 1040 p += n;
1041 space -= n; 1041 space -= n;
1042 } 1042 }
1043 1043
1044 /* output the last message and free buffer */ 1044 /* output the last message and free buffer */
1045 debugf2("%s\n", mem_buffer); 1045 debugf2("%s\n", mem_buffer);
1046 kfree(mem_buffer); 1046 kfree(mem_buffer);
1047 } 1047 }
1048 1048
1049 /* 1049 /*
1050 * i5400_get_mc_regs read in the necessary registers and 1050 * i5400_get_mc_regs read in the necessary registers and
1051 * cache locally 1051 * cache locally
1052 * 1052 *
1053 * Fills in the private data members 1053 * Fills in the private data members
1054 */ 1054 */
1055 static void i5400_get_mc_regs(struct mem_ctl_info *mci) 1055 static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1056 { 1056 {
1057 struct i5400_pvt *pvt; 1057 struct i5400_pvt *pvt;
1058 u32 actual_tolm; 1058 u32 actual_tolm;
1059 u16 limit; 1059 u16 limit;
1060 int slot_row; 1060 int slot_row;
1061 int maxch; 1061 int maxch;
1062 int maxdimmperch; 1062 int maxdimmperch;
1063 int way0, way1; 1063 int way0, way1;
1064 1064
1065 pvt = mci->pvt_info; 1065 pvt = mci->pvt_info;
1066 1066
1067 pci_read_config_dword(pvt->system_address, AMBASE, 1067 pci_read_config_dword(pvt->system_address, AMBASE,
1068 (u32 *) &pvt->ambase); 1068 (u32 *) &pvt->ambase);
1069 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), 1069 pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
 1070 ((u32 *) &pvt->ambase) + 1); 1070 ((u32 *) &pvt->ambase) + 1);
1071 1071
1072 maxdimmperch = pvt->maxdimmperch; 1072 maxdimmperch = pvt->maxdimmperch;
1073 maxch = pvt->maxch; 1073 maxch = pvt->maxch;
1074 1074
1075 debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", 1075 debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
1076 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); 1076 (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
1077 1077
1078 /* Get the Branch Map regs */ 1078 /* Get the Branch Map regs */
1079 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); 1079 pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
1080 pvt->tolm >>= 12; 1080 pvt->tolm >>= 12;
1081 debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, 1081 debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
1082 pvt->tolm); 1082 pvt->tolm);
1083 1083
1084 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); 1084 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
1085 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", 1085 debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
1086 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); 1086 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
1087 1087
1088 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); 1088 pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
1089 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); 1089 pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
1090 1090
1091 /* Get the MIR[0-1] regs */ 1091 /* Get the MIR[0-1] regs */
1092 limit = (pvt->mir0 >> 4) & 0x0fff; 1092 limit = (pvt->mir0 >> 4) & 0x0fff;
1093 way0 = pvt->mir0 & 0x1; 1093 way0 = pvt->mir0 & 0x1;
1094 way1 = pvt->mir0 & 0x2; 1094 way1 = pvt->mir0 & 0x2;
1095 debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1095 debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
1096 limit = (pvt->mir1 >> 4) & 0xfff; 1096 limit = (pvt->mir1 >> 4) & 0xfff;
1097 way0 = pvt->mir1 & 0x1; 1097 way0 = pvt->mir1 & 0x1;
1098 way1 = pvt->mir1 & 0x2; 1098 way1 = pvt->mir1 & 0x2;
1099 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1099 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
1100 1100
1101 /* Get the set of MTR[0-3] regs by each branch */ 1101 /* Get the set of MTR[0-3] regs by each branch */
1102 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) { 1102 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
1103 int where = MTR0 + (slot_row * sizeof(u16)); 1103 int where = MTR0 + (slot_row * sizeof(u16));
1104 1104
1105 /* Branch 0 set of MTR registers */ 1105 /* Branch 0 set of MTR registers */
1106 pci_read_config_word(pvt->branch_0, where, 1106 pci_read_config_word(pvt->branch_0, where,
1107 &pvt->b0_mtr[slot_row]); 1107 &pvt->b0_mtr[slot_row]);
1108 1108
1109 debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, 1109 debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
1110 pvt->b0_mtr[slot_row]); 1110 pvt->b0_mtr[slot_row]);
1111 1111
1112 if (pvt->maxch < CHANNELS_PER_BRANCH) { 1112 if (pvt->maxch < CHANNELS_PER_BRANCH) {
1113 pvt->b1_mtr[slot_row] = 0; 1113 pvt->b1_mtr[slot_row] = 0;
1114 continue; 1114 continue;
1115 } 1115 }
1116 1116
1117 /* Branch 1 set of MTR registers */ 1117 /* Branch 1 set of MTR registers */
1118 pci_read_config_word(pvt->branch_1, where, 1118 pci_read_config_word(pvt->branch_1, where,
1119 &pvt->b1_mtr[slot_row]); 1119 &pvt->b1_mtr[slot_row]);
1120 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where, 1120 debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where,
1121 pvt->b1_mtr[slot_row]); 1121 pvt->b1_mtr[slot_row]);
1122 } 1122 }
1123 1123
1124 /* Read and dump branch 0's MTRs */ 1124 /* Read and dump branch 0's MTRs */
1125 debugf2("\nMemory Technology Registers:\n"); 1125 debugf2("\nMemory Technology Registers:\n");
1126 debugf2(" Branch 0:\n"); 1126 debugf2(" Branch 0:\n");
1127 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) 1127 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1128 decode_mtr(slot_row, pvt->b0_mtr[slot_row]); 1128 decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
1129 1129
1130 pci_read_config_word(pvt->branch_0, AMBPRESENT_0, 1130 pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
1131 &pvt->b0_ambpresent0); 1131 &pvt->b0_ambpresent0);
1132 debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); 1132 debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
1133 pci_read_config_word(pvt->branch_0, AMBPRESENT_1, 1133 pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
1134 &pvt->b0_ambpresent1); 1134 &pvt->b0_ambpresent1);
1135 debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); 1135 debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
1136 1136
 1137 /* Only if we have 2 branches (4 channels) */ 1137 /* Only if we have 2 branches (4 channels) */
1138 if (pvt->maxch < CHANNELS_PER_BRANCH) { 1138 if (pvt->maxch < CHANNELS_PER_BRANCH) {
1139 pvt->b1_ambpresent0 = 0; 1139 pvt->b1_ambpresent0 = 0;
1140 pvt->b1_ambpresent1 = 0; 1140 pvt->b1_ambpresent1 = 0;
1141 } else { 1141 } else {
1142 /* Read and dump branch 1's MTRs */ 1142 /* Read and dump branch 1's MTRs */
1143 debugf2(" Branch 1:\n"); 1143 debugf2(" Branch 1:\n");
1144 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) 1144 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1145 decode_mtr(slot_row, pvt->b1_mtr[slot_row]); 1145 decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
1146 1146
1147 pci_read_config_word(pvt->branch_1, AMBPRESENT_0, 1147 pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
1148 &pvt->b1_ambpresent0); 1148 &pvt->b1_ambpresent0);
1149 debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", 1149 debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
1150 pvt->b1_ambpresent0); 1150 pvt->b1_ambpresent0);
1151 pci_read_config_word(pvt->branch_1, AMBPRESENT_1, 1151 pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
1152 &pvt->b1_ambpresent1); 1152 &pvt->b1_ambpresent1);
1153 debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", 1153 debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
1154 pvt->b1_ambpresent1); 1154 pvt->b1_ambpresent1);
1155 } 1155 }
1156 1156
1157 /* Go and determine the size of each DIMM and place in an 1157 /* Go and determine the size of each DIMM and place in an
1158 * orderly matrix */ 1158 * orderly matrix */
1159 calculate_dimm_size(pvt); 1159 calculate_dimm_size(pvt);
1160 } 1160 }
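The TOLM arithmetic deserves a gloss: after the 12-bit shift, pvt->tolm counts 256 MB regions, so the byte address is tolm << 28 and (1000 * tolm) >> 2 is the same quantity in thousandths of a GB for the debug print. A worked standalone example:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tolm = 9;	/* sample: nine 256 MB regions */
		unsigned int milli_gb = (1000 * tolm) >> (30 - 28);	/* = tolm/4 GB */

		/* prints "TOLM byte addr = 2.250 GB (0x90000000)" */
		printf("TOLM byte addr = %u.%03u GB (0x%x)\n",
		       milli_gb / 1000, milli_gb % 1000, tolm << 28);
		return 0;
	}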
1161 1161
1162 /* 1162 /*
1163 * i5400_init_dimms Initialize the 'dimms' table within 1163 * i5400_init_dimms Initialize the 'dimms' table within
1164 * the mci control structure with the 1164 * the mci control structure with the
1165 * addressing of memory. 1165 * addressing of memory.
1166 * 1166 *
1167 * return: 1167 * return:
1168 * 0 success 1168 * 0 success
1169 * 1 no actual memory found on this MC 1169 * 1 no actual memory found on this MC
1170 */ 1170 */
1171 static int i5400_init_dimms(struct mem_ctl_info *mci) 1171 static int i5400_init_dimms(struct mem_ctl_info *mci)
1172 { 1172 {
1173 struct i5400_pvt *pvt; 1173 struct i5400_pvt *pvt;
1174 struct dimm_info *dimm; 1174 struct dimm_info *dimm;
1175 int ndimms, channel_count; 1175 int ndimms, channel_count;
1176 int max_dimms; 1176 int max_dimms;
1177 int mtr; 1177 int mtr;
1178 int size_mb; 1178 int size_mb;
1179 int channel, slot; 1179 int channel, slot;
1180 1180
1181 pvt = mci->pvt_info; 1181 pvt = mci->pvt_info;
1182 1182
1183 channel_count = pvt->maxch; 1183 channel_count = pvt->maxch;
1184 max_dimms = pvt->maxdimmperch; 1184 max_dimms = pvt->maxdimmperch;
1185 1185
1186 ndimms = 0; 1186 ndimms = 0;
1187 1187
1188 /* 1188 /*
1189 * FIXME: remove pvt->dimm_info[slot][channel] and use the 3 1189 * FIXME: remove pvt->dimm_info[slot][channel] and use the 3
1190 * layers here. 1190 * layers here.
1191 */ 1191 */
1192 for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size; 1192 for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
1193 channel++) { 1193 channel++) {
1194 for (slot = 0; slot < mci->layers[2].size; slot++) { 1194 for (slot = 0; slot < mci->layers[2].size; slot++) {
1195 mtr = determine_mtr(pvt, slot, channel); 1195 mtr = determine_mtr(pvt, slot, channel);
1196 1196
1197 /* if no DIMMS on this slot, continue */ 1197 /* if no DIMMS on this slot, continue */
1198 if (!MTR_DIMMS_PRESENT(mtr)) 1198 if (!MTR_DIMMS_PRESENT(mtr))
1199 continue; 1199 continue;
1200 1200
1201 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 1201 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
1202 channel / 2, channel % 2, slot); 1202 channel / 2, channel % 2, slot);
1203 1203
1204 size_mb = pvt->dimm_info[slot][channel].megabytes; 1204 size_mb = pvt->dimm_info[slot][channel].megabytes;
1205 1205
1206 debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n", 1206 debugf2("%s: dimm (branch %d channel %d slot %d): %d.%03d GB\n",
1207 __func__, dimm - mci->dimms, 1207 __func__,
1208 channel / 2, channel % 2, slot, 1208 channel / 2, channel % 2, slot,
1209 size_mb / 1000, size_mb % 1000); 1209 size_mb / 1000, size_mb % 1000);
1210 1210
1211 dimm->nr_pages = size_mb << 8; 1211 dimm->nr_pages = size_mb << 8;
1212 dimm->grain = 8; 1212 dimm->grain = 8;
1213 dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; 1213 dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
1214 dimm->mtype = MEM_FB_DDR2; 1214 dimm->mtype = MEM_FB_DDR2;
1215 /* 1215 /*
 1216 * The ECC mechanism is SDDC (aka SECC), which 1216 * The ECC mechanism is SDDC (aka SECC), which
 1217 * is similar to Chipkill. 1217 * is similar to Chipkill.
1218 */ 1218 */
1219 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? 1219 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
1220 EDAC_S8ECD8ED : EDAC_S4ECD4ED; 1220 EDAC_S8ECD8ED : EDAC_S4ECD4ED;
1221 ndimms++; 1221 ndimms++;
1222 } 1222 }
1223 } 1223 }
1224 1224
1225 /* 1225 /*
 1226 * When just one DIMM is present, it should be at location (0,0,0). 1226 * When just one DIMM is present, it should be at location (0,0,0).
 1227 * In such single-DIMM mode, the SDDC algorithm degrades to SECDED. 1227 * In such single-DIMM mode, the SDDC algorithm degrades to SECDED.
1228 */ 1228 */
1229 if (ndimms == 1) 1229 if (ndimms == 1)
1230 mci->dimms[0].edac_mode = EDAC_SECDED; 1230 mci->dimms[0]->edac_mode = EDAC_SECDED;
1231 1231
1232 return (ndimms == 0); 1232 return (ndimms == 0);
1233 } 1233 }
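The nr_pages assignment above is a unit conversion: with 4 KiB pages, one megabyte is 2^(20-12) = 256 pages, hence size_mb << 8. A one-line standalone check:

	#include <stdio.h>

	int main(void)
	{
		int size_mb = 2048;		/* sample 2 GB DIMM */
		int nr_pages = size_mb << 8;	/* 1 MB = 256 pages of 4 KiB */

		printf("%d MB -> %d pages\n", size_mb, nr_pages);	/* 524288 */
		return 0;
	}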
1234 1234
1235 /* 1235 /*
1236 * i5400_enable_error_reporting 1236 * i5400_enable_error_reporting
1237 * Turn on the memory reporting features of the hardware 1237 * Turn on the memory reporting features of the hardware
1238 */ 1238 */
1239 static void i5400_enable_error_reporting(struct mem_ctl_info *mci) 1239 static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
1240 { 1240 {
1241 struct i5400_pvt *pvt; 1241 struct i5400_pvt *pvt;
1242 u32 fbd_error_mask; 1242 u32 fbd_error_mask;
1243 1243
1244 pvt = mci->pvt_info; 1244 pvt = mci->pvt_info;
1245 1245
1246 /* Read the FBD Error Mask Register */ 1246 /* Read the FBD Error Mask Register */
1247 pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD, 1247 pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
1248 &fbd_error_mask); 1248 &fbd_error_mask);
1249 1249
1250 /* Enable with a '0' */ 1250 /* Enable with a '0' */
1251 fbd_error_mask &= ~(ENABLE_EMASK_ALL); 1251 fbd_error_mask &= ~(ENABLE_EMASK_ALL);
1252 1252
1253 pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD, 1253 pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
1254 fbd_error_mask); 1254 fbd_error_mask);
1255 } 1255 }
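Enabling reporting is the inverse of masking: read the mask register, clear the mask bits (a 0 bit means "report this error"), and write it back. The same read-modify-write shape in a standalone sketch (register and mask values are invented):

	#include <stdio.h>

	#define DEMO_EMASK_ALL 0x00ffu	/* pretend these are the maskable bits */

	static unsigned int demo_emask = 0xffffu;	/* everything masked at reset */

	int main(void)
	{
		unsigned int mask = demo_emask;	/* read */

		mask &= ~DEMO_EMASK_ALL;	/* modify: a 0 bit enables reporting */
		demo_emask = mask;		/* write back */

		printf("mask now 0x%04x\n", demo_emask);	/* prints 0xff00 */
		return 0;
	}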
1256 1256
1257 /* 1257 /*
1258 * i5400_probe1 Probe for ONE instance of device to see if it is 1258 * i5400_probe1 Probe for ONE instance of device to see if it is
1259 * present. 1259 * present.
1260 * return: 1260 * return:
1261 * 0 for FOUND a device 1261 * 0 for FOUND a device
1262 * < 0 for error code 1262 * < 0 for error code
1263 */ 1263 */
1264 static int i5400_probe1(struct pci_dev *pdev, int dev_idx) 1264 static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1265 { 1265 {
1266 struct mem_ctl_info *mci; 1266 struct mem_ctl_info *mci;
1267 struct i5400_pvt *pvt; 1267 struct i5400_pvt *pvt;
1268 struct edac_mc_layer layers[3]; 1268 struct edac_mc_layer layers[3];
1269 1269
1270 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1270 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1271 return -EINVAL; 1271 return -EINVAL;
1272 1272
1273 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1273 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1274 __FILE__, __func__, 1274 __FILE__, __func__,
1275 pdev->bus->number, 1275 pdev->bus->number,
1276 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1276 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1277 1277
1278 /* We only are looking for func 0 of the set */ 1278 /* We only are looking for func 0 of the set */
1279 if (PCI_FUNC(pdev->devfn) != 0) 1279 if (PCI_FUNC(pdev->devfn) != 0)
1280 return -ENODEV; 1280 return -ENODEV;
1281 1281
1282 /* 1282 /*
1283 * allocate a new MC control structure 1283 * allocate a new MC control structure
1284 * 1284 *
 1285 * This driver uses the DIMM slot as "csrow" and the rest as "channel". 1285 * This driver uses the DIMM slot as "csrow" and the rest as "channel".
1286 */ 1286 */
1287 layers[0].type = EDAC_MC_LAYER_BRANCH; 1287 layers[0].type = EDAC_MC_LAYER_BRANCH;
1288 layers[0].size = MAX_BRANCHES; 1288 layers[0].size = MAX_BRANCHES;
1289 layers[0].is_virt_csrow = false; 1289 layers[0].is_virt_csrow = false;
1290 layers[1].type = EDAC_MC_LAYER_CHANNEL; 1290 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1291 layers[1].size = CHANNELS_PER_BRANCH; 1291 layers[1].size = CHANNELS_PER_BRANCH;
1292 layers[1].is_virt_csrow = false; 1292 layers[1].is_virt_csrow = false;
1293 layers[2].type = EDAC_MC_LAYER_SLOT; 1293 layers[2].type = EDAC_MC_LAYER_SLOT;
1294 layers[2].size = DIMMS_PER_CHANNEL; 1294 layers[2].size = DIMMS_PER_CHANNEL;
1295 layers[2].is_virt_csrow = true; 1295 layers[2].is_virt_csrow = true;
1296 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); 1296 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1297 if (mci == NULL) 1297 if (mci == NULL)
1298 return -ENOMEM; 1298 return -ENOMEM;
1299 1299
1300 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); 1300 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1301 1301
1302 mci->pdev = &pdev->dev; /* record ptr to the generic device */ 1302 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1303 1303
1304 pvt = mci->pvt_info; 1304 pvt = mci->pvt_info;
 1305 pvt->system_address = pdev; /* Record this device in our private data */ 1305 pvt->system_address = pdev; /* Record this device in our private data */
1306 pvt->maxch = MAX_CHANNELS; 1306 pvt->maxch = MAX_CHANNELS;
1307 pvt->maxdimmperch = DIMMS_PER_CHANNEL; 1307 pvt->maxdimmperch = DIMMS_PER_CHANNEL;
1308 1308
1309 /* 'get' the pci devices we want to reserve for our use */ 1309 /* 'get' the pci devices we want to reserve for our use */
1310 if (i5400_get_devices(mci, dev_idx)) 1310 if (i5400_get_devices(mci, dev_idx))
1311 goto fail0; 1311 goto fail0;
1312 1312
1313 /* Time to get serious */ 1313 /* Time to get serious */
1314 i5400_get_mc_regs(mci); /* retrieve the hardware registers */ 1314 i5400_get_mc_regs(mci); /* retrieve the hardware registers */
1315 1315
1316 mci->mc_idx = 0; 1316 mci->mc_idx = 0;
1317 mci->mtype_cap = MEM_FLAG_FB_DDR2; 1317 mci->mtype_cap = MEM_FLAG_FB_DDR2;
1318 mci->edac_ctl_cap = EDAC_FLAG_NONE; 1318 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1319 mci->edac_cap = EDAC_FLAG_NONE; 1319 mci->edac_cap = EDAC_FLAG_NONE;
1320 mci->mod_name = "i5400_edac.c"; 1320 mci->mod_name = "i5400_edac.c";
1321 mci->mod_ver = I5400_REVISION; 1321 mci->mod_ver = I5400_REVISION;
1322 mci->ctl_name = i5400_devs[dev_idx].ctl_name; 1322 mci->ctl_name = i5400_devs[dev_idx].ctl_name;
1323 mci->dev_name = pci_name(pdev); 1323 mci->dev_name = pci_name(pdev);
1324 mci->ctl_page_to_phys = NULL; 1324 mci->ctl_page_to_phys = NULL;
1325 1325
1326 /* Set the function pointer to an actual operation function */ 1326 /* Set the function pointer to an actual operation function */
1327 mci->edac_check = i5400_check_error; 1327 mci->edac_check = i5400_check_error;
1328 1328
1329 /* initialize the MC control structure 'dimms' table 1329 /* initialize the MC control structure 'dimms' table
1330 * with the mapping and control information */ 1330 * with the mapping and control information */
1331 if (i5400_init_dimms(mci)) { 1331 if (i5400_init_dimms(mci)) {
1332 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1332 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
1333 " because i5400_init_dimms() returned nonzero " 1333 " because i5400_init_dimms() returned nonzero "
1334 "value\n"); 1334 "value\n");
1335 mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */ 1335 mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
1336 } else { 1336 } else {
1337 debugf1("MC: Enable error reporting now\n"); 1337 debugf1("MC: Enable error reporting now\n");
1338 i5400_enable_error_reporting(mci); 1338 i5400_enable_error_reporting(mci);
1339 } 1339 }
1340 1340
1341 /* add this new MC control structure to EDAC's list of MCs */ 1341 /* add this new MC control structure to EDAC's list of MCs */
1342 if (edac_mc_add_mc(mci)) { 1342 if (edac_mc_add_mc(mci)) {
1343 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", 1343 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1344 __FILE__, __func__); 1344 __FILE__, __func__);
1345 /* FIXME: perhaps some code should go here that disables error 1345 /* FIXME: perhaps some code should go here that disables error
1346 * reporting if we just enabled it 1346 * reporting if we just enabled it
1347 */ 1347 */
1348 goto fail1; 1348 goto fail1;
1349 } 1349 }
1350 1350
1351 i5400_clear_error(mci); 1351 i5400_clear_error(mci);
1352 1352
1353 /* allocating generic PCI control info */ 1353 /* allocating generic PCI control info */
1354 i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 1354 i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1355 if (!i5400_pci) { 1355 if (!i5400_pci) {
1356 printk(KERN_WARNING 1356 printk(KERN_WARNING
1357 "%s(): Unable to create PCI control\n", 1357 "%s(): Unable to create PCI control\n",
1358 __func__); 1358 __func__);
1359 printk(KERN_WARNING 1359 printk(KERN_WARNING
1360 "%s(): PCI error report via EDAC not setup\n", 1360 "%s(): PCI error report via EDAC not setup\n",
1361 __func__); 1361 __func__);
1362 } 1362 }
1363 1363
1364 return 0; 1364 return 0;
1365 1365
1366 /* Error exit unwinding stack */ 1366 /* Error exit unwinding stack */
1367 fail1: 1367 fail1:
1368 1368
1369 i5400_put_devices(mci); 1369 i5400_put_devices(mci);
1370 1370
1371 fail0: 1371 fail0:
1372 edac_mc_free(mci); 1372 edac_mc_free(mci);
1373 return -ENODEV; 1373 return -ENODEV;
1374 } 1374 }
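The three-layer allocation above (branch x channel x slot) is where this driver meets the commit's new edac_mc_alloc() scheme; internally EDAC flattens the layer coordinates into a single DIMM index, which EDAC_DIMM_PTR() hides in i5400_init_dimms(). A standalone model of that row-major flattening (constants mirror this driver's layout):

	#include <stdio.h>

	#define DEMO_CHANS_PER_BRANCH	2
	#define DEMO_DIMMS_PER_CHANNEL	4

	/* Flatten (branch, channel, slot) exactly like a row-major 3-D array. */
	static int dimm_index(int branch, int channel, int slot)
	{
		return (branch * DEMO_CHANS_PER_BRANCH + channel)
			* DEMO_DIMMS_PER_CHANNEL + slot;
	}

	int main(void)
	{
		/* branch 1, channel 0, slot 2 -> (1*2 + 0)*4 + 2 = 10 */
		printf("index = %d\n", dimm_index(1, 0, 2));
		return 0;
	}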
1375 1375
1376 /* 1376 /*
1377 * i5400_init_one constructor for one instance of device 1377 * i5400_init_one constructor for one instance of device
1378 * 1378 *
1379 * returns: 1379 * returns:
1380 * negative on error 1380 * negative on error
 1381 * 0 on success 1381 * 0 on success
1382 */ 1382 */
1383 static int __devinit i5400_init_one(struct pci_dev *pdev, 1383 static int __devinit i5400_init_one(struct pci_dev *pdev,
1384 const struct pci_device_id *id) 1384 const struct pci_device_id *id)
1385 { 1385 {
1386 int rc; 1386 int rc;
1387 1387
1388 debugf0("MC: %s: %s()\n", __FILE__, __func__); 1388 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1389 1389
1390 /* wake up device */ 1390 /* wake up device */
1391 rc = pci_enable_device(pdev); 1391 rc = pci_enable_device(pdev);
1392 if (rc) 1392 if (rc)
1393 return rc; 1393 return rc;
1394 1394
1395 /* now probe and enable the device */ 1395 /* now probe and enable the device */
1396 return i5400_probe1(pdev, id->driver_data); 1396 return i5400_probe1(pdev, id->driver_data);
1397 } 1397 }
1398 1398
1399 /* 1399 /*
1400 * i5400_remove_one destructor for one instance of device 1400 * i5400_remove_one destructor for one instance of device
1401 * 1401 *
1402 */ 1402 */
1403 static void __devexit i5400_remove_one(struct pci_dev *pdev) 1403 static void __devexit i5400_remove_one(struct pci_dev *pdev)
1404 { 1404 {
1405 struct mem_ctl_info *mci; 1405 struct mem_ctl_info *mci;
1406 1406
1407 debugf0("%s: %s()\n", __FILE__, __func__); 1407 debugf0("%s: %s()\n", __FILE__, __func__);
1408 1408
1409 if (i5400_pci) 1409 if (i5400_pci)
1410 edac_pci_release_generic_ctl(i5400_pci); 1410 edac_pci_release_generic_ctl(i5400_pci);
1411 1411
1412 mci = edac_mc_del_mc(&pdev->dev); 1412 mci = edac_mc_del_mc(&pdev->dev);
1413 if (!mci) 1413 if (!mci)
1414 return; 1414 return;
1415 1415
1416 /* retrieve references to resources, and free those resources */ 1416 /* retrieve references to resources, and free those resources */
1417 i5400_put_devices(mci); 1417 i5400_put_devices(mci);
1418 1418
1419 edac_mc_free(mci); 1419 edac_mc_free(mci);
1420 } 1420 }
1421 1421
1422 /* 1422 /*
1423 * pci_device_id table for which devices we are looking for 1423 * pci_device_id table for which devices we are looking for
1424 * 1424 *
1425 * The "E500P" device is the first device supported. 1425 * The "E500P" device is the first device supported.
1426 */ 1426 */
1427 static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = { 1427 static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = {
1428 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, 1428 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
1429 {0,} /* 0 terminated list. */ 1429 {0,} /* 0 terminated list. */
1430 }; 1430 };
1431 1431
1432 MODULE_DEVICE_TABLE(pci, i5400_pci_tbl); 1432 MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);
1433 1433
1434 /* 1434 /*
1435 * i5400_driver pci_driver structure for this module 1435 * i5400_driver pci_driver structure for this module
1436 * 1436 *
1437 */ 1437 */
1438 static struct pci_driver i5400_driver = { 1438 static struct pci_driver i5400_driver = {
1439 .name = "i5400_edac", 1439 .name = "i5400_edac",
1440 .probe = i5400_init_one, 1440 .probe = i5400_init_one,
1441 .remove = __devexit_p(i5400_remove_one), 1441 .remove = __devexit_p(i5400_remove_one),
1442 .id_table = i5400_pci_tbl, 1442 .id_table = i5400_pci_tbl,
1443 }; 1443 };
1444 1444
1445 /* 1445 /*
1446 * i5400_init Module entry function 1446 * i5400_init Module entry function
1447 * Try to initialize this module for its devices 1447 * Try to initialize this module for its devices
1448 */ 1448 */
1449 static int __init i5400_init(void) 1449 static int __init i5400_init(void)
1450 { 1450 {
1451 int pci_rc; 1451 int pci_rc;
1452 1452
1453 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1453 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1454 1454
1455 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1455 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1456 opstate_init(); 1456 opstate_init();
1457 1457
1458 pci_rc = pci_register_driver(&i5400_driver); 1458 pci_rc = pci_register_driver(&i5400_driver);
1459 1459
1460 return (pci_rc < 0) ? pci_rc : 0; 1460 return (pci_rc < 0) ? pci_rc : 0;
1461 } 1461 }
1462 1462
1463 /* 1463 /*
1464 * i5400_exit() Module exit function 1464 * i5400_exit() Module exit function
1465 * Unregister the driver 1465 * Unregister the driver
1466 */ 1466 */
1467 static void __exit i5400_exit(void) 1467 static void __exit i5400_exit(void)
1468 { 1468 {
1469 debugf2("MC: %s: %s()\n", __FILE__, __func__); 1469 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1470 pci_unregister_driver(&i5400_driver); 1470 pci_unregister_driver(&i5400_driver);
1471 } 1471 }
1472 1472
1473 module_init(i5400_init); 1473 module_init(i5400_init);
1474 module_exit(i5400_exit); 1474 module_exit(i5400_exit);
1475 1475
1476 MODULE_LICENSE("GPL"); 1476 MODULE_LICENSE("GPL");
1477 MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>"); 1477 MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>");
1478 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); 1478 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
1479 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); 1479 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
1480 MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - " 1480 MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
1481 I5400_REVISION); 1481 I5400_REVISION);
1482 1482
1483 module_param(edac_op_state, int, 0444); 1483 module_param(edac_op_state, int, 0444);
1484 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1484 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
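Because edac_op_state is exported with mode 0444, the chosen reporting mode can be read back from userspace through the usual /sys/module/<name>/parameters path. A minimal userspace sketch (the sysfs path is assumed from that convention; it is not part of this driver):

#include <stdio.h>

int main(void)
{
	/* 0444 module params surface here; path assumed per sysfs convention */
	FILE *f = fopen("/sys/module/i5400_edac/parameters/edac_op_state", "r");
	int state;

	if (f && fscanf(f, "%d", &state) == 1)
		printf("edac_op_state = %d (0=Poll, 1=NMI)\n", state);
	if (f)
		fclose(f);
	return 0;
}

At load time the same parameter can be set on the modprobe command line, e.g. modprobe i5400_edac edac_op_state=0 to select polling.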
1485 1485
drivers/edac/i82443bxgx_edac.c
1 /* 1 /*
2 * Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel 2 * Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel
3 * module (C) 2006 Tim Small 3 * module (C) 2006 Tim Small
4 * 4 *
5 * This file may be distributed under the terms of the GNU General 5 * This file may be distributed under the terms of the GNU General
6 * Public License. 6 * Public License.
7 * 7 *
8 * Written by Tim Small <tim@buttersideup.com>, based on work by Linux 8 * Written by Tim Small <tim@buttersideup.com>, based on work by Linux
9 * Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and 9 * Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and
10 * others. 10 * others.
11 * 11 *
12 * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. 12 * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
13 * 13 *
14 * Written with reference to 82443BX Host Bridge Datasheet: 14 * Written with reference to 82443BX Host Bridge Datasheet:
15 * http://download.intel.com/design/chipsets/datashts/29063301.pdf 15 * http://download.intel.com/design/chipsets/datashts/29063301.pdf
16 * references to this document given in []. 16 * references to this document given in [].
17 * 17 *
18 * This module doesn't support the 440LX, but it may be possible to 18 * This module doesn't support the 440LX, but it may be possible to
19 * make it do so (the 440LX's register definitions are different, but 19 * make it do so (the 440LX's register definitions are different, but
20 * not completely so - I haven't studied them in enough detail to know 20 * not completely so - I haven't studied them in enough detail to know
21 * how easy this would be). 21 * how easy this would be).
22 */ 22 */
23 23
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <linux/init.h> 25 #include <linux/init.h>
26 26
27 #include <linux/pci.h> 27 #include <linux/pci.h>
28 #include <linux/pci_ids.h> 28 #include <linux/pci_ids.h>
29 29
30 30
31 #include <linux/edac.h> 31 #include <linux/edac.h>
32 #include "edac_core.h" 32 #include "edac_core.h"
33 33
34 #define I82443_REVISION "0.1" 34 #define I82443_REVISION "0.1"
35 35
36 #define EDAC_MOD_STR "i82443bxgx_edac" 36 #define EDAC_MOD_STR "i82443bxgx_edac"
37 37
38 /* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory 38 /* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
39 * Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory 39 * Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory
40 * rows" "The 82443BX supports multiple-bit error detection and 40 * rows" "The 82443BX supports multiple-bit error detection and
41 * single-bit error correction when ECC mode is enabled and 41 * single-bit error correction when ECC mode is enabled and
42 * single/multi-bit error detection when correction is disabled. 42 * single/multi-bit error detection when correction is disabled.
43 * During writes to the DRAM, the 82443BX generates ECC for the data 43 * During writes to the DRAM, the 82443BX generates ECC for the data
44 * on a QWord basis. Partial QWord writes require a read-modify-write 44 * on a QWord basis. Partial QWord writes require a read-modify-write
45 * cycle when ECC is enabled." 45 * cycle when ECC is enabled."
46 */ 46 */
47 47
48 /* "Additionally, the 82443BX ensures that the data is corrected in 48 /* "Additionally, the 82443BX ensures that the data is corrected in
49 * main memory so that accumulation of errors is prevented. Another 49 * main memory so that accumulation of errors is prevented. Another
50 * error within the same QWord would result in a double-bit error 50 * error within the same QWord would result in a double-bit error
51 * which is unrecoverable. This is known as hardware scrubbing since 51 * which is unrecoverable. This is known as hardware scrubbing since
52 * it requires no software intervention to correct the data in memory." 52 * it requires no software intervention to correct the data in memory."
53 */ 53 */
54 54
55 /* [Also see page 100 (section 4.3), "DRAM Interface"] 55 /* [Also see page 100 (section 4.3), "DRAM Interface"]
56 * [Also see page 112 (section 4.6.1.4), ECC] 56 * [Also see page 112 (section 4.6.1.4), ECC]
57 */ 57 */
58 58
59 #define I82443BXGX_NR_CSROWS 8 59 #define I82443BXGX_NR_CSROWS 8
60 #define I82443BXGX_NR_CHANS 1 60 #define I82443BXGX_NR_CHANS 1
61 #define I82443BXGX_NR_DIMMS 4 61 #define I82443BXGX_NR_DIMMS 4
62 62
63 /* 82443 PCI Device 0 */ 63 /* 82443 PCI Device 0 */
64 #define I82443BXGX_NBXCFG 0x50 /* 32bit register starting at this PCI 64 #define I82443BXGX_NBXCFG 0x50 /* 32bit register starting at this PCI
65 * config space offset */ 65 * config space offset */
66 #define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24 /* Array of bits, zero if 66 #define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24 /* Array of bits, zero if
67 * row is non-ECC */ 67 * row is non-ECC */
68 #define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12 /* 2 bits,00=100MHz,10=66 MHz */ 68 #define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12 /* 2 bits,00=100MHz,10=66 MHz */
69 69
70 #define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7 /* 2 bits: */ 70 #define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7 /* 2 bits: */
71 #define I82443BXGX_NBXCFG_INTEGRITY_NONE 0x0 /* 00 = Non-ECC */ 71 #define I82443BXGX_NBXCFG_INTEGRITY_NONE 0x0 /* 00 = Non-ECC */
72 #define I82443BXGX_NBXCFG_INTEGRITY_EC 0x1 /* 01 = EC (only) */ 72 #define I82443BXGX_NBXCFG_INTEGRITY_EC 0x1 /* 01 = EC (only) */
73 #define I82443BXGX_NBXCFG_INTEGRITY_ECC 0x2 /* 10 = ECC */ 73 #define I82443BXGX_NBXCFG_INTEGRITY_ECC 0x2 /* 10 = ECC */
74 #define I82443BXGX_NBXCFG_INTEGRITY_SCRUB 0x3 /* 11 = ECC + HW Scrub */ 74 #define I82443BXGX_NBXCFG_INTEGRITY_SCRUB 0x3 /* 11 = ECC + HW Scrub */
75 75
76 #define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE 6 76 #define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE 6
77 77
78 /* 82443 PCI Device 0 */ 78 /* 82443 PCI Device 0 */
79 #define I82443BXGX_EAP 0x80 /* 32bit register starting at this PCI 79 #define I82443BXGX_EAP 0x80 /* 32bit register starting at this PCI
80 * config space offset, Error Address 80 * config space offset, Error Address
81 * Pointer Register */ 81 * Pointer Register */
82 #define I82443BXGX_EAP_OFFSET_EAP 12 /* High 20 bits of error address */ 82 #define I82443BXGX_EAP_OFFSET_EAP 12 /* High 20 bits of error address */
83 #define I82443BXGX_EAP_OFFSET_MBE BIT(1) /* Err at EAP was multi-bit (W1TC) */ 83 #define I82443BXGX_EAP_OFFSET_MBE BIT(1) /* Err at EAP was multi-bit (W1TC) */
84 #define I82443BXGX_EAP_OFFSET_SBE BIT(0) /* Err at EAP was single-bit (W1TC) */ 84 #define I82443BXGX_EAP_OFFSET_SBE BIT(0) /* Err at EAP was single-bit (W1TC) */
85 85
86 #define I82443BXGX_ERRCMD 0x90 /* 8bit register starting at this PCI 86 #define I82443BXGX_ERRCMD 0x90 /* 8bit register starting at this PCI
87 * config space offset. */ 87 * config space offset. */
88 #define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1) /* 1 = enable */ 88 #define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1) /* 1 = enable */
89 #define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0) /* 1 = enable */ 89 #define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0) /* 1 = enable */
90 90
91 #define I82443BXGX_ERRSTS 0x91 /* 16bit register starting at this PCI 91 #define I82443BXGX_ERRSTS 0x91 /* 16bit register starting at this PCI
92 * config space offset. */ 92 * config space offset. */
93 #define I82443BXGX_ERRSTS_OFFSET_MBFRE 5 /* 3 bits - first err row multibit */ 93 #define I82443BXGX_ERRSTS_OFFSET_MBFRE 5 /* 3 bits - first err row multibit */
94 #define I82443BXGX_ERRSTS_OFFSET_MEF BIT(4) /* 1 = MBE occurred */ 94 #define I82443BXGX_ERRSTS_OFFSET_MEF BIT(4) /* 1 = MBE occurred */
95 #define I82443BXGX_ERRSTS_OFFSET_SBFRE 1 /* 3 bits - first err row singlebit */ 95 #define I82443BXGX_ERRSTS_OFFSET_SBFRE 1 /* 3 bits - first err row singlebit */
96 #define I82443BXGX_ERRSTS_OFFSET_SEF BIT(0) /* 1 = SBE occurred */ 96 #define I82443BXGX_ERRSTS_OFFSET_SEF BIT(0) /* 1 = SBE occurred */
97 97
98 #define I82443BXGX_DRAMC 0x57 /* 8bit register starting at this PCI 98 #define I82443BXGX_DRAMC 0x57 /* 8bit register starting at this PCI
99 * config space offset. */ 99 * config space offset. */
100 #define I82443BXGX_DRAMC_OFFSET_DT 3 /* 2 bits, DRAM Type */ 100 #define I82443BXGX_DRAMC_OFFSET_DT 3 /* 2 bits, DRAM Type */
101 #define I82443BXGX_DRAMC_DRAM_IS_EDO 0 /* 00 = EDO */ 101 #define I82443BXGX_DRAMC_DRAM_IS_EDO 0 /* 00 = EDO */
102 #define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1 /* 01 = SDRAM */ 102 #define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1 /* 01 = SDRAM */
103 #define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2 /* 10 = Registered SDRAM */ 103 #define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2 /* 10 = Registered SDRAM */
104 104
105 #define I82443BXGX_DRB 0x60 /* 8x 8bit registers starting at this PCI 105 #define I82443BXGX_DRB 0x60 /* 8x 8bit registers starting at this PCI
106 * config space offset. */ 106 * config space offset. */
107 107
108 /* FIXME - don't poll when ECC disabled? */ 108 /* FIXME - don't poll when ECC disabled? */
109 109
110 struct i82443bxgx_edacmc_error_info { 110 struct i82443bxgx_edacmc_error_info {
111 u32 eap; 111 u32 eap;
112 }; 112 };
113 113
114 static struct edac_pci_ctl_info *i82443bxgx_pci; 114 static struct edac_pci_ctl_info *i82443bxgx_pci;
115 115
116 static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code has 116 static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code has
117 					 * already registered a driver 117 					 * already registered a driver
118 */ 118 */
119 119
120 static int i82443bxgx_registered = 1; 120 static int i82443bxgx_registered = 1;
121 121
122 static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci, 122 static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
123 struct i82443bxgx_edacmc_error_info 123 struct i82443bxgx_edacmc_error_info
124 *info) 124 *info)
125 { 125 {
126 struct pci_dev *pdev; 126 struct pci_dev *pdev;
127 pdev = to_pci_dev(mci->pdev); 127 pdev = to_pci_dev(mci->pdev);
128 pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap); 128 pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
129 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) 129 if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
130 /* Clear error to allow next error to be reported [p.61] */ 130 /* Clear error to allow next error to be reported [p.61] */
131 pci_write_bits32(pdev, I82443BXGX_EAP, 131 pci_write_bits32(pdev, I82443BXGX_EAP,
132 I82443BXGX_EAP_OFFSET_SBE, 132 I82443BXGX_EAP_OFFSET_SBE,
133 I82443BXGX_EAP_OFFSET_SBE); 133 I82443BXGX_EAP_OFFSET_SBE);
134 134
135 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) 135 if (info->eap & I82443BXGX_EAP_OFFSET_MBE)
136 /* Clear error to allow next error to be reported [p.61] */ 136 /* Clear error to allow next error to be reported [p.61] */
137 pci_write_bits32(pdev, I82443BXGX_EAP, 137 pci_write_bits32(pdev, I82443BXGX_EAP,
138 I82443BXGX_EAP_OFFSET_MBE, 138 I82443BXGX_EAP_OFFSET_MBE,
139 I82443BXGX_EAP_OFFSET_MBE); 139 I82443BXGX_EAP_OFFSET_MBE);
140 } 140 }
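The SBE/MBE bits in EAP are write-1-to-clear (W1TC), which is why the function above writes back exactly the bit it just saw set. A standalone sketch of the semantics, modelling the register in a plain variable instead of PCI config space:

#include <stdio.h>

#define EAP_SBE (1u << 0)	/* single-bit error latched, W1TC */
#define EAP_MBE (1u << 1)	/* multi-bit error latched, W1TC */

/* model: writing 1 to a W1TC bit clears it; writing 0 leaves it alone */
static unsigned int w1tc_write(unsigned int reg, unsigned int val)
{
	return reg & ~val;
}

int main(void)
{
	unsigned int eap = EAP_SBE | EAP_MBE;	/* both errors latched */

	eap = w1tc_write(eap, EAP_SBE);		/* ack the SBE only */
	printf("after SBE ack: %#x (MBE still pending)\n", eap);
	return 0;
}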
141 141
142 static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci, 142 static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
143 struct 143 struct
144 i82443bxgx_edacmc_error_info 144 i82443bxgx_edacmc_error_info
145 *info, int handle_errors) 145 *info, int handle_errors)
146 { 146 {
147 int error_found = 0; 147 int error_found = 0;
148 u32 eapaddr, page, pageoffset; 148 u32 eapaddr, page, pageoffset;
149 149
150 /* bits 30:12 hold the 4kb block in which the error occurred 150 /* bits 30:12 hold the 4kb block in which the error occurred
151 * [p.61] */ 151 * [p.61] */
152 eapaddr = (info->eap & 0xfffff000); 152 eapaddr = (info->eap & 0xfffff000);
153 page = eapaddr >> PAGE_SHIFT; 153 page = eapaddr >> PAGE_SHIFT;
154 pageoffset = eapaddr - (page << PAGE_SHIFT); 154 pageoffset = eapaddr - (page << PAGE_SHIFT);
155 155
156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { 156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
157 error_found = 1; 157 error_found = 1;
158 if (handle_errors) 158 if (handle_errors)
159 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 159 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
160 page, pageoffset, 0, 160 page, pageoffset, 0,
161 edac_mc_find_csrow_by_page(mci, page), 161 edac_mc_find_csrow_by_page(mci, page),
162 0, -1, mci->ctl_name, "", NULL); 162 0, -1, mci->ctl_name, "", NULL);
163 } 163 }
164 164
165 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { 165 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
166 error_found = 1; 166 error_found = 1;
167 if (handle_errors) 167 if (handle_errors)
168 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 168 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
169 page, pageoffset, 0, 169 page, pageoffset, 0,
170 edac_mc_find_csrow_by_page(mci, page), 170 edac_mc_find_csrow_by_page(mci, page),
171 0, -1, mci->ctl_name, "", NULL); 171 0, -1, mci->ctl_name, "", NULL);
172 } 172 }
173 173
174 return error_found; 174 return error_found;
175 } 175 }
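To make the arithmetic above concrete: with PAGE_SHIFT equal to 12, the 0xfffff000 mask keeps exactly bits 31:12, so pageoffset always works out to zero and errors are reported at 4 KiB block granularity. A small standalone sketch with an assumed EAP value:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int eap = 0x12345ffb;		 /* sample EAP: low bits carry status */
	unsigned int eapaddr = eap & 0xfffff000; /* bits 31:12 = 4 KiB error block */
	unsigned int page = eapaddr >> PAGE_SHIFT;
	unsigned int pageoffset = eapaddr - (page << PAGE_SHIFT);

	/* prints eapaddr=0x12345000 page=0x12345 offset=0 */
	printf("eapaddr=%#x page=%#x offset=%#x\n", eapaddr, page, pageoffset);
	return 0;
}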
176 176
177 static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci) 177 static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
178 { 178 {
179 struct i82443bxgx_edacmc_error_info info; 179 struct i82443bxgx_edacmc_error_info info;
180 180
181 debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); 181 debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
182 i82443bxgx_edacmc_get_error_info(mci, &info); 182 i82443bxgx_edacmc_get_error_info(mci, &info);
183 i82443bxgx_edacmc_process_error_info(mci, &info, 1); 183 i82443bxgx_edacmc_process_error_info(mci, &info, 1);
184 } 184 }
185 185
186 static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, 186 static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
187 struct pci_dev *pdev, 187 struct pci_dev *pdev,
188 enum edac_type edac_mode, 188 enum edac_type edac_mode,
189 enum mem_type mtype) 189 enum mem_type mtype)
190 { 190 {
191 struct csrow_info *csrow; 191 struct csrow_info *csrow;
192 struct dimm_info *dimm; 192 struct dimm_info *dimm;
193 int index; 193 int index;
194 u8 drbar, dramc; 194 u8 drbar, dramc;
195 u32 row_base, row_high_limit, row_high_limit_last; 195 u32 row_base, row_high_limit, row_high_limit_last;
196 196
197 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); 197 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
198 row_high_limit_last = 0; 198 row_high_limit_last = 0;
199 for (index = 0; index < mci->nr_csrows; index++) { 199 for (index = 0; index < mci->nr_csrows; index++) {
200 csrow = &mci->csrows[index]; 200 csrow = mci->csrows[index];
201 dimm = csrow->channels[0].dimm; 201 dimm = csrow->channels[0]->dimm;
202 202
203 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 203 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
204 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n", 204 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
205 mci->mc_idx, __FILE__, __func__, index, drbar); 205 mci->mc_idx, __FILE__, __func__, index, drbar);
206 row_high_limit = ((u32) drbar << 23); 206 row_high_limit = ((u32) drbar << 23);
207 /* find the DRAM Chip Select Base address and mask */ 207 /* find the DRAM Chip Select Base address and mask */
208 debugf1("MC%d: %s: %s() Row=%d, " 208 debugf1("MC%d: %s: %s() Row=%d, "
209 "Boundary Address=%#0x, Last = %#0x\n", 209 "Boundary Address=%#0x, Last = %#0x\n",
210 mci->mc_idx, __FILE__, __func__, index, row_high_limit, 210 mci->mc_idx, __FILE__, __func__, index, row_high_limit,
211 row_high_limit_last); 211 row_high_limit_last);
212 212
213 /* 440GX goes to 2GB, represented with a DRB of 0. */ 213 /* 440GX goes to 2GB, represented with a DRB of 0. */
214 if (row_high_limit_last && !row_high_limit) 214 if (row_high_limit_last && !row_high_limit)
215 row_high_limit = 1UL << 31; 215 row_high_limit = 1UL << 31;
216 216
217 /* This row is empty [p.49] */ 217 /* This row is empty [p.49] */
218 if (row_high_limit == row_high_limit_last) 218 if (row_high_limit == row_high_limit_last)
219 continue; 219 continue;
220 row_base = row_high_limit_last; 220 row_base = row_high_limit_last;
221 csrow->first_page = row_base >> PAGE_SHIFT; 221 csrow->first_page = row_base >> PAGE_SHIFT;
222 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; 222 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
223 dimm->nr_pages = csrow->last_page - csrow->first_page + 1; 223 dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
224 /* EAP reports in 4 KiB granularity [61] */ 224 /* EAP reports in 4 KiB granularity [61] */
225 dimm->grain = 1 << 12; 225 dimm->grain = 1 << 12;
226 dimm->mtype = mtype; 226 dimm->mtype = mtype;
227 /* I don't think 440BX can tell you device type? FIXME? */ 227 /* I don't think 440BX can tell you device type? FIXME? */
228 dimm->dtype = DEV_UNKNOWN; 228 dimm->dtype = DEV_UNKNOWN;
229 /* Mode is global to all rows on 440BX */ 229 /* Mode is global to all rows on 440BX */
230 dimm->edac_mode = edac_mode; 230 dimm->edac_mode = edac_mode;
231 row_high_limit_last = row_high_limit; 231 row_high_limit_last = row_high_limit;
232 } 232 }
233 } 233 }
234 234
235 static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) 235 static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
236 { 236 {
237 struct mem_ctl_info *mci; 237 struct mem_ctl_info *mci;
238 struct edac_mc_layer layers[2]; 238 struct edac_mc_layer layers[2];
239 u8 dramc; 239 u8 dramc;
240 u32 nbxcfg, ecc_mode; 240 u32 nbxcfg, ecc_mode;
241 enum mem_type mtype; 241 enum mem_type mtype;
242 enum edac_type edac_mode; 242 enum edac_type edac_mode;
243 243
244 debugf0("MC: %s: %s()\n", __FILE__, __func__); 244 debugf0("MC: %s: %s()\n", __FILE__, __func__);
245 245
246 /* Something is really hosed if PCI config space reads from 246 /* Something is really hosed if PCI config space reads from
247 * the MC aren't working. 247 * the MC aren't working.
248 */ 248 */
249 if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg)) 249 if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
250 return -EIO; 250 return -EIO;
251 251
252 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 252 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
253 layers[0].size = I82443BXGX_NR_CSROWS; 253 layers[0].size = I82443BXGX_NR_CSROWS;
254 layers[0].is_virt_csrow = true; 254 layers[0].is_virt_csrow = true;
255 layers[1].type = EDAC_MC_LAYER_CHANNEL; 255 layers[1].type = EDAC_MC_LAYER_CHANNEL;
256 layers[1].size = I82443BXGX_NR_CHANS; 256 layers[1].size = I82443BXGX_NR_CHANS;
257 layers[1].is_virt_csrow = false; 257 layers[1].is_virt_csrow = false;
258 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); 258 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
259 if (mci == NULL) 259 if (mci == NULL)
260 return -ENOMEM; 260 return -ENOMEM;
261 261
262 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); 262 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
263 mci->pdev = &pdev->dev; 263 mci->pdev = &pdev->dev;
264 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; 264 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
265 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 265 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
266 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); 266 pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
267 switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) { 267 switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) {
268 case I82443BXGX_DRAMC_DRAM_IS_EDO: 268 case I82443BXGX_DRAMC_DRAM_IS_EDO:
269 mtype = MEM_EDO; 269 mtype = MEM_EDO;
270 break; 270 break;
271 case I82443BXGX_DRAMC_DRAM_IS_SDRAM: 271 case I82443BXGX_DRAMC_DRAM_IS_SDRAM:
272 mtype = MEM_SDR; 272 mtype = MEM_SDR;
273 break; 273 break;
274 case I82443BXGX_DRAMC_DRAM_IS_RSDRAM: 274 case I82443BXGX_DRAMC_DRAM_IS_RSDRAM:
275 mtype = MEM_RDR; 275 mtype = MEM_RDR;
276 break; 276 break;
277 default: 277 default:
278 debugf0("Unknown/reserved DRAM type value " 278 debugf0("Unknown/reserved DRAM type value "
279 "in DRAMC register!\n"); 279 "in DRAMC register!\n");
280 mtype = MEM_UNKNOWN; 280 mtype = MEM_UNKNOWN;
281 } 281 }
282 282
283 if ((mtype == MEM_SDR) || (mtype == MEM_RDR)) 283 if ((mtype == MEM_SDR) || (mtype == MEM_RDR))
284 mci->edac_cap = mci->edac_ctl_cap; 284 mci->edac_cap = mci->edac_ctl_cap;
285 else 285 else
286 mci->edac_cap = EDAC_FLAG_NONE; 286 mci->edac_cap = EDAC_FLAG_NONE;
287 287
288 mci->scrub_cap = SCRUB_FLAG_HW_SRC; 288 mci->scrub_cap = SCRUB_FLAG_HW_SRC;
289 pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg); 289 pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg);
290 ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) & 290 ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) &
291 (BIT(0) | BIT(1))); 291 (BIT(0) | BIT(1)));
292 292
293 mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB) 293 mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB)
294 ? SCRUB_HW_SRC : SCRUB_NONE; 294 ? SCRUB_HW_SRC : SCRUB_NONE;
295 295
296 switch (ecc_mode) { 296 switch (ecc_mode) {
297 case I82443BXGX_NBXCFG_INTEGRITY_NONE: 297 case I82443BXGX_NBXCFG_INTEGRITY_NONE:
298 edac_mode = EDAC_NONE; 298 edac_mode = EDAC_NONE;
299 break; 299 break;
300 case I82443BXGX_NBXCFG_INTEGRITY_EC: 300 case I82443BXGX_NBXCFG_INTEGRITY_EC:
301 edac_mode = EDAC_EC; 301 edac_mode = EDAC_EC;
302 break; 302 break;
303 case I82443BXGX_NBXCFG_INTEGRITY_ECC: 303 case I82443BXGX_NBXCFG_INTEGRITY_ECC:
304 case I82443BXGX_NBXCFG_INTEGRITY_SCRUB: 304 case I82443BXGX_NBXCFG_INTEGRITY_SCRUB:
305 edac_mode = EDAC_SECDED; 305 edac_mode = EDAC_SECDED;
306 break; 306 break;
307 default: 307 default:
308 debugf0("%s(): Unknown/reserved ECC state " 308 debugf0("%s(): Unknown/reserved ECC state "
309 "in NBXCFG register!\n", __func__); 309 "in NBXCFG register!\n", __func__);
310 edac_mode = EDAC_UNKNOWN; 310 edac_mode = EDAC_UNKNOWN;
311 break; 311 break;
312 } 312 }
313 313
314 i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype); 314 i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype);
315 315
316 /* Many BIOSes don't clear error flags on boot, so do this 316 /* Many BIOSes don't clear error flags on boot, so do this
317 * here, or we get "phantom" errors occurring at module-load 317 * here, or we get "phantom" errors occurring at module-load
318 * time. */ 318 * time. */
319 pci_write_bits32(pdev, I82443BXGX_EAP, 319 pci_write_bits32(pdev, I82443BXGX_EAP,
320 (I82443BXGX_EAP_OFFSET_SBE | 320 (I82443BXGX_EAP_OFFSET_SBE |
321 I82443BXGX_EAP_OFFSET_MBE), 321 I82443BXGX_EAP_OFFSET_MBE),
322 (I82443BXGX_EAP_OFFSET_SBE | 322 (I82443BXGX_EAP_OFFSET_SBE |
323 I82443BXGX_EAP_OFFSET_MBE)); 323 I82443BXGX_EAP_OFFSET_MBE));
324 324
325 mci->mod_name = EDAC_MOD_STR; 325 mci->mod_name = EDAC_MOD_STR;
326 mci->mod_ver = I82443_REVISION; 326 mci->mod_ver = I82443_REVISION;
327 mci->ctl_name = "I82443BXGX"; 327 mci->ctl_name = "I82443BXGX";
328 mci->dev_name = pci_name(pdev); 328 mci->dev_name = pci_name(pdev);
329 mci->edac_check = i82443bxgx_edacmc_check; 329 mci->edac_check = i82443bxgx_edacmc_check;
330 mci->ctl_page_to_phys = NULL; 330 mci->ctl_page_to_phys = NULL;
331 331
332 if (edac_mc_add_mc(mci)) { 332 if (edac_mc_add_mc(mci)) {
333 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 333 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
334 goto fail; 334 goto fail;
335 } 335 }
336 336
337 /* allocating generic PCI control info */ 337 /* allocating generic PCI control info */
338 i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 338 i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
339 if (!i82443bxgx_pci) { 339 if (!i82443bxgx_pci) {
340 printk(KERN_WARNING 340 printk(KERN_WARNING
341 "%s(): Unable to create PCI control\n", 341 "%s(): Unable to create PCI control\n",
342 __func__); 342 __func__);
343 printk(KERN_WARNING 343 printk(KERN_WARNING
344 "%s(): PCI error report via EDAC not setup\n", 344 "%s(): PCI error report via EDAC not setup\n",
345 __func__); 345 __func__);
346 } 346 }
347 347
348 debugf3("MC: %s: %s(): success\n", __FILE__, __func__); 348 debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
349 return 0; 349 return 0;
350 350
351 fail: 351 fail:
352 edac_mc_free(mci); 352 edac_mc_free(mci);
353 return -ENODEV; 353 return -ENODEV;
354 } 354 }
355 355
356 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1); 356 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
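The DRAM-type switch in probe1 extracts the two-bit field at offset 3 of DRAMC. A standalone sketch of that decode, with an assumed register value:

#include <stdio.h>

#define DRAMC_OFFSET_DT 3	/* 2-bit DRAM Type field lives at bits 4:3 */

int main(void)
{
	unsigned char dramc = 0x0f;	/* assumed raw DRAMC byte */
	static const char * const types[] = {
		"EDO", "SDRAM", "Registered SDRAM", "reserved"
	};

	/* (0x0f >> 3) & 3 == 1 -> SDRAM */
	printf("DRAM type: %s\n", types[(dramc >> DRAMC_OFFSET_DT) & 3]);
	return 0;
}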
357 357
358 /* returns count (>= 0), or negative on error */ 358 /* returns count (>= 0), or negative on error */
359 static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev, 359 static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
360 const struct pci_device_id *ent) 360 const struct pci_device_id *ent)
361 { 361 {
362 int rc; 362 int rc;
363 363
364 debugf0("MC: %s: %s()\n", __FILE__, __func__); 364 debugf0("MC: %s: %s()\n", __FILE__, __func__);
365 365
366 /* don't need to call pci_enable_device() */ 366 /* don't need to call pci_enable_device() */
367 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); 367 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
368 368
369 if (mci_pdev == NULL) 369 if (mci_pdev == NULL)
370 mci_pdev = pci_dev_get(pdev); 370 mci_pdev = pci_dev_get(pdev);
371 371
372 return rc; 372 return rc;
373 } 373 }
374 374
375 static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) 375 static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
376 { 376 {
377 struct mem_ctl_info *mci; 377 struct mem_ctl_info *mci;
378 378
379 debugf0("%s: %s()\n", __FILE__, __func__); 379 debugf0("%s: %s()\n", __FILE__, __func__);
380 380
381 if (i82443bxgx_pci) 381 if (i82443bxgx_pci)
382 edac_pci_release_generic_ctl(i82443bxgx_pci); 382 edac_pci_release_generic_ctl(i82443bxgx_pci);
383 383
384 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) 384 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
385 return; 385 return;
386 386
387 edac_mc_free(mci); 387 edac_mc_free(mci);
388 } 388 }
389 389
390 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one); 390 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
391 391
392 static DEFINE_PCI_DEVICE_TABLE(i82443bxgx_pci_tbl) = { 392 static DEFINE_PCI_DEVICE_TABLE(i82443bxgx_pci_tbl) = {
393 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)}, 393 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
394 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)}, 394 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
395 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)}, 395 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
396 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)}, 396 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)},
397 {0,} /* 0 terminated list. */ 397 {0,} /* 0 terminated list. */
398 }; 398 };
399 399
400 MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl); 400 MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
401 401
402 static struct pci_driver i82443bxgx_edacmc_driver = { 402 static struct pci_driver i82443bxgx_edacmc_driver = {
403 .name = EDAC_MOD_STR, 403 .name = EDAC_MOD_STR,
404 .probe = i82443bxgx_edacmc_init_one, 404 .probe = i82443bxgx_edacmc_init_one,
405 .remove = __devexit_p(i82443bxgx_edacmc_remove_one), 405 .remove = __devexit_p(i82443bxgx_edacmc_remove_one),
406 .id_table = i82443bxgx_pci_tbl, 406 .id_table = i82443bxgx_pci_tbl,
407 }; 407 };
408 408
409 static int __init i82443bxgx_edacmc_init(void) 409 static int __init i82443bxgx_edacmc_init(void)
410 { 410 {
411 int pci_rc; 411 int pci_rc;
412 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 412 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
413 opstate_init(); 413 opstate_init();
414 414
415 pci_rc = pci_register_driver(&i82443bxgx_edacmc_driver); 415 pci_rc = pci_register_driver(&i82443bxgx_edacmc_driver);
416 if (pci_rc < 0) 416 if (pci_rc < 0)
417 goto fail0; 417 goto fail0;
418 418
419 if (mci_pdev == NULL) { 419 if (mci_pdev == NULL) {
420 const struct pci_device_id *id = &i82443bxgx_pci_tbl[0]; 420 const struct pci_device_id *id = &i82443bxgx_pci_tbl[0];
421 int i = 0; 421 int i = 0;
422 i82443bxgx_registered = 0; 422 i82443bxgx_registered = 0;
423 423
424 while (mci_pdev == NULL && id->vendor != 0) { 424 while (mci_pdev == NULL && id->vendor != 0) {
425 mci_pdev = pci_get_device(id->vendor, 425 mci_pdev = pci_get_device(id->vendor,
426 id->device, NULL); 426 id->device, NULL);
427 i++; 427 i++;
428 id = &i82443bxgx_pci_tbl[i]; 428 id = &i82443bxgx_pci_tbl[i];
429 } 429 }
430 if (!mci_pdev) { 430 if (!mci_pdev) {
431 debugf0("i82443bxgx pci_get_device fail\n"); 431 debugf0("i82443bxgx pci_get_device fail\n");
432 pci_rc = -ENODEV; 432 pci_rc = -ENODEV;
433 goto fail1; 433 goto fail1;
434 } 434 }
435 435
436 pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl); 436 pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
437 437
438 if (pci_rc < 0) { 438 if (pci_rc < 0) {
439 debugf0("i82443bxgx init fail\n"); 439 debugf0("i82443bxgx init fail\n");
440 pci_rc = -ENODEV; 440 pci_rc = -ENODEV;
441 goto fail1; 441 goto fail1;
442 } 442 }
443 } 443 }
444 444
445 return 0; 445 return 0;
446 446
447 fail1: 447 fail1:
448 pci_unregister_driver(&i82443bxgx_edacmc_driver); 448 pci_unregister_driver(&i82443bxgx_edacmc_driver);
449 449
450 fail0: 450 fail0:
451 if (mci_pdev != NULL) 451 if (mci_pdev != NULL)
452 pci_dev_put(mci_pdev); 452 pci_dev_put(mci_pdev);
453 453
454 return pci_rc; 454 return pci_rc;
455 } 455 }
456 456
457 static void __exit i82443bxgx_edacmc_exit(void) 457 static void __exit i82443bxgx_edacmc_exit(void)
458 { 458 {
459 pci_unregister_driver(&i82443bxgx_edacmc_driver); 459 pci_unregister_driver(&i82443bxgx_edacmc_driver);
460 460
461 if (!i82443bxgx_registered) 461 if (!i82443bxgx_registered)
462 i82443bxgx_edacmc_remove_one(mci_pdev); 462 i82443bxgx_edacmc_remove_one(mci_pdev);
463 463
464 if (mci_pdev) 464 if (mci_pdev)
465 pci_dev_put(mci_pdev); 465 pci_dev_put(mci_pdev);
466 } 466 }
467 467
468 module_init(i82443bxgx_edacmc_init); 468 module_init(i82443bxgx_edacmc_init);
469 module_exit(i82443bxgx_edacmc_exit); 469 module_exit(i82443bxgx_edacmc_exit);
470 470
471 MODULE_LICENSE("GPL"); 471 MODULE_LICENSE("GPL");
472 MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD"); 472 MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
473 MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers"); 473 MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
474 474
475 module_param(edac_op_state, int, 0444); 475 module_param(edac_op_state, int, 0444);
476 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 476 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
477 477
drivers/edac/i82860_edac.c
1 /* 1 /*
2 * Intel 82860 Memory Controller kernel module 2 * Intel 82860 Memory Controller kernel module
3 * (C) 2005 Red Hat (http://www.redhat.com) 3 * (C) 2005 Red Hat (http://www.redhat.com)
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * Written by Ben Woodard <woodard@redhat.com> 7 * Written by Ben Woodard <woodard@redhat.com>
8 * shamelessly copied from and based upon the edac_i82875 driver 8 * shamelessly copied from and based upon the edac_i82875 driver
9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) 9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/pci.h> 14 #include <linux/pci.h>
15 #include <linux/pci_ids.h> 15 #include <linux/pci_ids.h>
16 #include <linux/edac.h> 16 #include <linux/edac.h>
17 #include "edac_core.h" 17 #include "edac_core.h"
18 18
19 #define I82860_REVISION " Ver: 2.0.2" 19 #define I82860_REVISION " Ver: 2.0.2"
20 #define EDAC_MOD_STR "i82860_edac" 20 #define EDAC_MOD_STR "i82860_edac"
21 21
22 #define i82860_printk(level, fmt, arg...) \ 22 #define i82860_printk(level, fmt, arg...) \
23 edac_printk(level, "i82860", fmt, ##arg) 23 edac_printk(level, "i82860", fmt, ##arg)
24 24
25 #define i82860_mc_printk(mci, level, fmt, arg...) \ 25 #define i82860_mc_printk(mci, level, fmt, arg...) \
26 edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg) 26 edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg)
27 27
28 #ifndef PCI_DEVICE_ID_INTEL_82860_0 28 #ifndef PCI_DEVICE_ID_INTEL_82860_0
29 #define PCI_DEVICE_ID_INTEL_82860_0 0x2531 29 #define PCI_DEVICE_ID_INTEL_82860_0 0x2531
30 #endif /* PCI_DEVICE_ID_INTEL_82860_0 */ 30 #endif /* PCI_DEVICE_ID_INTEL_82860_0 */
31 31
32 #define I82860_MCHCFG 0x50 32 #define I82860_MCHCFG 0x50
33 #define I82860_GBA 0x60 33 #define I82860_GBA 0x60
34 #define I82860_GBA_MASK 0x7FF 34 #define I82860_GBA_MASK 0x7FF
35 #define I82860_GBA_SHIFT 24 35 #define I82860_GBA_SHIFT 24
36 #define I82860_ERRSTS 0xC8 36 #define I82860_ERRSTS 0xC8
37 #define I82860_EAP 0xE4 37 #define I82860_EAP 0xE4
38 #define I82860_DERRCTL_STS 0xE2 38 #define I82860_DERRCTL_STS 0xE2
39 39
40 enum i82860_chips { 40 enum i82860_chips {
41 I82860 = 0, 41 I82860 = 0,
42 }; 42 };
43 43
44 struct i82860_dev_info { 44 struct i82860_dev_info {
45 const char *ctl_name; 45 const char *ctl_name;
46 }; 46 };
47 47
48 struct i82860_error_info { 48 struct i82860_error_info {
49 u16 errsts; 49 u16 errsts;
50 u32 eap; 50 u32 eap;
51 u16 derrsyn; 51 u16 derrsyn;
52 u16 errsts2; 52 u16 errsts2;
53 }; 53 };
54 54
55 static const struct i82860_dev_info i82860_devs[] = { 55 static const struct i82860_dev_info i82860_devs[] = {
56 [I82860] = { 56 [I82860] = {
57 .ctl_name = "i82860"}, 57 .ctl_name = "i82860"},
58 }; 58 };
59 59
60 static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code 60 static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code
61 					 * has already registered a driver 61 					 * has already registered a driver
62 */ 62 */
63 static struct edac_pci_ctl_info *i82860_pci; 63 static struct edac_pci_ctl_info *i82860_pci;
64 64
65 static void i82860_get_error_info(struct mem_ctl_info *mci, 65 static void i82860_get_error_info(struct mem_ctl_info *mci,
66 struct i82860_error_info *info) 66 struct i82860_error_info *info)
67 { 67 {
68 struct pci_dev *pdev; 68 struct pci_dev *pdev;
69 69
70 pdev = to_pci_dev(mci->pdev); 70 pdev = to_pci_dev(mci->pdev);
71 71
72 /* 72 /*
73 * This is a mess because there is no atomic way to read all the 73 * This is a mess because there is no atomic way to read all the
74 * registers at once, and a latched CE can be overwritten by a UE 74 * registers at once, and a latched CE can be overwritten by a UE
75 * between reads. 75 * between reads.
76 */ 76 */
77 pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts); 77 pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts);
78 pci_read_config_dword(pdev, I82860_EAP, &info->eap); 78 pci_read_config_dword(pdev, I82860_EAP, &info->eap);
79 pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn); 79 pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
80 pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2); 80 pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2);
81 81
82 pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003); 82 pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003);
83 83
84 /* 84 /*
85 * If the error is the same for both reads then the first set of reads 85 * If the error is the same for both reads then the first set of reads
86 * is valid. If there is a change, then a CE occurred with no info and 86 * is valid. If there is a change, then a CE occurred with no info and
87 * the second set of reads is valid and should be the UE info. 87 * the second set of reads is valid and should be the UE info.
88 */ 88 */
89 if (!(info->errsts2 & 0x0003)) 89 if (!(info->errsts2 & 0x0003))
90 return; 90 return;
91 91
92 if ((info->errsts ^ info->errsts2) & 0x0003) { 92 if ((info->errsts ^ info->errsts2) & 0x0003) {
93 pci_read_config_dword(pdev, I82860_EAP, &info->eap); 93 pci_read_config_dword(pdev, I82860_EAP, &info->eap);
94 pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn); 94 pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
95 } 95 }
96 } 96 }
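The double read above guards against a CE being overwritten by a UE mid-sequence: if the two ERRSTS snapshots differ in their low two bits, the re-read EAP/DERRCTL_STS values are taken as the UE data. The test in isolation, with assumed register values:

#include <stdio.h>

int main(void)
{
	unsigned short errsts = 0x0001;		/* first read: CE bit latched */
	unsigned short errsts2 = 0x0002;	/* second read: UE replaced it */

	if (!(errsts2 & 0x0003))
		printf("no error latched\n");
	else if ((errsts ^ errsts2) & 0x0003)
		printf("status changed between reads: trust the re-read\n");
	else
		printf("stable status: the first set of reads is valid\n");
	return 0;
}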
97 97
98 static int i82860_process_error_info(struct mem_ctl_info *mci, 98 static int i82860_process_error_info(struct mem_ctl_info *mci,
99 struct i82860_error_info *info, 99 struct i82860_error_info *info,
100 int handle_errors) 100 int handle_errors)
101 { 101 {
102 struct dimm_info *dimm; 102 struct dimm_info *dimm;
103 int row; 103 int row;
104 104
105 if (!(info->errsts2 & 0x0003)) 105 if (!(info->errsts2 & 0x0003))
106 return 0; 106 return 0;
107 107
108 if (!handle_errors) 108 if (!handle_errors)
109 return 1; 109 return 1;
110 110
111 if ((info->errsts ^ info->errsts2) & 0x0003) { 111 if ((info->errsts ^ info->errsts2) & 0x0003) {
112 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 112 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
113 -1, -1, -1, "UE overwrote CE", "", NULL); 113 -1, -1, -1, "UE overwrote CE", "", NULL);
114 info->errsts = info->errsts2; 114 info->errsts = info->errsts2;
115 } 115 }
116 116
117 info->eap >>= PAGE_SHIFT; 117 info->eap >>= PAGE_SHIFT;
118 row = edac_mc_find_csrow_by_page(mci, info->eap); 118 row = edac_mc_find_csrow_by_page(mci, info->eap);
119 dimm = mci->csrows[row].channels[0].dimm; 119 dimm = mci->csrows[row]->channels[0]->dimm;
120 120
121 if (info->errsts & 0x0002) 121 if (info->errsts & 0x0002)
122 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 122 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
123 info->eap, 0, 0, 123 info->eap, 0, 0,
124 dimm->location[0], dimm->location[1], -1, 124 dimm->location[0], dimm->location[1], -1,
125 "i82860 UE", "", NULL); 125 "i82860 UE", "", NULL);
126 else 126 else
127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
128 info->eap, 0, info->derrsyn, 128 info->eap, 0, info->derrsyn,
129 dimm->location[0], dimm->location[1], -1, 129 dimm->location[0], dimm->location[1], -1,
130 "i82860 CE", "", NULL); 130 "i82860 CE", "", NULL);
131 131
132 return 1; 132 return 1;
133 } 133 }
134 134
135 static void i82860_check(struct mem_ctl_info *mci) 135 static void i82860_check(struct mem_ctl_info *mci)
136 { 136 {
137 struct i82860_error_info info; 137 struct i82860_error_info info;
138 138
139 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 139 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
140 i82860_get_error_info(mci, &info); 140 i82860_get_error_info(mci, &info);
141 i82860_process_error_info(mci, &info, 1); 141 i82860_process_error_info(mci, &info, 1);
142 } 142 }
143 143
144 static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) 144 static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
145 { 145 {
146 unsigned long last_cumul_size; 146 unsigned long last_cumul_size;
147 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ 147 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
148 u16 value; 148 u16 value;
149 u32 cumul_size; 149 u32 cumul_size;
150 struct csrow_info *csrow; 150 struct csrow_info *csrow;
151 struct dimm_info *dimm; 151 struct dimm_info *dimm;
152 int index; 152 int index;
153 153
154 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); 154 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
155 mchcfg_ddim = mchcfg_ddim & 0x180; 155 mchcfg_ddim = mchcfg_ddim & 0x180;
156 last_cumul_size = 0; 156 last_cumul_size = 0;
157 157
158 /* The group row boundary (GRA) reg values are boundary address 158 /* The group row boundary (GRA) reg values are boundary address
159 * for each DRAM row with a granularity of 16MB. GRA regs are 159 * for each DRAM row with a granularity of 16MB. GRA regs are
160 * cumulative; therefore GRA15 will contain the total memory contained 160 * cumulative; therefore GRA15 will contain the total memory contained
161 * in all eight rows. 161 * in all eight rows.
162 */ 162 */
163 for (index = 0; index < mci->nr_csrows; index++) { 163 for (index = 0; index < mci->nr_csrows; index++) {
164 csrow = &mci->csrows[index]; 164 csrow = mci->csrows[index];
165 dimm = csrow->channels[0].dimm; 165 dimm = csrow->channels[0]->dimm;
166 166
167 pci_read_config_word(pdev, I82860_GBA + index * 2, &value); 167 pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
168 cumul_size = (value & I82860_GBA_MASK) << 168 cumul_size = (value & I82860_GBA_MASK) <<
169 (I82860_GBA_SHIFT - PAGE_SHIFT); 169 (I82860_GBA_SHIFT - PAGE_SHIFT);
170 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 170 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
171 cumul_size); 171 cumul_size);
172 172
173 if (cumul_size == last_cumul_size) 173 if (cumul_size == last_cumul_size)
174 continue; /* not populated */ 174 continue; /* not populated */
175 175
176 csrow->first_page = last_cumul_size; 176 csrow->first_page = last_cumul_size;
177 csrow->last_page = cumul_size - 1; 177 csrow->last_page = cumul_size - 1;
178 dimm->nr_pages = cumul_size - last_cumul_size; 178 dimm->nr_pages = cumul_size - last_cumul_size;
179 last_cumul_size = cumul_size; 179 last_cumul_size = cumul_size;
180 dimm->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */ 180 dimm->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
181 dimm->mtype = MEM_RMBS; 181 dimm->mtype = MEM_RMBS;
182 dimm->dtype = DEV_UNKNOWN; 182 dimm->dtype = DEV_UNKNOWN;
183 dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; 183 dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
184 } 184 }
185 } 185 }
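The GBA arithmetic above: each register holds a cumulative boundary in 16 MiB units (the shift by 24), which the code converts straight to 4 KiB pages by shifting by (24 - PAGE_SHIFT) instead. A worked sketch with assumed register values:

#include <stdio.h>

#define GBA_MASK	0x7FF
#define GBA_SHIFT	24
#define PAGE_SHIFT	12

int main(void)
{
	/* assumed GBA readings: cumulative boundaries at 16 MiB and 48 MiB */
	unsigned short gba[] = { 0x001, 0x003 };
	unsigned int last = 0;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int cumul = (gba[i] & GBA_MASK) <<
				     (GBA_SHIFT - PAGE_SHIFT);

		/* 0x001 -> 4096 pages (16 MiB); row sizes: 4096, 8192 pages */
		printf("row %d: %u pages\n", i, cumul - last);
		last = cumul;
	}
	return 0;
}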
186 186
187 static int i82860_probe1(struct pci_dev *pdev, int dev_idx) 187 static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
188 { 188 {
189 struct mem_ctl_info *mci; 189 struct mem_ctl_info *mci;
190 struct edac_mc_layer layers[2]; 190 struct edac_mc_layer layers[2];
191 struct i82860_error_info discard; 191 struct i82860_error_info discard;
192 192
193 /* 193 /*
194 * RDRAM has channels but these don't map onto the csrow abstraction. 194 * RDRAM has channels but these don't map onto the csrow abstraction.
195 * According to the datasheet, there are 2 Rambus channels, supporting 195 * According to the datasheet, there are 2 Rambus channels, supporting
196 * up to 16 direct RDRAM devices. 196 * up to 16 direct RDRAM devices.
197 * The device groups from the GRA registers seem to map reasonably 197 * The device groups from the GRA registers seem to map reasonably
198 * well onto the notion of a chip select row. 198 * well onto the notion of a chip select row.
199 * There are 16 GRA registers; since the name is associated with 199 * There are 16 GRA registers; since the name is associated with
200 * the channel and the GRA registers map to physical devices, we 200 * the channel and the GRA registers map to physical devices, we
201 * create one channel per group. 201 * create one channel per group.
202 */ 202 */
203 layers[0].type = EDAC_MC_LAYER_CHANNEL; 203 layers[0].type = EDAC_MC_LAYER_CHANNEL;
204 layers[0].size = 2; 204 layers[0].size = 2;
205 layers[0].is_virt_csrow = true; 205 layers[0].is_virt_csrow = true;
206 layers[1].type = EDAC_MC_LAYER_SLOT; 206 layers[1].type = EDAC_MC_LAYER_SLOT;
207 layers[1].size = 8; 207 layers[1].size = 8;
208 layers[1].is_virt_csrow = true; 208 layers[1].is_virt_csrow = true;
209 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); 209 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
210 if (!mci) 210 if (!mci)
211 return -ENOMEM; 211 return -ENOMEM;
212 212
213 debugf3("%s(): init mci\n", __func__); 213 debugf3("%s(): init mci\n", __func__);
214 mci->pdev = &pdev->dev; 214 mci->pdev = &pdev->dev;
215 mci->mtype_cap = MEM_FLAG_DDR; 215 mci->mtype_cap = MEM_FLAG_DDR;
216 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 216 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
217 /* I"m not sure about this but I think that all RDRAM is SECDED */ 217 /* I"m not sure about this but I think that all RDRAM is SECDED */
218 mci->edac_cap = EDAC_FLAG_SECDED; 218 mci->edac_cap = EDAC_FLAG_SECDED;
219 mci->mod_name = EDAC_MOD_STR; 219 mci->mod_name = EDAC_MOD_STR;
220 mci->mod_ver = I82860_REVISION; 220 mci->mod_ver = I82860_REVISION;
221 mci->ctl_name = i82860_devs[dev_idx].ctl_name; 221 mci->ctl_name = i82860_devs[dev_idx].ctl_name;
222 mci->dev_name = pci_name(pdev); 222 mci->dev_name = pci_name(pdev);
223 mci->edac_check = i82860_check; 223 mci->edac_check = i82860_check;
224 mci->ctl_page_to_phys = NULL; 224 mci->ctl_page_to_phys = NULL;
225 i82860_init_csrows(mci, pdev); 225 i82860_init_csrows(mci, pdev);
226 i82860_get_error_info(mci, &discard); /* clear counters */ 226 i82860_get_error_info(mci, &discard); /* clear counters */
227 227
228 /* Here we assume that we will never see multiple instances of this 228 /* Here we assume that we will never see multiple instances of this
229 * type of memory controller. The ID is therefore hardcoded to 0. 229 * type of memory controller. The ID is therefore hardcoded to 0.
230 */ 230 */
231 if (edac_mc_add_mc(mci)) { 231 if (edac_mc_add_mc(mci)) {
232 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 232 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
233 goto fail; 233 goto fail;
234 } 234 }
235 235
236 /* allocating generic PCI control info */ 236 /* allocating generic PCI control info */
237 i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 237 i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
238 if (!i82860_pci) { 238 if (!i82860_pci) {
239 printk(KERN_WARNING 239 printk(KERN_WARNING
240 "%s(): Unable to create PCI control\n", 240 "%s(): Unable to create PCI control\n",
241 __func__); 241 __func__);
242 printk(KERN_WARNING 242 printk(KERN_WARNING
243 "%s(): PCI error report via EDAC not setup\n", 243 "%s(): PCI error report via EDAC not setup\n",
244 __func__); 244 __func__);
245 } 245 }
246 246
247 /* get this far and it's successful */ 247 /* get this far and it's successful */
248 debugf3("%s(): success\n", __func__); 248 debugf3("%s(): success\n", __func__);
249 249
250 return 0; 250 return 0;
251 251
252 fail: 252 fail:
253 edac_mc_free(mci); 253 edac_mc_free(mci);
254 return -ENODEV; 254 return -ENODEV;
255 } 255 }
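The layer description handed to edac_mc_alloc here is a small geometry: 2 channels x 8 slots gives the 16 locations matching the "up to 16 direct RDRAM devices" noted in the comment above. A sketch of how the product falls out (local struct only, not the kernel's edac_mc_layer):

#include <stdio.h>

struct demo_layer {
	const char *type;
	unsigned int size;
};

int main(void)
{
	/* mirrors the i82860 layout: 2 Rambus channels x 8 device groups */
	struct demo_layer layers[] = { { "channel", 2 }, { "slot", 8 } };
	unsigned int i, total = 1;

	for (i = 0; i < 2; i++)
		total *= layers[i].size;
	printf("%u DIMM locations\n", total);	/* prints 16 */
	return 0;
}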
256 256
257 /* returns count (>= 0), or negative on error */ 257 /* returns count (>= 0), or negative on error */
258 static int __devinit i82860_init_one(struct pci_dev *pdev, 258 static int __devinit i82860_init_one(struct pci_dev *pdev,
259 const struct pci_device_id *ent) 259 const struct pci_device_id *ent)
260 { 260 {
261 int rc; 261 int rc;
262 262
263 debugf0("%s()\n", __func__); 263 debugf0("%s()\n", __func__);
264 i82860_printk(KERN_INFO, "i82860 init one\n"); 264 i82860_printk(KERN_INFO, "i82860 init one\n");
265 265
266 if (pci_enable_device(pdev) < 0) 266 if (pci_enable_device(pdev) < 0)
267 return -EIO; 267 return -EIO;
268 268
269 rc = i82860_probe1(pdev, ent->driver_data); 269 rc = i82860_probe1(pdev, ent->driver_data);
270 270
271 if (rc == 0) 271 if (rc == 0)
272 mci_pdev = pci_dev_get(pdev); 272 mci_pdev = pci_dev_get(pdev);
273 273
274 return rc; 274 return rc;
275 } 275 }
276 276
277 static void __devexit i82860_remove_one(struct pci_dev *pdev) 277 static void __devexit i82860_remove_one(struct pci_dev *pdev)
278 { 278 {
279 struct mem_ctl_info *mci; 279 struct mem_ctl_info *mci;
280 280
281 debugf0("%s()\n", __func__); 281 debugf0("%s()\n", __func__);
282 282
283 if (i82860_pci) 283 if (i82860_pci)
284 edac_pci_release_generic_ctl(i82860_pci); 284 edac_pci_release_generic_ctl(i82860_pci);
285 285
286 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) 286 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
287 return; 287 return;
288 288
289 edac_mc_free(mci); 289 edac_mc_free(mci);
290 } 290 }
291 291
292 static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = { 292 static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = {
293 { 293 {
294 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 294 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
295 I82860}, 295 I82860},
296 { 296 {
297 0, 297 0,
298 } /* 0 terminated list. */ 298 } /* 0 terminated list. */
299 }; 299 };
300 300
301 MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); 301 MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
302 302
303 static struct pci_driver i82860_driver = { 303 static struct pci_driver i82860_driver = {
304 .name = EDAC_MOD_STR, 304 .name = EDAC_MOD_STR,
305 .probe = i82860_init_one, 305 .probe = i82860_init_one,
306 .remove = __devexit_p(i82860_remove_one), 306 .remove = __devexit_p(i82860_remove_one),
307 .id_table = i82860_pci_tbl, 307 .id_table = i82860_pci_tbl,
308 }; 308 };
309 309
310 static int __init i82860_init(void) 310 static int __init i82860_init(void)
311 { 311 {
312 int pci_rc; 312 int pci_rc;
313 313
314 debugf3("%s()\n", __func__); 314 debugf3("%s()\n", __func__);
315 315
316 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 316 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
317 opstate_init(); 317 opstate_init();
318 318
319 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) 319 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
320 goto fail0; 320 goto fail0;
321 321
322 if (!mci_pdev) { 322 if (!mci_pdev) {
323 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 323 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
324 PCI_DEVICE_ID_INTEL_82860_0, NULL); 324 PCI_DEVICE_ID_INTEL_82860_0, NULL);
325 325
326 if (mci_pdev == NULL) { 326 if (mci_pdev == NULL) {
327 debugf0("860 pci_get_device fail\n"); 327 debugf0("860 pci_get_device fail\n");
328 pci_rc = -ENODEV; 328 pci_rc = -ENODEV;
329 goto fail1; 329 goto fail1;
330 } 330 }
331 331
332 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); 332 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
333 333
334 if (pci_rc < 0) { 334 if (pci_rc < 0) {
335 debugf0("860 init fail\n"); 335 debugf0("860 init fail\n");
336 pci_rc = -ENODEV; 336 pci_rc = -ENODEV;
337 goto fail1; 337 goto fail1;
338 } 338 }
339 } 339 }
340 340
341 return 0; 341 return 0;
342 342
343 fail1: 343 fail1:
344 pci_unregister_driver(&i82860_driver); 344 pci_unregister_driver(&i82860_driver);
345 345
346 fail0: 346 fail0:
347 if (mci_pdev != NULL) 347 if (mci_pdev != NULL)
348 pci_dev_put(mci_pdev); 348 pci_dev_put(mci_pdev);
349 349
350 return pci_rc; 350 return pci_rc;
351 } 351 }
352 352
353 static void __exit i82860_exit(void) 353 static void __exit i82860_exit(void)
354 { 354 {
355 debugf3("%s()\n", __func__); 355 debugf3("%s()\n", __func__);
356 356
357 pci_unregister_driver(&i82860_driver); 357 pci_unregister_driver(&i82860_driver);
358 358
359 if (mci_pdev != NULL) 359 if (mci_pdev != NULL)
360 pci_dev_put(mci_pdev); 360 pci_dev_put(mci_pdev);
361 } 361 }
362 362
363 module_init(i82860_init); 363 module_init(i82860_init);
364 module_exit(i82860_exit); 364 module_exit(i82860_exit);
365 365
366 MODULE_LICENSE("GPL"); 366 MODULE_LICENSE("GPL");
367 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " 367 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
368 "Ben Woodard <woodard@redhat.com>"); 368 "Ben Woodard <woodard@redhat.com>");
369 MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); 369 MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
370 370
371 module_param(edac_op_state, int, 0444); 371 module_param(edac_op_state, int, 0444);
372 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 372 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
373 373
drivers/edac/i82875p_edac.c
1 /* 1 /*
2 * Intel D82875P Memory Controller kernel module 2 * Intel D82875P Memory Controller kernel module
3 * (C) 2003 Linux Networx (http://lnxi.com) 3 * (C) 2003 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * Written by Thayne Harbaugh 7 * Written by Thayne Harbaugh
8 * Contributors: 8 * Contributors:
9 * Wang Zhenyu at intel.com 9 * Wang Zhenyu at intel.com
10 * 10 *
11 * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ 11 * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
12 * 12 *
13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com 13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
14 */ 14 */
15 15
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/pci.h> 18 #include <linux/pci.h>
19 #include <linux/pci_ids.h> 19 #include <linux/pci_ids.h>
20 #include <linux/edac.h> 20 #include <linux/edac.h>
21 #include "edac_core.h" 21 #include "edac_core.h"
22 22
23 #define I82875P_REVISION " Ver: 2.0.2" 23 #define I82875P_REVISION " Ver: 2.0.2"
24 #define EDAC_MOD_STR "i82875p_edac" 24 #define EDAC_MOD_STR "i82875p_edac"
25 25
26 #define i82875p_printk(level, fmt, arg...) \ 26 #define i82875p_printk(level, fmt, arg...) \
27 edac_printk(level, "i82875p", fmt, ##arg) 27 edac_printk(level, "i82875p", fmt, ##arg)
28 28
29 #define i82875p_mc_printk(mci, level, fmt, arg...) \ 29 #define i82875p_mc_printk(mci, level, fmt, arg...) \
30 edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg) 30 edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)
31 31
32 #ifndef PCI_DEVICE_ID_INTEL_82875_0 32 #ifndef PCI_DEVICE_ID_INTEL_82875_0
33 #define PCI_DEVICE_ID_INTEL_82875_0 0x2578 33 #define PCI_DEVICE_ID_INTEL_82875_0 0x2578
34 #endif /* PCI_DEVICE_ID_INTEL_82875_0 */ 34 #endif /* PCI_DEVICE_ID_INTEL_82875_0 */
35 35
36 #ifndef PCI_DEVICE_ID_INTEL_82875_6 36 #ifndef PCI_DEVICE_ID_INTEL_82875_6
37 #define PCI_DEVICE_ID_INTEL_82875_6 0x257e 37 #define PCI_DEVICE_ID_INTEL_82875_6 0x257e
38 #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ 38 #endif /* PCI_DEVICE_ID_INTEL_82875_6 */
39 39
40 /* four csrows in dual channel, eight in single channel */ 40 /* four csrows in dual channel, eight in single channel */
41 #define I82875P_NR_DIMMS 8 41 #define I82875P_NR_DIMMS 8
42 #define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans)) 42 #define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans))
43 43
44 /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ 44 /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
45 #define I82875P_EAP 0x58 /* Error Address Pointer (32b) 45 #define I82875P_EAP 0x58 /* Error Address Pointer (32b)
46 * 46 *
47 * 31:12 block address 47 * 31:12 block address
48 * 11:0 reserved 48 * 11:0 reserved
49 */ 49 */
50 50
51 #define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b) 51 #define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
52 * 52 *
53 * 7:0 DRAM ECC Syndrome 53 * 7:0 DRAM ECC Syndrome
54 */ 54 */
55 55
56 #define I82875P_DES 0x5d /* DRAM Error Status (8b) 56 #define I82875P_DES 0x5d /* DRAM Error Status (8b)
57 * 57 *
58 * 7:1 reserved 58 * 7:1 reserved
59 * 0 Error channel 0/1 59 * 0 Error channel 0/1
60 */ 60 */
61 61
62 #define I82875P_ERRSTS 0xc8 /* Error Status Register (16b) 62 #define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
63 * 63 *
64 * 15:10 reserved 64 * 15:10 reserved
65 * 9 non-DRAM lock error (ndlock) 65 * 9 non-DRAM lock error (ndlock)
66 * 8 Sftwr Generated SMI 66 * 8 Sftwr Generated SMI
67 * 7 ECC UE 67 * 7 ECC UE
68 * 6 reserved 68 * 6 reserved
69 * 5 MCH detects unimplemented cycle 69 * 5 MCH detects unimplemented cycle
70 * 4 AGP access outside GA 70 * 4 AGP access outside GA
71 * 3 Invalid AGP access 71 * 3 Invalid AGP access
72 * 2 Invalid GA translation table 72 * 2 Invalid GA translation table
73 * 1 Unsupported AGP command 73 * 1 Unsupported AGP command
74 * 0 ECC CE 74 * 0 ECC CE
75 */ 75 */
76 76
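Of the ERRSTS bits listed above, the driver only acts on bit 7 (ECC UE) and bit 0 (ECC CE), which is where the recurring 0x0081 mask in i82875p_get_error_info() and i82875p_process_error_info() below comes from. A sketch with hypothetical macro names:

	#define I82875P_ERRSTS_UE 0x0080 /* bit 7: ECC uncorrectable error */
	#define I82875P_ERRSTS_CE 0x0001 /* bit 0: ECC correctable error */

	if (!(errsts & (I82875P_ERRSTS_UE | I82875P_ERRSTS_CE))) /* == 0x0081 */
		return; /* no DRAM ECC event to report */
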
77 #define I82875P_ERRCMD 0xca /* Error Command (16b) 77 #define I82875P_ERRCMD 0xca /* Error Command (16b)
78 * 78 *
79 * 15:10 reserved 79 * 15:10 reserved
80 * 9 SERR on non-DRAM lock 80 * 9 SERR on non-DRAM lock
81 * 8 SERR on ECC UE 81 * 8 SERR on ECC UE
82 * 7 SERR on ECC CE 82 * 7 SERR on ECC CE
83 * 6 target abort on high exception 83 * 6 target abort on high exception
84 * 5 detect unimplemented cyc 84 * 5 detect unimplemented cyc
85 * 4 AGP access outside of GA 85 * 4 AGP access outside of GA
86 * 3 SERR on invalid AGP access 86 * 3 SERR on invalid AGP access
87 * 2 invalid translation table 87 * 2 invalid translation table
88 * 1 SERR on unsupported AGP command 88 * 1 SERR on unsupported AGP command
89 * 0 reserved 89 * 0 reserved
90 */ 90 */
91 91
92 /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ 92 /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
93 #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) 93 #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
94 * 94 *
95 * 15:10 reserved 95 * 15:10 reserved
96 * 9 fast back-to-back - ro 0 96 * 9 fast back-to-back - ro 0
97 * 8 SERR enable - ro 0 97 * 8 SERR enable - ro 0
98 * 7 addr/data stepping - ro 0 98 * 7 addr/data stepping - ro 0
99 * 6 parity err enable - ro 0 99 * 6 parity err enable - ro 0
100 * 5 VGA palette snoop - ro 0 100 * 5 VGA palette snoop - ro 0
101 * 4 mem wr & invalidate - ro 0 101 * 4 mem wr & invalidate - ro 0
102 * 3 special cycle - ro 0 102 * 3 special cycle - ro 0
103 * 2 bus master - ro 0 103 * 2 bus master - ro 0
104 * 1 mem access dev6 - 0(dis),1(en) 104 * 1 mem access dev6 - 0(dis),1(en)
105 * 0 IO access dev3 - 0(dis),1(en) 105 * 0 IO access dev3 - 0(dis),1(en)
106 */ 106 */
107 107
108 #define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b) 108 #define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
109 * 109 *
110 * 31:12 mem base addr [31:12] 110 * 31:12 mem base addr [31:12]
111 * 11:4 address mask - ro 0 111 * 11:4 address mask - ro 0
112 * 3 prefetchable - ro 0(non),1(pre) 112 * 3 prefetchable - ro 0(non),1(pre)
113 * 2:1 mem type - ro 0 113 * 2:1 mem type - ro 0
114 * 0 mem space - ro 0 114 * 0 mem space - ro 0
115 */ 115 */
116 116
117 /* Intel 82875p MMIO register space - device 0 function 0 - MMR space */ 117 /* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
118 118
119 #define I82875P_DRB_SHIFT 26 /* 64MiB grain */ 119 #define I82875P_DRB_SHIFT 26 /* 64MiB grain */
120 #define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8) 120 #define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
121 * 121 *
122 * 7 reserved 122 * 7 reserved
123 * 6:0 64MiB row boundary addr 123 * 6:0 64MiB row boundary addr
124 */ 124 */
125 125
126 #define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8) 126 #define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
127 * 127 *
128 * 7 reserved 128 * 7 reserved
129 * 6:4 row attr row 1 129 * 6:4 row attr row 1
130 * 3 reserved 130 * 3 reserved
131 * 2:0 row attr row 0 131 * 2:0 row attr row 0
132 * 132 *
133 * 000 = 4KiB 133 * 000 = 4KiB
134 * 001 = 8KiB 134 * 001 = 8KiB
135 * 010 = 16KiB 135 * 010 = 16KiB
136 * 011 = 32KiB 136 * 011 = 32KiB
137 */ 137 */
138 138
139 #define I82875P_DRC 0x68 /* DRAM Controller Mode (32b) 139 #define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
140 * 140 *
141 * 31:30 reserved 141 * 31:30 reserved
142 * 29 init complete 142 * 29 init complete
143 * 28:23 reserved 143 * 28:23 reserved
144 * 22:21 nr chan 00=1,01=2 144 * 22:21 nr chan 00=1,01=2
145 * 20 reserved 145 * 20 reserved
146 * 19:18 Data Integ Mode 00=none,01=ecc 146 * 19:18 Data Integ Mode 00=none,01=ecc
147 * 17:11 reserved 147 * 17:11 reserved
148 * 10:8 refresh mode 148 * 10:8 refresh mode
149 * 7 reserved 149 * 7 reserved
150 * 6:4 mode select 150 * 6:4 mode select
151 * 3:2 reserved 151 * 3:2 reserved
152 * 1:0 DRAM type 01=DDR 152 * 1:0 DRAM type 01=DDR
153 */ 153 */
154 154
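The two DRC fields this driver consumes are the channel count (bit 21) and the data integrity mode (bit 18); dual_channel_active() and the drc_ddim computation in i82875p_init_csrows() below extract them exactly as in this sketch:

	u32 drc = readl(ovrfl_window + I82875P_DRC);
	unsigned nr_chans = ((drc >> 21) & 0x1) + 1; /* 0 -> 1 chan, 1 -> 2 chans */
	u32 drc_ddim = (drc >> 18) & 0x1;            /* 1 when ECC (SECDED) is on */
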
155 enum i82875p_chips { 155 enum i82875p_chips {
156 I82875P = 0, 156 I82875P = 0,
157 }; 157 };
158 158
159 struct i82875p_pvt { 159 struct i82875p_pvt {
160 struct pci_dev *ovrfl_pdev; 160 struct pci_dev *ovrfl_pdev;
161 void __iomem *ovrfl_window; 161 void __iomem *ovrfl_window;
162 }; 162 };
163 163
164 struct i82875p_dev_info { 164 struct i82875p_dev_info {
165 const char *ctl_name; 165 const char *ctl_name;
166 }; 166 };
167 167
168 struct i82875p_error_info { 168 struct i82875p_error_info {
169 u16 errsts; 169 u16 errsts;
170 u32 eap; 170 u32 eap;
171 u8 des; 171 u8 des;
172 u8 derrsyn; 172 u8 derrsyn;
173 u16 errsts2; 173 u16 errsts2;
174 }; 174 };
175 175
176 static const struct i82875p_dev_info i82875p_devs[] = { 176 static const struct i82875p_dev_info i82875p_devs[] = {
177 [I82875P] = { 177 [I82875P] = {
178 .ctl_name = "i82875p"}, 178 .ctl_name = "i82875p"},
179 }; 179 };
180 180
181 static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has 181 static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
182 * already registered driver 182 * already registered driver
183 */ 183 */
184 184
185 static struct edac_pci_ctl_info *i82875p_pci; 185 static struct edac_pci_ctl_info *i82875p_pci;
186 186
187 static void i82875p_get_error_info(struct mem_ctl_info *mci, 187 static void i82875p_get_error_info(struct mem_ctl_info *mci,
188 struct i82875p_error_info *info) 188 struct i82875p_error_info *info)
189 { 189 {
190 struct pci_dev *pdev; 190 struct pci_dev *pdev;
191 191
192 pdev = to_pci_dev(mci->pdev); 192 pdev = to_pci_dev(mci->pdev);
193 193
194 /* 194 /*
195 * This is a mess because there is no atomic way to read all the 195 * This is a mess because there is no atomic way to read all the
 196 * registers at once, and a CE can be overwritten by a UE 196 * registers at once, and a CE can be overwritten by a UE
 197 * between the reads. 197 * between the reads.
198 */ 198 */
199 pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts); 199 pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);
200 200
201 if (!(info->errsts & 0x0081)) 201 if (!(info->errsts & 0x0081))
202 return; 202 return;
203 203
204 pci_read_config_dword(pdev, I82875P_EAP, &info->eap); 204 pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
205 pci_read_config_byte(pdev, I82875P_DES, &info->des); 205 pci_read_config_byte(pdev, I82875P_DES, &info->des);
206 pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); 206 pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
207 pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2); 207 pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);
208 208
209 /* 209 /*
 210 * If the error is the same for both reads then 210 * If the error is the same for both reads then
 211 * the first set of reads is valid. If there is a change then 211 * the first set of reads is valid. If there is a change then
 212 * there was a CE with no info and the second set of reads is valid 212 * there was a CE with no info and the second set of reads is valid
 213 * and should be the UE info. 213 * and should be the UE info.
214 */ 214 */
215 if ((info->errsts ^ info->errsts2) & 0x0081) { 215 if ((info->errsts ^ info->errsts2) & 0x0081) {
216 pci_read_config_dword(pdev, I82875P_EAP, &info->eap); 216 pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
217 pci_read_config_byte(pdev, I82875P_DES, &info->des); 217 pci_read_config_byte(pdev, I82875P_DES, &info->des);
218 pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); 218 pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
219 } 219 }
220 220
221 pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081); 221 pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
222 } 222 }
223 223
224 static int i82875p_process_error_info(struct mem_ctl_info *mci, 224 static int i82875p_process_error_info(struct mem_ctl_info *mci,
225 struct i82875p_error_info *info, 225 struct i82875p_error_info *info,
226 int handle_errors) 226 int handle_errors)
227 { 227 {
228 int row, multi_chan; 228 int row, multi_chan;
229 229
230 multi_chan = mci->csrows[0].nr_channels - 1; 230 multi_chan = mci->csrows[0]->nr_channels - 1;
231 231
232 if (!(info->errsts & 0x0081)) 232 if (!(info->errsts & 0x0081))
233 return 0; 233 return 0;
234 234
235 if (!handle_errors) 235 if (!handle_errors)
236 return 1; 236 return 1;
237 237
238 if ((info->errsts ^ info->errsts2) & 0x0081) { 238 if ((info->errsts ^ info->errsts2) & 0x0081) {
239 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 239 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
240 -1, -1, -1, 240 -1, -1, -1,
241 "UE overwrote CE", "", NULL); 241 "UE overwrote CE", "", NULL);
242 info->errsts = info->errsts2; 242 info->errsts = info->errsts2;
243 } 243 }
244 244
245 info->eap >>= PAGE_SHIFT; 245 info->eap >>= PAGE_SHIFT;
246 row = edac_mc_find_csrow_by_page(mci, info->eap); 246 row = edac_mc_find_csrow_by_page(mci, info->eap);
247 247
248 if (info->errsts & 0x0080) 248 if (info->errsts & 0x0080)
249 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 249 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
250 info->eap, 0, 0, 250 info->eap, 0, 0,
251 row, -1, -1, 251 row, -1, -1,
252 "i82875p UE", "", NULL); 252 "i82875p UE", "", NULL);
253 else 253 else
254 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 254 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
255 info->eap, 0, info->derrsyn, 255 info->eap, 0, info->derrsyn,
256 row, multi_chan ? (info->des & 0x1) : 0, 256 row, multi_chan ? (info->des & 0x1) : 0,
257 -1, "i82875p CE", "", NULL); 257 -1, "i82875p CE", "", NULL);
258 258
259 return 1; 259 return 1;
260 } 260 }
261 261
262 static void i82875p_check(struct mem_ctl_info *mci) 262 static void i82875p_check(struct mem_ctl_info *mci)
263 { 263 {
264 struct i82875p_error_info info; 264 struct i82875p_error_info info;
265 265
266 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 266 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
267 i82875p_get_error_info(mci, &info); 267 i82875p_get_error_info(mci, &info);
268 i82875p_process_error_info(mci, &info, 1); 268 i82875p_process_error_info(mci, &info, 1);
269 } 269 }
270 270
271 /* Return 0 on success or 1 on failure. */ 271 /* Return 0 on success or 1 on failure. */
272 static int i82875p_setup_overfl_dev(struct pci_dev *pdev, 272 static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
273 struct pci_dev **ovrfl_pdev, 273 struct pci_dev **ovrfl_pdev,
274 void __iomem **ovrfl_window) 274 void __iomem **ovrfl_window)
275 { 275 {
276 struct pci_dev *dev; 276 struct pci_dev *dev;
277 void __iomem *window; 277 void __iomem *window;
278 int err; 278 int err;
279 279
280 *ovrfl_pdev = NULL; 280 *ovrfl_pdev = NULL;
281 *ovrfl_window = NULL; 281 *ovrfl_window = NULL;
282 dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); 282 dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
283 283
284 if (dev == NULL) { 284 if (dev == NULL) {
285 /* Intel tells BIOS developers to hide device 6 which 285 /* Intel tells BIOS developers to hide device 6 which
286 * configures the overflow device access containing 286 * configures the overflow device access containing
287 * the DRBs - this is where we expose device 6. 287 * the DRBs - this is where we expose device 6.
288 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm 288 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
289 */ 289 */
290 pci_write_bits8(pdev, 0xf4, 0x2, 0x2); 290 pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
291 dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); 291 dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
292 292
293 if (dev == NULL) 293 if (dev == NULL)
294 return 1; 294 return 1;
295 295
296 err = pci_bus_add_device(dev); 296 err = pci_bus_add_device(dev);
297 if (err) { 297 if (err) {
298 i82875p_printk(KERN_ERR, 298 i82875p_printk(KERN_ERR,
299 "%s(): pci_bus_add_device() Failed\n", 299 "%s(): pci_bus_add_device() Failed\n",
300 __func__); 300 __func__);
301 } 301 }
302 pci_bus_assign_resources(dev->bus); 302 pci_bus_assign_resources(dev->bus);
303 } 303 }
304 304
305 *ovrfl_pdev = dev; 305 *ovrfl_pdev = dev;
306 306
307 if (pci_enable_device(dev)) { 307 if (pci_enable_device(dev)) {
308 i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow " 308 i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
309 "device\n", __func__); 309 "device\n", __func__);
310 return 1; 310 return 1;
311 } 311 }
312 312
313 if (pci_request_regions(dev, pci_name(dev))) { 313 if (pci_request_regions(dev, pci_name(dev))) {
314 #ifdef CORRECT_BIOS 314 #ifdef CORRECT_BIOS
315 goto fail0; 315 goto fail0;
316 #endif 316 #endif
317 } 317 }
318 318
319 /* cache is irrelevant for PCI bus reads/writes */ 319 /* cache is irrelevant for PCI bus reads/writes */
320 window = pci_ioremap_bar(dev, 0); 320 window = pci_ioremap_bar(dev, 0);
321 if (window == NULL) { 321 if (window == NULL) {
322 i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", 322 i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
323 __func__); 323 __func__);
324 goto fail1; 324 goto fail1;
325 } 325 }
326 326
327 *ovrfl_window = window; 327 *ovrfl_window = window;
328 return 0; 328 return 0;
329 329
330 fail1: 330 fail1:
331 pci_release_regions(dev); 331 pci_release_regions(dev);
332 332
333 #ifdef CORRECT_BIOS 333 #ifdef CORRECT_BIOS
334 fail0: 334 fail0:
335 pci_disable_device(dev); 335 pci_disable_device(dev);
336 #endif 336 #endif
337 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ 337 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
338 return 1; 338 return 1;
339 } 339 }
340 340
341 /* Return 1 if dual channel mode is active. Else return 0. */ 341 /* Return 1 if dual channel mode is active. Else return 0. */
342 static inline int dual_channel_active(u32 drc) 342 static inline int dual_channel_active(u32 drc)
343 { 343 {
344 return (drc >> 21) & 0x1; 344 return (drc >> 21) & 0x1;
345 } 345 }
346 346
347 static void i82875p_init_csrows(struct mem_ctl_info *mci, 347 static void i82875p_init_csrows(struct mem_ctl_info *mci,
348 struct pci_dev *pdev, 348 struct pci_dev *pdev,
349 void __iomem * ovrfl_window, u32 drc) 349 void __iomem * ovrfl_window, u32 drc)
350 { 350 {
351 struct csrow_info *csrow; 351 struct csrow_info *csrow;
352 struct dimm_info *dimm; 352 struct dimm_info *dimm;
353 unsigned nr_chans = dual_channel_active(drc) + 1; 353 unsigned nr_chans = dual_channel_active(drc) + 1;
354 unsigned long last_cumul_size; 354 unsigned long last_cumul_size;
355 u8 value; 355 u8 value;
356 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 356 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
357 u32 cumul_size, nr_pages; 357 u32 cumul_size, nr_pages;
358 int index, j; 358 int index, j;
359 359
360 drc_ddim = (drc >> 18) & 0x1; 360 drc_ddim = (drc >> 18) & 0x1;
361 last_cumul_size = 0; 361 last_cumul_size = 0;
362 362
363 /* The dram row boundary (DRB) reg values are boundary address 363 /* The dram row boundary (DRB) reg values are boundary address
364 * for each DRAM row with a granularity of 32 or 64MB (single/dual 364 * for each DRAM row with a granularity of 32 or 64MB (single/dual
365 * channel operation). DRB regs are cumulative; therefore DRB7 will 365 * channel operation). DRB regs are cumulative; therefore DRB7 will
366 * contain the total memory contained in all eight rows. 366 * contain the total memory contained in all eight rows.
367 */ 367 */
368 368
369 for (index = 0; index < mci->nr_csrows; index++) { 369 for (index = 0; index < mci->nr_csrows; index++) {
370 csrow = &mci->csrows[index]; 370 csrow = mci->csrows[index];
371 371
372 value = readb(ovrfl_window + I82875P_DRB + index); 372 value = readb(ovrfl_window + I82875P_DRB + index);
373 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); 373 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
374 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 374 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
375 cumul_size); 375 cumul_size);
376 if (cumul_size == last_cumul_size) 376 if (cumul_size == last_cumul_size)
377 continue; /* not populated */ 377 continue; /* not populated */
378 378
379 csrow->first_page = last_cumul_size; 379 csrow->first_page = last_cumul_size;
380 csrow->last_page = cumul_size - 1; 380 csrow->last_page = cumul_size - 1;
381 nr_pages = cumul_size - last_cumul_size; 381 nr_pages = cumul_size - last_cumul_size;
382 last_cumul_size = cumul_size; 382 last_cumul_size = cumul_size;
383 383
384 for (j = 0; j < nr_chans; j++) { 384 for (j = 0; j < nr_chans; j++) {
385 dimm = csrow->channels[j].dimm; 385 dimm = csrow->channels[j]->dimm;
386 386
387 dimm->nr_pages = nr_pages / nr_chans; 387 dimm->nr_pages = nr_pages / nr_chans;
 388 dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */ 388 dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
389 dimm->mtype = MEM_DDR; 389 dimm->mtype = MEM_DDR;
390 dimm->dtype = DEV_UNKNOWN; 390 dimm->dtype = DEV_UNKNOWN;
391 dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; 391 dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
392 } 392 }
393 } 393 }
394 } 394 }
395 395
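For illustration, the cumulative-DRB arithmetic the loop above relies on, run on hypothetical register values:

	/* Two populated rows; DRB units are 64MiB (I82875P_DRB_SHIFT == 26). */
	u8 drb[2] = { 0x08, 0x10 }; /* boundaries at 512MiB and 1GiB */
	u32 last = 0;
	for (int i = 0; i < 2; i++) {
		u32 cumul = drb[i] << (I82875P_DRB_SHIFT - PAGE_SHIFT);
		u32 nr_pages = cumul - last; /* pages contributed by row i */
		last = cumul;                /* each row spans 512MiB here */
	}
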
396 static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) 396 static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
397 { 397 {
398 int rc = -ENODEV; 398 int rc = -ENODEV;
399 struct mem_ctl_info *mci; 399 struct mem_ctl_info *mci;
400 struct edac_mc_layer layers[2]; 400 struct edac_mc_layer layers[2];
401 struct i82875p_pvt *pvt; 401 struct i82875p_pvt *pvt;
402 struct pci_dev *ovrfl_pdev; 402 struct pci_dev *ovrfl_pdev;
403 void __iomem *ovrfl_window; 403 void __iomem *ovrfl_window;
404 u32 drc; 404 u32 drc;
405 u32 nr_chans; 405 u32 nr_chans;
406 struct i82875p_error_info discard; 406 struct i82875p_error_info discard;
407 407
408 debugf0("%s()\n", __func__); 408 debugf0("%s()\n", __func__);
409 409
410 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); 410 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
411 411
412 if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) 412 if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
413 return -ENODEV; 413 return -ENODEV;
414 drc = readl(ovrfl_window + I82875P_DRC); 414 drc = readl(ovrfl_window + I82875P_DRC);
415 nr_chans = dual_channel_active(drc) + 1; 415 nr_chans = dual_channel_active(drc) + 1;
416 416
417 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 417 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
418 layers[0].size = I82875P_NR_CSROWS(nr_chans); 418 layers[0].size = I82875P_NR_CSROWS(nr_chans);
419 layers[0].is_virt_csrow = true; 419 layers[0].is_virt_csrow = true;
420 layers[1].type = EDAC_MC_LAYER_CHANNEL; 420 layers[1].type = EDAC_MC_LAYER_CHANNEL;
421 layers[1].size = nr_chans; 421 layers[1].size = nr_chans;
422 layers[1].is_virt_csrow = false; 422 layers[1].is_virt_csrow = false;
423 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); 423 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
424 if (!mci) { 424 if (!mci) {
425 rc = -ENOMEM; 425 rc = -ENOMEM;
426 goto fail0; 426 goto fail0;
427 } 427 }
428 428
429 debugf3("%s(): init mci\n", __func__); 429 debugf3("%s(): init mci\n", __func__);
430 mci->pdev = &pdev->dev; 430 mci->pdev = &pdev->dev;
431 mci->mtype_cap = MEM_FLAG_DDR; 431 mci->mtype_cap = MEM_FLAG_DDR;
432 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 432 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
433 mci->edac_cap = EDAC_FLAG_UNKNOWN; 433 mci->edac_cap = EDAC_FLAG_UNKNOWN;
434 mci->mod_name = EDAC_MOD_STR; 434 mci->mod_name = EDAC_MOD_STR;
435 mci->mod_ver = I82875P_REVISION; 435 mci->mod_ver = I82875P_REVISION;
436 mci->ctl_name = i82875p_devs[dev_idx].ctl_name; 436 mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
437 mci->dev_name = pci_name(pdev); 437 mci->dev_name = pci_name(pdev);
438 mci->edac_check = i82875p_check; 438 mci->edac_check = i82875p_check;
439 mci->ctl_page_to_phys = NULL; 439 mci->ctl_page_to_phys = NULL;
440 debugf3("%s(): init pvt\n", __func__); 440 debugf3("%s(): init pvt\n", __func__);
441 pvt = (struct i82875p_pvt *)mci->pvt_info; 441 pvt = (struct i82875p_pvt *)mci->pvt_info;
442 pvt->ovrfl_pdev = ovrfl_pdev; 442 pvt->ovrfl_pdev = ovrfl_pdev;
443 pvt->ovrfl_window = ovrfl_window; 443 pvt->ovrfl_window = ovrfl_window;
444 i82875p_init_csrows(mci, pdev, ovrfl_window, drc); 444 i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
445 i82875p_get_error_info(mci, &discard); /* clear counters */ 445 i82875p_get_error_info(mci, &discard); /* clear counters */
446 446
447 /* Here we assume that we will never see multiple instances of this 447 /* Here we assume that we will never see multiple instances of this
448 * type of memory controller. The ID is therefore hardcoded to 0. 448 * type of memory controller. The ID is therefore hardcoded to 0.
449 */ 449 */
450 if (edac_mc_add_mc(mci)) { 450 if (edac_mc_add_mc(mci)) {
451 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 451 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
452 goto fail1; 452 goto fail1;
453 } 453 }
454 454
455 /* allocating generic PCI control info */ 455 /* allocating generic PCI control info */
456 i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 456 i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
457 if (!i82875p_pci) { 457 if (!i82875p_pci) {
458 printk(KERN_WARNING 458 printk(KERN_WARNING
459 "%s(): Unable to create PCI control\n", 459 "%s(): Unable to create PCI control\n",
460 __func__); 460 __func__);
461 printk(KERN_WARNING 461 printk(KERN_WARNING
462 "%s(): PCI error report via EDAC not setup\n", 462 "%s(): PCI error report via EDAC not setup\n",
463 __func__); 463 __func__);
464 } 464 }
465 465
466 /* get this far and it's successful */ 466 /* get this far and it's successful */
467 debugf3("%s(): success\n", __func__); 467 debugf3("%s(): success\n", __func__);
468 return 0; 468 return 0;
469 469
470 fail1: 470 fail1:
471 edac_mc_free(mci); 471 edac_mc_free(mci);
472 472
473 fail0: 473 fail0:
474 iounmap(ovrfl_window); 474 iounmap(ovrfl_window);
475 pci_release_regions(ovrfl_pdev); 475 pci_release_regions(ovrfl_pdev);
476 476
477 pci_disable_device(ovrfl_pdev); 477 pci_disable_device(ovrfl_pdev);
478 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ 478 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
479 return rc; 479 return rc;
480 } 480 }
481 481
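The probe above shows the allocation pattern this commit moves the drivers to: describe the controller as layers and let edac_mc_alloc() create each csrow/channel/dimm object individually. In outline (a sketch of the pattern, not additional driver code):

	struct edac_mc_layer layers[2];

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; /* outer layer: csrows */
	layers[0].size = I82875P_NR_CSROWS(nr_chans);
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;     /* inner layer: channels */
	layers[1].size = nr_chans;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct i82875p_pvt));
	/* objects are now reached through pointers, e.g.
	 * mci->csrows[i]->channels[j]->dimm, not embedded arrays */
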
482 /* returns count (>= 0), or negative on error */ 482 /* returns count (>= 0), or negative on error */
483 static int __devinit i82875p_init_one(struct pci_dev *pdev, 483 static int __devinit i82875p_init_one(struct pci_dev *pdev,
484 const struct pci_device_id *ent) 484 const struct pci_device_id *ent)
485 { 485 {
486 int rc; 486 int rc;
487 487
488 debugf0("%s()\n", __func__); 488 debugf0("%s()\n", __func__);
489 i82875p_printk(KERN_INFO, "i82875p init one\n"); 489 i82875p_printk(KERN_INFO, "i82875p init one\n");
490 490
491 if (pci_enable_device(pdev) < 0) 491 if (pci_enable_device(pdev) < 0)
492 return -EIO; 492 return -EIO;
493 493
494 rc = i82875p_probe1(pdev, ent->driver_data); 494 rc = i82875p_probe1(pdev, ent->driver_data);
495 495
496 if (mci_pdev == NULL) 496 if (mci_pdev == NULL)
497 mci_pdev = pci_dev_get(pdev); 497 mci_pdev = pci_dev_get(pdev);
498 498
499 return rc; 499 return rc;
500 } 500 }
501 501
502 static void __devexit i82875p_remove_one(struct pci_dev *pdev) 502 static void __devexit i82875p_remove_one(struct pci_dev *pdev)
503 { 503 {
504 struct mem_ctl_info *mci; 504 struct mem_ctl_info *mci;
505 struct i82875p_pvt *pvt = NULL; 505 struct i82875p_pvt *pvt = NULL;
506 506
507 debugf0("%s()\n", __func__); 507 debugf0("%s()\n", __func__);
508 508
509 if (i82875p_pci) 509 if (i82875p_pci)
510 edac_pci_release_generic_ctl(i82875p_pci); 510 edac_pci_release_generic_ctl(i82875p_pci);
511 511
512 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) 512 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
513 return; 513 return;
514 514
515 pvt = (struct i82875p_pvt *)mci->pvt_info; 515 pvt = (struct i82875p_pvt *)mci->pvt_info;
516 516
517 if (pvt->ovrfl_window) 517 if (pvt->ovrfl_window)
518 iounmap(pvt->ovrfl_window); 518 iounmap(pvt->ovrfl_window);
519 519
520 if (pvt->ovrfl_pdev) { 520 if (pvt->ovrfl_pdev) {
521 #ifdef CORRECT_BIOS 521 #ifdef CORRECT_BIOS
522 pci_release_regions(pvt->ovrfl_pdev); 522 pci_release_regions(pvt->ovrfl_pdev);
523 #endif /*CORRECT_BIOS */ 523 #endif /*CORRECT_BIOS */
524 pci_disable_device(pvt->ovrfl_pdev); 524 pci_disable_device(pvt->ovrfl_pdev);
525 pci_dev_put(pvt->ovrfl_pdev); 525 pci_dev_put(pvt->ovrfl_pdev);
526 } 526 }
527 527
528 edac_mc_free(mci); 528 edac_mc_free(mci);
529 } 529 }
530 530
531 static DEFINE_PCI_DEVICE_TABLE(i82875p_pci_tbl) = { 531 static DEFINE_PCI_DEVICE_TABLE(i82875p_pci_tbl) = {
532 { 532 {
533 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 533 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
534 I82875P}, 534 I82875P},
535 { 535 {
536 0, 536 0,
537 } /* 0 terminated list. */ 537 } /* 0 terminated list. */
538 }; 538 };
539 539
540 MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); 540 MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
541 541
542 static struct pci_driver i82875p_driver = { 542 static struct pci_driver i82875p_driver = {
543 .name = EDAC_MOD_STR, 543 .name = EDAC_MOD_STR,
544 .probe = i82875p_init_one, 544 .probe = i82875p_init_one,
545 .remove = __devexit_p(i82875p_remove_one), 545 .remove = __devexit_p(i82875p_remove_one),
546 .id_table = i82875p_pci_tbl, 546 .id_table = i82875p_pci_tbl,
547 }; 547 };
548 548
549 static int __init i82875p_init(void) 549 static int __init i82875p_init(void)
550 { 550 {
551 int pci_rc; 551 int pci_rc;
552 552
553 debugf3("%s()\n", __func__); 553 debugf3("%s()\n", __func__);
554 554
555 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 555 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
556 opstate_init(); 556 opstate_init();
557 557
558 pci_rc = pci_register_driver(&i82875p_driver); 558 pci_rc = pci_register_driver(&i82875p_driver);
559 559
560 if (pci_rc < 0) 560 if (pci_rc < 0)
561 goto fail0; 561 goto fail0;
562 562
563 if (mci_pdev == NULL) { 563 if (mci_pdev == NULL) {
564 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 564 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
565 PCI_DEVICE_ID_INTEL_82875_0, NULL); 565 PCI_DEVICE_ID_INTEL_82875_0, NULL);
566 566
567 if (!mci_pdev) { 567 if (!mci_pdev) {
568 debugf0("875p pci_get_device fail\n"); 568 debugf0("875p pci_get_device fail\n");
569 pci_rc = -ENODEV; 569 pci_rc = -ENODEV;
570 goto fail1; 570 goto fail1;
571 } 571 }
572 572
573 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); 573 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
574 574
575 if (pci_rc < 0) { 575 if (pci_rc < 0) {
576 debugf0("875p init fail\n"); 576 debugf0("875p init fail\n");
577 pci_rc = -ENODEV; 577 pci_rc = -ENODEV;
578 goto fail1; 578 goto fail1;
579 } 579 }
580 } 580 }
581 581
582 return 0; 582 return 0;
583 583
584 fail1: 584 fail1:
585 pci_unregister_driver(&i82875p_driver); 585 pci_unregister_driver(&i82875p_driver);
586 586
587 fail0: 587 fail0:
588 if (mci_pdev != NULL) 588 if (mci_pdev != NULL)
589 pci_dev_put(mci_pdev); 589 pci_dev_put(mci_pdev);
590 590
591 return pci_rc; 591 return pci_rc;
592 } 592 }
593 593
594 static void __exit i82875p_exit(void) 594 static void __exit i82875p_exit(void)
595 { 595 {
596 debugf3("%s()\n", __func__); 596 debugf3("%s()\n", __func__);
597 597
598 i82875p_remove_one(mci_pdev); 598 i82875p_remove_one(mci_pdev);
599 pci_dev_put(mci_pdev); 599 pci_dev_put(mci_pdev);
600 600
601 pci_unregister_driver(&i82875p_driver); 601 pci_unregister_driver(&i82875p_driver);
602 602
603 } 603 }
604 604
605 module_init(i82875p_init); 605 module_init(i82875p_init);
606 module_exit(i82875p_exit); 606 module_exit(i82875p_exit);
607 607
608 MODULE_LICENSE("GPL"); 608 MODULE_LICENSE("GPL");
609 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); 609 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
610 MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); 610 MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
611 611
612 module_param(edac_op_state, int, 0444); 612 module_param(edac_op_state, int, 0444);
613 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 613 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
614 614
drivers/edac/i82975x_edac.c
1 /* 1 /*
2 * Intel 82975X Memory Controller kernel module 2 * Intel 82975X Memory Controller kernel module
3 * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com) 3 * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com)
4 * (C) 2007 jetzbroadband (http://jetzbroadband.com) 4 * (C) 2007 jetzbroadband (http://jetzbroadband.com)
5 * This file may be distributed under the terms of the 5 * This file may be distributed under the terms of the
6 * GNU General Public License. 6 * GNU General Public License.
7 * 7 *
8 * Written by Arvind R. 8 * Written by Arvind R.
9 * Copied from i82875p_edac.c source: 9 * Copied from i82875p_edac.c source:
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/pci.h> 14 #include <linux/pci.h>
15 #include <linux/pci_ids.h> 15 #include <linux/pci_ids.h>
16 #include <linux/edac.h> 16 #include <linux/edac.h>
17 #include "edac_core.h" 17 #include "edac_core.h"
18 18
19 #define I82975X_REVISION " Ver: 1.0.0" 19 #define I82975X_REVISION " Ver: 1.0.0"
20 #define EDAC_MOD_STR "i82975x_edac" 20 #define EDAC_MOD_STR "i82975x_edac"
21 21
22 #define i82975x_printk(level, fmt, arg...) \ 22 #define i82975x_printk(level, fmt, arg...) \
23 edac_printk(level, "i82975x", fmt, ##arg) 23 edac_printk(level, "i82975x", fmt, ##arg)
24 24
25 #define i82975x_mc_printk(mci, level, fmt, arg...) \ 25 #define i82975x_mc_printk(mci, level, fmt, arg...) \
26 edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg) 26 edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg)
27 27
28 #ifndef PCI_DEVICE_ID_INTEL_82975_0 28 #ifndef PCI_DEVICE_ID_INTEL_82975_0
29 #define PCI_DEVICE_ID_INTEL_82975_0 0x277c 29 #define PCI_DEVICE_ID_INTEL_82975_0 0x277c
30 #endif /* PCI_DEVICE_ID_INTEL_82975_0 */ 30 #endif /* PCI_DEVICE_ID_INTEL_82975_0 */
31 31
32 #define I82975X_NR_DIMMS 8 32 #define I82975X_NR_DIMMS 8
33 #define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans)) 33 #define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans))
34 34
35 /* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */ 35 /* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
36 #define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b) 36 #define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
37 * 37 *
38 * 31:7 128 byte cache-line address 38 * 31:7 128 byte cache-line address
39 * 6:1 reserved 39 * 6:1 reserved
40 * 0 0: CH0; 1: CH1 40 * 0 0: CH0; 1: CH1
41 */ 41 */
42 42
43 #define I82975X_DERRSYN 0x5c /* Dram Error SYNdrome (8b) 43 #define I82975X_DERRSYN 0x5c /* Dram Error SYNdrome (8b)
44 * 44 *
45 * 7:0 DRAM ECC Syndrome 45 * 7:0 DRAM ECC Syndrome
46 */ 46 */
47 47
48 #define I82975X_DES 0x5d /* Dram ERRor DeSTination (8b) 48 #define I82975X_DES 0x5d /* Dram ERRor DeSTination (8b)
49 * 0h: Processor Memory Reads 49 * 0h: Processor Memory Reads
50 * 1h:7h reserved 50 * 1h:7h reserved
51 * More - See Page 65 of Intel DocSheet. 51 * More - See Page 65 of Intel DocSheet.
52 */ 52 */
53 53
54 #define I82975X_ERRSTS 0xc8 /* Error Status Register (16b) 54 #define I82975X_ERRSTS 0xc8 /* Error Status Register (16b)
55 * 55 *
56 * 15:12 reserved 56 * 15:12 reserved
57 * 11 Thermal Sensor Event 57 * 11 Thermal Sensor Event
58 * 10 reserved 58 * 10 reserved
59 * 9 non-DRAM lock error (ndlock) 59 * 9 non-DRAM lock error (ndlock)
60 * 8 Refresh Timeout 60 * 8 Refresh Timeout
61 * 7:2 reserved 61 * 7:2 reserved
62 * 1 ECC UE (multibit DRAM error) 62 * 1 ECC UE (multibit DRAM error)
63 * 0 ECC CE (singlebit DRAM error) 63 * 0 ECC CE (singlebit DRAM error)
64 */ 64 */
65 65
66 /* Error Reporting is supported by 3 mechanisms: 66 /* Error Reporting is supported by 3 mechanisms:
67 1. DMI SERR generation ( ERRCMD ) 67 1. DMI SERR generation ( ERRCMD )
68 2. SMI DMI generation ( SMICMD ) 68 2. SMI DMI generation ( SMICMD )
69 3. SCI DMI generation ( SCICMD ) 69 3. SCI DMI generation ( SCICMD )
70 NOTE: Only ONE of the three must be enabled 70 NOTE: Only ONE of the three must be enabled
71 */ 71 */
72 #define I82975X_ERRCMD 0xca /* Error Command (16b) 72 #define I82975X_ERRCMD 0xca /* Error Command (16b)
73 * 73 *
74 * 15:12 reserved 74 * 15:12 reserved
75 * 11 Thermal Sensor Event 75 * 11 Thermal Sensor Event
76 * 10 reserved 76 * 10 reserved
77 * 9 non-DRAM lock error (ndlock) 77 * 9 non-DRAM lock error (ndlock)
78 * 8 Refresh Timeout 78 * 8 Refresh Timeout
79 * 7:2 reserved 79 * 7:2 reserved
80 * 1 ECC UE (multibit DRAM error) 80 * 1 ECC UE (multibit DRAM error)
81 * 0 ECC CE (singlebit DRAM error) 81 * 0 ECC CE (singlebit DRAM error)
82 */ 82 */
83 83
84 #define I82975X_SMICMD 0xcc /* Error Command (16b) 84 #define I82975X_SMICMD 0xcc /* Error Command (16b)
85 * 85 *
86 * 15:2 reserved 86 * 15:2 reserved
87 * 1 ECC UE (multibit DRAM error) 87 * 1 ECC UE (multibit DRAM error)
88 * 0 ECC CE (singlebit DRAM error) 88 * 0 ECC CE (singlebit DRAM error)
89 */ 89 */
90 90
91 #define I82975X_SCICMD 0xce /* Error Command (16b) 91 #define I82975X_SCICMD 0xce /* Error Command (16b)
92 * 92 *
93 * 15:2 reserved 93 * 15:2 reserved
94 * 1 ECC UE (multibit DRAM error) 94 * 1 ECC UE (multibit DRAM error)
95 * 0 ECC CE (singlebit DRAM error) 95 * 0 ECC CE (singlebit DRAM error)
96 */ 96 */
97 97
98 #define I82975X_XEAP 0xfc /* Extended Dram Error Address Pointer (8b) 98 #define I82975X_XEAP 0xfc /* Extended Dram Error Address Pointer (8b)
99 * 99 *
100 * 7:1 reserved 100 * 7:1 reserved
101 * 0 Bit32 of the Dram Error Address 101 * 0 Bit32 of the Dram Error Address
102 */ 102 */
103 103
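Bit 0 of XEAP supplies address bit 32, which i82975x_process_error_info() later splices back in when converting the EAP cache-line address to a page number; this sketch mirrors that code:

	unsigned long page = eap; /* 31:7 cache-line address, bit 0 = channel */
	page >>= 1;               /* drop the channel bit */
	if (xeap & 1)
		page |= 0x80000000; /* re-insert address bit 32 */
	page >>= (PAGE_SHIFT - 1);  /* byte address -> page number */
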
104 #define I82975X_MCHBAR 0x44 /* 104 #define I82975X_MCHBAR 0x44 /*
105 * 105 *
106 * 31:14 Base Addr of 16K memory-mapped 106 * 31:14 Base Addr of 16K memory-mapped
107 * configuration space 107 * configuration space
 108 * 13:1 reserved 108 * 13:1 reserved
109 * 0 mem-mapped config space enable 109 * 0 mem-mapped config space enable
110 */ 110 */
111 111
 112 /* NOTE: Following addresses have to be indexed using MCHBAR offset (44h, 32b) */ 112 /* NOTE: Following addresses have to be indexed using MCHBAR offset (44h, 32b) */
113 /* Intel 82975x memory mapped register space */ 113 /* Intel 82975x memory mapped register space */
114 114
115 #define I82975X_DRB_SHIFT 25 /* fixed 32MiB grain */ 115 #define I82975X_DRB_SHIFT 25 /* fixed 32MiB grain */
116 116
117 #define I82975X_DRB 0x100 /* DRAM Row Boundary (8b x 8) 117 #define I82975X_DRB 0x100 /* DRAM Row Boundary (8b x 8)
118 * 118 *
119 * 7 set to 1 in highest DRB of 119 * 7 set to 1 in highest DRB of
120 * channel if 4GB in ch. 120 * channel if 4GB in ch.
121 * 6:2 upper boundary of rank in 121 * 6:2 upper boundary of rank in
122 * 32MB grains 122 * 32MB grains
123 * 1:0 set to 0 123 * 1:0 set to 0
124 */ 124 */
125 #define I82975X_DRB_CH0R0 0x100 125 #define I82975X_DRB_CH0R0 0x100
126 #define I82975X_DRB_CH0R1 0x101 126 #define I82975X_DRB_CH0R1 0x101
127 #define I82975X_DRB_CH0R2 0x102 127 #define I82975X_DRB_CH0R2 0x102
128 #define I82975X_DRB_CH0R3 0x103 128 #define I82975X_DRB_CH0R3 0x103
129 #define I82975X_DRB_CH1R0 0x180 129 #define I82975X_DRB_CH1R0 0x180
130 #define I82975X_DRB_CH1R1 0x181 130 #define I82975X_DRB_CH1R1 0x181
131 #define I82975X_DRB_CH1R2 0x182 131 #define I82975X_DRB_CH1R2 0x182
132 #define I82975X_DRB_CH1R3 0x183 132 #define I82975X_DRB_CH1R3 0x183
133 133
134 134
135 #define I82975X_DRA 0x108 /* DRAM Row Attribute (4b x 8) 135 #define I82975X_DRA 0x108 /* DRAM Row Attribute (4b x 8)
136 * defines the PAGE SIZE to be used 136 * defines the PAGE SIZE to be used
137 * for the rank 137 * for the rank
138 * 7 reserved 138 * 7 reserved
139 * 6:4 row attr of odd rank, i.e. 1 139 * 6:4 row attr of odd rank, i.e. 1
140 * 3 reserved 140 * 3 reserved
141 * 2:0 row attr of even rank, i.e. 0 141 * 2:0 row attr of even rank, i.e. 0
142 * 142 *
143 * 000 = unpopulated 143 * 000 = unpopulated
144 * 001 = reserved 144 * 001 = reserved
145 * 010 = 4KiB 145 * 010 = 4KiB
146 * 011 = 8KiB 146 * 011 = 8KiB
147 * 100 = 16KiB 147 * 100 = 16KiB
148 * others = reserved 148 * others = reserved
149 */ 149 */
150 #define I82975X_DRA_CH0R01 0x108 150 #define I82975X_DRA_CH0R01 0x108
151 #define I82975X_DRA_CH0R23 0x109 151 #define I82975X_DRA_CH0R23 0x109
152 #define I82975X_DRA_CH1R01 0x188 152 #define I82975X_DRA_CH1R01 0x188
153 #define I82975X_DRA_CH1R23 0x189 153 #define I82975X_DRA_CH1R23 0x189
154 154
155 155
156 #define I82975X_BNKARC 0x10e /* Type of device in each rank - Bank Arch (16b) 156 #define I82975X_BNKARC 0x10e /* Type of device in each rank - Bank Arch (16b)
157 * 157 *
158 * 15:8 reserved 158 * 15:8 reserved
159 * 7:6 Rank 3 architecture 159 * 7:6 Rank 3 architecture
160 * 5:4 Rank 2 architecture 160 * 5:4 Rank 2 architecture
161 * 3:2 Rank 1 architecture 161 * 3:2 Rank 1 architecture
162 * 1:0 Rank 0 architecture 162 * 1:0 Rank 0 architecture
163 * 163 *
164 * 00 => 4 banks 164 * 00 => 4 banks
165 * 01 => 8 banks 165 * 01 => 8 banks
166 */ 166 */
167 #define I82975X_C0BNKARC 0x10e 167 #define I82975X_C0BNKARC 0x10e
168 #define I82975X_C1BNKARC 0x18e 168 #define I82975X_C1BNKARC 0x18e
169 169
170 170
171 171
172 #define I82975X_DRC 0x120 /* DRAM Controller Mode0 (32b) 172 #define I82975X_DRC 0x120 /* DRAM Controller Mode0 (32b)
173 * 173 *
174 * 31:30 reserved 174 * 31:30 reserved
175 * 29 init complete 175 * 29 init complete
176 * 28:11 reserved, according to Intel 176 * 28:11 reserved, according to Intel
177 * 22:21 number of channels 177 * 22:21 number of channels
178 * 00=1 01=2 in 82875 178 * 00=1 01=2 in 82875
179 * seems to be ECC mode 179 * seems to be ECC mode
180 * bits in 82975 in Asus 180 * bits in 82975 in Asus
181 * P5W 181 * P5W
182 * 19:18 Data Integ Mode 182 * 19:18 Data Integ Mode
183 * 00=none 01=ECC in 82875 183 * 00=none 01=ECC in 82875
184 * 10:8 refresh mode 184 * 10:8 refresh mode
185 * 7 reserved 185 * 7 reserved
186 * 6:4 mode select 186 * 6:4 mode select
187 * 3:2 reserved 187 * 3:2 reserved
188 * 1:0 DRAM type 10=Second Revision 188 * 1:0 DRAM type 10=Second Revision
189 * DDR2 SDRAM 189 * DDR2 SDRAM
190 * 00, 01, 11 reserved 190 * 00, 01, 11 reserved
191 */ 191 */
192 #define I82975X_DRC_CH0M0 0x120 192 #define I82975X_DRC_CH0M0 0x120
193 #define I82975X_DRC_CH1M0 0x1A0 193 #define I82975X_DRC_CH1M0 0x1A0
194 194
195 195
196 #define I82975X_DRC_M1 0x124 /* DRAM Controller Mode1 (32b) 196 #define I82975X_DRC_M1 0x124 /* DRAM Controller Mode1 (32b)
197 * 31 0=Standard Address Map 197 * 31 0=Standard Address Map
198 * 1=Enhanced Address Map 198 * 1=Enhanced Address Map
199 * 30:0 reserved 199 * 30:0 reserved
200 */ 200 */
201 201
202 #define I82975X_DRC_CH0M1 0x124 202 #define I82975X_DRC_CH0M1 0x124
203 #define I82975X_DRC_CH1M1 0x1A4 203 #define I82975X_DRC_CH1M1 0x1A4
204 204
205 enum i82975x_chips { 205 enum i82975x_chips {
206 I82975X = 0, 206 I82975X = 0,
207 }; 207 };
208 208
209 struct i82975x_pvt { 209 struct i82975x_pvt {
210 void __iomem *mch_window; 210 void __iomem *mch_window;
211 }; 211 };
212 212
213 struct i82975x_dev_info { 213 struct i82975x_dev_info {
214 const char *ctl_name; 214 const char *ctl_name;
215 }; 215 };
216 216
217 struct i82975x_error_info { 217 struct i82975x_error_info {
218 u16 errsts; 218 u16 errsts;
219 u32 eap; 219 u32 eap;
220 u8 des; 220 u8 des;
221 u8 derrsyn; 221 u8 derrsyn;
222 u16 errsts2; 222 u16 errsts2;
223 u8 chan; /* the channel is bit 0 of EAP */ 223 u8 chan; /* the channel is bit 0 of EAP */
224 u8 xeap; /* extended eap bit */ 224 u8 xeap; /* extended eap bit */
225 }; 225 };
226 226
227 static const struct i82975x_dev_info i82975x_devs[] = { 227 static const struct i82975x_dev_info i82975x_devs[] = {
228 [I82975X] = { 228 [I82975X] = {
229 .ctl_name = "i82975x" 229 .ctl_name = "i82975x"
230 }, 230 },
231 }; 231 };
232 232
233 static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has 233 static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
234 * already registered driver 234 * already registered driver
235 */ 235 */
236 236
237 static int i82975x_registered = 1; 237 static int i82975x_registered = 1;
238 238
239 static void i82975x_get_error_info(struct mem_ctl_info *mci, 239 static void i82975x_get_error_info(struct mem_ctl_info *mci,
240 struct i82975x_error_info *info) 240 struct i82975x_error_info *info)
241 { 241 {
242 struct pci_dev *pdev; 242 struct pci_dev *pdev;
243 243
244 pdev = to_pci_dev(mci->pdev); 244 pdev = to_pci_dev(mci->pdev);
245 245
246 /* 246 /*
247 * This is a mess because there is no atomic way to read all the 247 * This is a mess because there is no atomic way to read all the
 248 * registers at once, and a CE can be overwritten by a UE 248 * registers at once, and a CE can be overwritten by a UE
 249 * between the reads. 249 * between the reads.
250 */ 250 */
251 pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts); 251 pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts);
252 pci_read_config_dword(pdev, I82975X_EAP, &info->eap); 252 pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
253 pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap); 253 pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
254 pci_read_config_byte(pdev, I82975X_DES, &info->des); 254 pci_read_config_byte(pdev, I82975X_DES, &info->des);
255 pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn); 255 pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn);
256 pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2); 256 pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2);
257 257
258 pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003); 258 pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003);
259 259
260 /* 260 /*
 261 * If the error is the same for both reads then 261 * If the error is the same for both reads then
 262 * the first set of reads is valid. If there is a change then 262 * the first set of reads is valid. If there is a change then
 263 * there was a CE with no info and the second set of reads is valid 263 * there was a CE with no info and the second set of reads is valid
 264 * and should be the UE info. 264 * and should be the UE info.
265 */ 265 */
266 if (!(info->errsts2 & 0x0003)) 266 if (!(info->errsts2 & 0x0003))
267 return; 267 return;
268 268
269 if ((info->errsts ^ info->errsts2) & 0x0003) { 269 if ((info->errsts ^ info->errsts2) & 0x0003) {
270 pci_read_config_dword(pdev, I82975X_EAP, &info->eap); 270 pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
271 pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap); 271 pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
272 pci_read_config_byte(pdev, I82975X_DES, &info->des); 272 pci_read_config_byte(pdev, I82975X_DES, &info->des);
273 pci_read_config_byte(pdev, I82975X_DERRSYN, 273 pci_read_config_byte(pdev, I82975X_DERRSYN,
274 &info->derrsyn); 274 &info->derrsyn);
275 } 275 }
276 } 276 }
277 277
278 static int i82975x_process_error_info(struct mem_ctl_info *mci, 278 static int i82975x_process_error_info(struct mem_ctl_info *mci,
279 struct i82975x_error_info *info, int handle_errors) 279 struct i82975x_error_info *info, int handle_errors)
280 { 280 {
281 int row, chan; 281 int row, chan;
282 unsigned long offst, page; 282 unsigned long offst, page;
283 283
284 if (!(info->errsts2 & 0x0003)) 284 if (!(info->errsts2 & 0x0003))
285 return 0; 285 return 0;
286 286
287 if (!handle_errors) 287 if (!handle_errors)
288 return 1; 288 return 1;
289 289
290 if ((info->errsts ^ info->errsts2) & 0x0003) { 290 if ((info->errsts ^ info->errsts2) & 0x0003) {
291 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 291 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
292 -1, -1, -1, "UE overwrote CE", "", NULL); 292 -1, -1, -1, "UE overwrote CE", "", NULL);
293 info->errsts = info->errsts2; 293 info->errsts = info->errsts2;
294 } 294 }
295 295
296 page = (unsigned long) info->eap; 296 page = (unsigned long) info->eap;
297 page >>= 1; 297 page >>= 1;
298 if (info->xeap & 1) 298 if (info->xeap & 1)
299 page |= 0x80000000; 299 page |= 0x80000000;
300 page >>= (PAGE_SHIFT - 1); 300 page >>= (PAGE_SHIFT - 1);
301 row = edac_mc_find_csrow_by_page(mci, page); 301 row = edac_mc_find_csrow_by_page(mci, page);
302 302
303 if (row == -1) { 303 if (row == -1) {
304 i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n" 304 i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n"
305 "\tXEAP=%u\n" 305 "\tXEAP=%u\n"
306 "\t EAP=0x%08x\n" 306 "\t EAP=0x%08x\n"
307 "\tPAGE=0x%08x\n", 307 "\tPAGE=0x%08x\n",
308 (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page); 308 (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
309 return 0; 309 return 0;
310 } 310 }
311 chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1; 311 chan = (mci->csrows[row]->nr_channels == 1) ? 0 : info->eap & 1;
312 offst = info->eap 312 offst = info->eap
313 & ((1 << PAGE_SHIFT) - 313 & ((1 << PAGE_SHIFT) -
314 (1 << mci->csrows[row].channels[chan].dimm->grain)); 314 (1 << mci->csrows[row]->channels[chan]->dimm->grain));
315 315
316 if (info->errsts & 0x0002) 316 if (info->errsts & 0x0002)
317 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 317 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
318 page, offst, 0, 318 page, offst, 0,
319 row, -1, -1, 319 row, -1, -1,
320 "i82975x UE", "", NULL); 320 "i82975x UE", "", NULL);
321 else 321 else
322 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 322 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
323 page, offst, info->derrsyn, 323 page, offst, info->derrsyn,
324 row, chan ? chan : 0, -1, 324 row, chan ? chan : 0, -1,
325 "i82975x CE", "", NULL); 325 "i82975x CE", "", NULL);
326 326
327 return 1; 327 return 1;
328 } 328 }
329 329
330 static void i82975x_check(struct mem_ctl_info *mci) 330 static void i82975x_check(struct mem_ctl_info *mci)
331 { 331 {
332 struct i82975x_error_info info; 332 struct i82975x_error_info info;
333 333
334 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 334 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
335 i82975x_get_error_info(mci, &info); 335 i82975x_get_error_info(mci, &info);
336 i82975x_process_error_info(mci, &info, 1); 336 i82975x_process_error_info(mci, &info, 1);
337 } 337 }
338 338
339 /* Return 1 if dual channel mode is active. Else return 0. */ 339 /* Return 1 if dual channel mode is active. Else return 0. */
340 static int dual_channel_active(void __iomem *mch_window) 340 static int dual_channel_active(void __iomem *mch_window)
341 { 341 {
342 /* 342 /*
343 * We treat interleaved-symmetric configuration as dual-channel - EAP's 343 * We treat interleaved-symmetric configuration as dual-channel - EAP's
344 * bit-0 giving the channel of the error location. 344 * bit-0 giving the channel of the error location.
345 * 345 *
346 * All other configurations are treated as single channel - the EAP's 346 * All other configurations are treated as single channel - the EAP's
347 * bit-0 will resolve ok in symmetric area of mixed 347 * bit-0 will resolve ok in symmetric area of mixed
348 * (symmetric/asymmetric) configurations 348 * (symmetric/asymmetric) configurations
349 */ 349 */
350 u8 drb[4][2]; 350 u8 drb[4][2];
351 int row; 351 int row;
352 int dualch; 352 int dualch;
353 353
354 for (dualch = 1, row = 0; dualch && (row < 4); row++) { 354 for (dualch = 1, row = 0; dualch && (row < 4); row++) {
355 drb[row][0] = readb(mch_window + I82975X_DRB + row); 355 drb[row][0] = readb(mch_window + I82975X_DRB + row);
356 drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80); 356 drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80);
357 dualch = dualch && (drb[row][0] == drb[row][1]); 357 dualch = dualch && (drb[row][0] == drb[row][1]);
358 } 358 }
359 return dualch; 359 return dualch;
360 } 360 }
361 361
362 static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) 362 static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
363 { 363 {
364 /* 364 /*
 365 * ECC is possible on i82975x ONLY with DEV_X8 365 * ECC is possible on i82975x ONLY with DEV_X8
366 */ 366 */
367 return DEV_X8; 367 return DEV_X8;
368 } 368 }
369 369
370 static void i82975x_init_csrows(struct mem_ctl_info *mci, 370 static void i82975x_init_csrows(struct mem_ctl_info *mci,
371 struct pci_dev *pdev, void __iomem *mch_window) 371 struct pci_dev *pdev, void __iomem *mch_window)
372 { 372 {
373 static const char *labels[4] = { 373 static const char *labels[4] = {
374 "DIMM A1", "DIMM A2", 374 "DIMM A1", "DIMM A2",
375 "DIMM B1", "DIMM B2" 375 "DIMM B1", "DIMM B2"
376 }; 376 };
377 struct csrow_info *csrow; 377 struct csrow_info *csrow;
378 unsigned long last_cumul_size; 378 unsigned long last_cumul_size;
379 u8 value; 379 u8 value;
380 u32 cumul_size, nr_pages; 380 u32 cumul_size, nr_pages;
381 int index, chan; 381 int index, chan;
382 struct dimm_info *dimm; 382 struct dimm_info *dimm;
383 enum dev_type dtype; 383 enum dev_type dtype;
384 384
385 last_cumul_size = 0; 385 last_cumul_size = 0;
386 386
387 /* 387 /*
388 * 82875 comment: 388 * 82875 comment:
389 * The dram row boundary (DRB) reg values are boundary address 389 * The dram row boundary (DRB) reg values are boundary address
390 * for each DRAM row with a granularity of 32 or 64MB (single/dual 390 * for each DRAM row with a granularity of 32 or 64MB (single/dual
391 * channel operation). DRB regs are cumulative; therefore DRB7 will 391 * channel operation). DRB regs are cumulative; therefore DRB7 will
392 * contain the total memory contained in all rows. 392 * contain the total memory contained in all rows.
393 * 393 *
394 */ 394 */
395 395
396 for (index = 0; index < mci->nr_csrows; index++) { 396 for (index = 0; index < mci->nr_csrows; index++) {
397 csrow = &mci->csrows[index]; 397 csrow = mci->csrows[index];
398 398
399 value = readb(mch_window + I82975X_DRB + index + 399 value = readb(mch_window + I82975X_DRB + index +
400 ((index >= 4) ? 0x80 : 0)); 400 ((index >= 4) ? 0x80 : 0));
401 cumul_size = value; 401 cumul_size = value;
402 cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT); 402 cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
403 /* 403 /*
404 * Adjust cumul_size w.r.t number of channels 404 * Adjust cumul_size w.r.t number of channels
405 * 405 *
406 */ 406 */
407 if (csrow->nr_channels > 1) 407 if (csrow->nr_channels > 1)
408 cumul_size <<= 1; 408 cumul_size <<= 1;
409 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 409 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
410 cumul_size); 410 cumul_size);
411 411
412 nr_pages = cumul_size - last_cumul_size; 412 nr_pages = cumul_size - last_cumul_size;
413 if (!nr_pages) 413 if (!nr_pages)
414 continue; 414 continue;
415 415
416 /* 416 /*
417 * Initialise dram labels 417 * Initialise dram labels
418 * index values: 418 * index values:
419 * [0-7] for single-channel; i.e. csrow->nr_channels = 1 419 * [0-7] for single-channel; i.e. csrow->nr_channels = 1
420 * [0-3] for dual-channel; i.e. csrow->nr_channels = 2 420 * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
421 */ 421 */
422 dtype = i82975x_dram_type(mch_window, index); 422 dtype = i82975x_dram_type(mch_window, index);
423 for (chan = 0; chan < csrow->nr_channels; chan++) { 423 for (chan = 0; chan < csrow->nr_channels; chan++) {
424 dimm = mci->csrows[index].channels[chan].dimm; 424 dimm = mci->csrows[index]->channels[chan]->dimm;
425 425
426 dimm->nr_pages = nr_pages / csrow->nr_channels; 426 dimm->nr_pages = nr_pages / csrow->nr_channels;
427 strncpy(csrow->channels[chan].dimm->label, 427 strncpy(csrow->channels[chan]->dimm->label,
428 labels[(index >> 1) + (chan * 2)], 428 labels[(index >> 1) + (chan * 2)],
429 EDAC_MC_LABEL_LEN); 429 EDAC_MC_LABEL_LEN);
430 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */ 430 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
431 dimm->dtype = i82975x_dram_type(mch_window, index); 431 dimm->dtype = i82975x_dram_type(mch_window, index);
432 dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */ 432 dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
433 dimm->edac_mode = EDAC_SECDED; /* only supported */ 433 dimm->edac_mode = EDAC_SECDED; /* only supported */
434 } 434 }
435 435
436 csrow->first_page = last_cumul_size; 436 csrow->first_page = last_cumul_size;
437 csrow->last_page = cumul_size - 1; 437 csrow->last_page = cumul_size - 1;
438 last_cumul_size = cumul_size; 438 last_cumul_size = cumul_size;
439 } 439 }
440 } 440 }
441 441
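The label arithmetic above, labels[(index >> 1) + (chan * 2)], interleaves slot pairs and channels; in the dual-channel case it resolves as follows (a worked mapping, not code from the driver):

	/* index 0..1, chan 0 -> "DIMM A1"    index 0..1, chan 1 -> "DIMM B1"
	 * index 2..3, chan 0 -> "DIMM A2"    index 2..3, chan 1 -> "DIMM B2" */
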
442 /* #define i82975x_DEBUG_IOMEM */ 442 /* #define i82975x_DEBUG_IOMEM */
443 443
444 #ifdef i82975x_DEBUG_IOMEM 444 #ifdef i82975x_DEBUG_IOMEM
445 static void i82975x_print_dram_timings(void __iomem *mch_window) 445 static void i82975x_print_dram_timings(void __iomem *mch_window)
446 { 446 {
447 /* 447 /*
448 * The register meanings are from Intel specs; 448 * The register meanings are from Intel specs;
449 * (shows 13-5-5-5 for 800-DDR2) 449 * (shows 13-5-5-5 for 800-DDR2)
450 * Asus P5W Bios reports 15-5-4-4 450 * Asus P5W Bios reports 15-5-4-4
451 * What's your religion? 451 * What's your religion?
452 */ 452 */
453 static const int caslats[4] = { 5, 4, 3, 6 }; 453 static const int caslats[4] = { 5, 4, 3, 6 };
454 u32 dtreg[2]; 454 u32 dtreg[2];
455 455
456 dtreg[0] = readl(mch_window + 0x114); 456 dtreg[0] = readl(mch_window + 0x114);
457 dtreg[1] = readl(mch_window + 0x194); 457 dtreg[1] = readl(mch_window + 0x194);
458 i82975x_printk(KERN_INFO, "DRAM Timings : Ch0 Ch1\n" 458 i82975x_printk(KERN_INFO, "DRAM Timings : Ch0 Ch1\n"
459 " RAS Active Min = %d %d\n" 459 " RAS Active Min = %d %d\n"
460 " CAS latency = %d %d\n" 460 " CAS latency = %d %d\n"
461 " RAS to CAS = %d %d\n" 461 " RAS to CAS = %d %d\n"
462 " RAS precharge = %d %d\n", 462 " RAS precharge = %d %d\n",
463 (dtreg[0] >> 19 ) & 0x0f, 463 (dtreg[0] >> 19 ) & 0x0f,
464 (dtreg[1] >> 19) & 0x0f, 464 (dtreg[1] >> 19) & 0x0f,
465 caslats[(dtreg[0] >> 8) & 0x03], 465 caslats[(dtreg[0] >> 8) & 0x03],
466 caslats[(dtreg[1] >> 8) & 0x03], 466 caslats[(dtreg[1] >> 8) & 0x03],
467 ((dtreg[0] >> 4) & 0x07) + 2, 467 ((dtreg[0] >> 4) & 0x07) + 2,
468 ((dtreg[1] >> 4) & 0x07) + 2, 468 ((dtreg[1] >> 4) & 0x07) + 2,
469 (dtreg[0] & 0x07) + 2, 469 (dtreg[0] & 0x07) + 2,
470 (dtreg[1] & 0x07) + 2 470 (dtreg[1] & 0x07) + 2
471 ); 471 );
472 472
473 } 473 }
474 #endif 474 #endif
475 475
476 static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) 476 static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
477 { 477 {
478 int rc = -ENODEV; 478 int rc = -ENODEV;
479 struct mem_ctl_info *mci; 479 struct mem_ctl_info *mci;
480 struct edac_mc_layer layers[2]; 480 struct edac_mc_layer layers[2];
481 struct i82975x_pvt *pvt; 481 struct i82975x_pvt *pvt;
482 void __iomem *mch_window; 482 void __iomem *mch_window;
483 u32 mchbar; 483 u32 mchbar;
484 u32 drc[2]; 484 u32 drc[2];
485 struct i82975x_error_info discard; 485 struct i82975x_error_info discard;
486 int chans; 486 int chans;
487 #ifdef i82975x_DEBUG_IOMEM 487 #ifdef i82975x_DEBUG_IOMEM
488 u8 c0drb[4]; 488 u8 c0drb[4];
489 u8 c1drb[4]; 489 u8 c1drb[4];
490 #endif 490 #endif
491 491
492 debugf0("%s()\n", __func__); 492 debugf0("%s()\n", __func__);
493 493
494 pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar); 494 pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
495 if (!(mchbar & 1)) { 495 if (!(mchbar & 1)) {
496 debugf3("%s(): failed, MCHBAR disabled!\n", __func__); 496 debugf3("%s(): failed, MCHBAR disabled!\n", __func__);
497 goto fail0; 497 goto fail0;
498 } 498 }
499 mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ 499 mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
500 mch_window = ioremap_nocache(mchbar, 0x1000); 500 mch_window = ioremap_nocache(mchbar, 0x1000);
501 501
502 #ifdef i82975x_DEBUG_IOMEM 502 #ifdef i82975x_DEBUG_IOMEM
503 i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n", 503 i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
504 mchbar, mch_window); 504 mchbar, mch_window);
505 505
506 c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0); 506 c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0);
507 c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1); 507 c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1);
508 c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2); 508 c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2);
509 c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3); 509 c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3);
510 c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0); 510 c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0);
511 c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1); 511 c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1);
512 c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2); 512 c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2);
513 c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3); 513 c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3);
514 i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]); 514 i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]);
515 i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]); 515 i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]);
516 i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]); 516 i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]);
517 i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]); 517 i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]);
518 i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]); 518 i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]);
519 i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]); 519 i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]);
520 i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]); 520 i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]);
521 i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]); 521 i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]);
522 #endif 522 #endif
523 523
524 drc[0] = readl(mch_window + I82975X_DRC_CH0M0); 524 drc[0] = readl(mch_window + I82975X_DRC_CH0M0);
525 drc[1] = readl(mch_window + I82975X_DRC_CH1M0); 525 drc[1] = readl(mch_window + I82975X_DRC_CH1M0);
526 #ifdef i82975x_DEBUG_IOMEM 526 #ifdef i82975x_DEBUG_IOMEM
527 i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0], 527 i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0],
528 ((drc[0] >> 21) & 3) == 1 ? 528 ((drc[0] >> 21) & 3) == 1 ?
529 "ECC enabled" : "ECC disabled"); 529 "ECC enabled" : "ECC disabled");
530 i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1], 530 i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1],
531 ((drc[1] >> 21) & 3) == 1 ? 531 ((drc[1] >> 21) & 3) == 1 ?
532 "ECC enabled" : "ECC disabled"); 532 "ECC enabled" : "ECC disabled");
533 533
534 i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n", 534 i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n",
535 readw(mch_window + I82975X_C0BNKARC)); 535 readw(mch_window + I82975X_C0BNKARC));
536 i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n", 536 i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n",
537 readw(mch_window + I82975X_C1BNKARC)); 537 readw(mch_window + I82975X_C1BNKARC));
538 i82975x_print_dram_timings(mch_window); 538 i82975x_print_dram_timings(mch_window);
539 goto fail1; 539 goto fail1;
540 #endif 540 #endif
541 if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) { 541 if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) {
542 i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n"); 542 i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n");
543 goto fail1; 543 goto fail1;
544 } 544 }
545 545
546 chans = dual_channel_active(mch_window) + 1; 546 chans = dual_channel_active(mch_window) + 1;
547 547
548 /* assuming only one controller, the index is thus 0 */ 548 /* assuming only one controller, the index is thus 0 */
549 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 549 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
550 layers[0].size = I82975X_NR_DIMMS; 550 layers[0].size = I82975X_NR_DIMMS;
551 layers[0].is_virt_csrow = true; 551 layers[0].is_virt_csrow = true;
552 layers[1].type = EDAC_MC_LAYER_CHANNEL; 552 layers[1].type = EDAC_MC_LAYER_CHANNEL;
553 layers[1].size = I82975X_NR_CSROWS(chans); 553 layers[1].size = I82975X_NR_CSROWS(chans);
554 layers[1].is_virt_csrow = false; 554 layers[1].is_virt_csrow = false;
555 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); 555 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
556 if (!mci) { 556 if (!mci) {
557 rc = -ENOMEM; 557 rc = -ENOMEM;
558 goto fail1; 558 goto fail1;
559 } 559 }
560 560
561 debugf3("%s(): init mci\n", __func__); 561 debugf3("%s(): init mci\n", __func__);
562 mci->pdev = &pdev->dev; 562 mci->pdev = &pdev->dev;
563 mci->mtype_cap = MEM_FLAG_DDR2; 563 mci->mtype_cap = MEM_FLAG_DDR2;
564 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 564 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
565 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 565 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
566 mci->mod_name = EDAC_MOD_STR; 566 mci->mod_name = EDAC_MOD_STR;
567 mci->mod_ver = I82975X_REVISION; 567 mci->mod_ver = I82975X_REVISION;
568 mci->ctl_name = i82975x_devs[dev_idx].ctl_name; 568 mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
569 mci->dev_name = pci_name(pdev); 569 mci->dev_name = pci_name(pdev);
570 mci->edac_check = i82975x_check; 570 mci->edac_check = i82975x_check;
571 mci->ctl_page_to_phys = NULL; 571 mci->ctl_page_to_phys = NULL;
572 debugf3("%s(): init pvt\n", __func__); 572 debugf3("%s(): init pvt\n", __func__);
573 pvt = (struct i82975x_pvt *) mci->pvt_info; 573 pvt = (struct i82975x_pvt *) mci->pvt_info;
574 pvt->mch_window = mch_window; 574 pvt->mch_window = mch_window;
575 i82975x_init_csrows(mci, pdev, mch_window); 575 i82975x_init_csrows(mci, pdev, mch_window);
576 mci->scrub_mode = SCRUB_HW_SRC; 576 mci->scrub_mode = SCRUB_HW_SRC;
577 i82975x_get_error_info(mci, &discard); /* clear counters */ 577 i82975x_get_error_info(mci, &discard); /* clear counters */
578 578
579 /* finalize this instance of memory controller with edac core */ 579 /* finalize this instance of memory controller with edac core */
580 if (edac_mc_add_mc(mci)) { 580 if (edac_mc_add_mc(mci)) {
581 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 581 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
582 goto fail2; 582 goto fail2;
583 } 583 }
584 584
585 /* get this far and it's successful */ 585 /* get this far and it's successful */
586 debugf3("%s(): success\n", __func__); 586 debugf3("%s(): success\n", __func__);
587 return 0; 587 return 0;
588 588
589 fail2: 589 fail2:
590 edac_mc_free(mci); 590 edac_mc_free(mci);
591 591
592 fail1: 592 fail1:
593 iounmap(mch_window); 593 iounmap(mch_window);
594 fail0: 594 fail0:
595 return rc; 595 return rc;
596 } 596 }
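
The layered allocation used above is the core of this change: the probe describes the controller geometry as an array of struct edac_mc_layer, lets edac_mc_alloc() build the mem_ctl_info together with its private data area, and registers the result with edac_mc_add_mc(). A minimal sketch of that call pattern, in which the layer sizes and the my_pvt type are placeholders rather than values from this driver:

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;	/* outer layer: csrows */
	layers[0].size = 4;				/* placeholder row count */
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;		/* inner layer: channels */
	layers[1].size = 2;				/* placeholder channel count */
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct my_pvt));
	if (!mci)
		return -ENOMEM;
	/* ... fill in mci fields and csrow/dimm data ... */
	if (edac_mc_add_mc(mci)) {
		edac_mc_free(mci);	/* registration failed, release everything */
		return -ENODEV;
	}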
597 597
598 /* returns count (>= 0), or negative on error */ 598 /* returns count (>= 0), or negative on error */
599 static int __devinit i82975x_init_one(struct pci_dev *pdev, 599 static int __devinit i82975x_init_one(struct pci_dev *pdev,
600 const struct pci_device_id *ent) 600 const struct pci_device_id *ent)
601 { 601 {
602 int rc; 602 int rc;
603 603
604 debugf0("%s()\n", __func__); 604 debugf0("%s()\n", __func__);
605 605
606 if (pci_enable_device(pdev) < 0) 606 if (pci_enable_device(pdev) < 0)
607 return -EIO; 607 return -EIO;
608 608
609 rc = i82975x_probe1(pdev, ent->driver_data); 609 rc = i82975x_probe1(pdev, ent->driver_data);
610 610
611 if (mci_pdev == NULL) 611 if (mci_pdev == NULL)
612 mci_pdev = pci_dev_get(pdev); 612 mci_pdev = pci_dev_get(pdev);
613 613
614 return rc; 614 return rc;
615 } 615 }
616 616
617 static void __devexit i82975x_remove_one(struct pci_dev *pdev) 617 static void __devexit i82975x_remove_one(struct pci_dev *pdev)
618 { 618 {
619 struct mem_ctl_info *mci; 619 struct mem_ctl_info *mci;
620 struct i82975x_pvt *pvt; 620 struct i82975x_pvt *pvt;
621 621
622 debugf0("%s()\n", __func__); 622 debugf0("%s()\n", __func__);
623 623
624 mci = edac_mc_del_mc(&pdev->dev); 624 mci = edac_mc_del_mc(&pdev->dev);
625 if (mci == NULL) 625 if (mci == NULL)
626 return; 626 return;
627 627
628 pvt = mci->pvt_info; 628 pvt = mci->pvt_info;
629 if (pvt->mch_window) 629 if (pvt->mch_window)
630 iounmap(pvt->mch_window); 630 iounmap(pvt->mch_window);
631 631
632 edac_mc_free(mci); 632 edac_mc_free(mci);
633 } 633 }
634 634
635 static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = { 635 static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = {
636 { 636 {
637 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 637 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
638 I82975X 638 I82975X
639 }, 639 },
640 { 640 {
641 0, 641 0,
642 } /* 0 terminated list. */ 642 } /* 0 terminated list. */
643 }; 643 };
644 644
645 MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl); 645 MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
646 646
647 static struct pci_driver i82975x_driver = { 647 static struct pci_driver i82975x_driver = {
648 .name = EDAC_MOD_STR, 648 .name = EDAC_MOD_STR,
649 .probe = i82975x_init_one, 649 .probe = i82975x_init_one,
650 .remove = __devexit_p(i82975x_remove_one), 650 .remove = __devexit_p(i82975x_remove_one),
651 .id_table = i82975x_pci_tbl, 651 .id_table = i82975x_pci_tbl,
652 }; 652 };
653 653
654 static int __init i82975x_init(void) 654 static int __init i82975x_init(void)
655 { 655 {
656 int pci_rc; 656 int pci_rc;
657 657
658 debugf3("%s()\n", __func__); 658 debugf3("%s()\n", __func__);
659 659
660 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 660 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
661 opstate_init(); 661 opstate_init();
662 662
663 pci_rc = pci_register_driver(&i82975x_driver); 663 pci_rc = pci_register_driver(&i82975x_driver);
664 if (pci_rc < 0) 664 if (pci_rc < 0)
665 goto fail0; 665 goto fail0;
666 666
667 if (mci_pdev == NULL) { 667 if (mci_pdev == NULL) {
668 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 668 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
669 PCI_DEVICE_ID_INTEL_82975_0, NULL); 669 PCI_DEVICE_ID_INTEL_82975_0, NULL);
670 670
671 if (!mci_pdev) { 671 if (!mci_pdev) {
672 debugf0("i82975x pci_get_device fail\n"); 672 debugf0("i82975x pci_get_device fail\n");
673 pci_rc = -ENODEV; 673 pci_rc = -ENODEV;
674 goto fail1; 674 goto fail1;
675 } 675 }
676 676
677 pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl); 677 pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
678 678
679 if (pci_rc < 0) { 679 if (pci_rc < 0) {
680 debugf0("i82975x init fail\n"); 680 debugf0("i82975x init fail\n");
681 pci_rc = -ENODEV; 681 pci_rc = -ENODEV;
682 goto fail1; 682 goto fail1;
683 } 683 }
684 } 684 }
685 685
686 return 0; 686 return 0;
687 687
688 fail1: 688 fail1:
689 pci_unregister_driver(&i82975x_driver); 689 pci_unregister_driver(&i82975x_driver);
690 690
691 fail0: 691 fail0:
692 if (mci_pdev != NULL) 692 if (mci_pdev != NULL)
693 pci_dev_put(mci_pdev); 693 pci_dev_put(mci_pdev);
694 694
695 return pci_rc; 695 return pci_rc;
696 } 696 }
697 697
698 static void __exit i82975x_exit(void) 698 static void __exit i82975x_exit(void)
699 { 699 {
700 debugf3("%s()\n", __func__); 700 debugf3("%s()\n", __func__);
701 701
702 pci_unregister_driver(&i82975x_driver); 702 pci_unregister_driver(&i82975x_driver);
703 703
704 if (!i82975x_registered) { 704 if (!i82975x_registered) {
705 i82975x_remove_one(mci_pdev); 705 i82975x_remove_one(mci_pdev);
706 pci_dev_put(mci_pdev); 706 pci_dev_put(mci_pdev);
707 } 707 }
708 } 708 }
709 709
710 module_init(i82975x_init); 710 module_init(i82975x_init);
711 module_exit(i82975x_exit); 711 module_exit(i82975x_exit);
712 712
713 MODULE_LICENSE("GPL"); 713 MODULE_LICENSE("GPL");
714 MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>"); 714 MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>");
715 MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); 715 MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
716 716
717 module_param(edac_op_state, int, 0444); 717 module_param(edac_op_state, int, 0444);
718 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 718 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
719 719
drivers/edac/mpc85xx_edac.c
1 /* 1 /*
2 * Freescale MPC85xx Memory Controller kernel module 2 * Freescale MPC85xx Memory Controller kernel module
3 * 3 *
4 * Author: Dave Jiang <djiang@mvista.com> 4 * Author: Dave Jiang <djiang@mvista.com>
5 * 5 *
6 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under 6 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program 7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express 8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied. 9 * or implied.
10 * 10 *
11 */ 11 */
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 #include <linux/ctype.h> 15 #include <linux/ctype.h>
16 #include <linux/io.h> 16 #include <linux/io.h>
17 #include <linux/mod_devicetable.h> 17 #include <linux/mod_devicetable.h>
18 #include <linux/edac.h> 18 #include <linux/edac.h>
19 #include <linux/smp.h> 19 #include <linux/smp.h>
20 #include <linux/gfp.h> 20 #include <linux/gfp.h>
21 21
22 #include <linux/of_platform.h> 22 #include <linux/of_platform.h>
23 #include <linux/of_device.h> 23 #include <linux/of_device.h>
24 #include "edac_module.h" 24 #include "edac_module.h"
25 #include "edac_core.h" 25 #include "edac_core.h"
26 #include "mpc85xx_edac.h" 26 #include "mpc85xx_edac.h"
27 27
28 static int edac_dev_idx; 28 static int edac_dev_idx;
29 #ifdef CONFIG_PCI 29 #ifdef CONFIG_PCI
30 static int edac_pci_idx; 30 static int edac_pci_idx;
31 #endif 31 #endif
32 static int edac_mc_idx; 32 static int edac_mc_idx;
33 33
34 static u32 orig_ddr_err_disable; 34 static u32 orig_ddr_err_disable;
35 static u32 orig_ddr_err_sbe; 35 static u32 orig_ddr_err_sbe;
36 36
37 /* 37 /*
38 * PCI Err defines 38 * PCI Err defines
39 */ 39 */
40 #ifdef CONFIG_PCI 40 #ifdef CONFIG_PCI
41 static u32 orig_pci_err_cap_dr; 41 static u32 orig_pci_err_cap_dr;
42 static u32 orig_pci_err_en; 42 static u32 orig_pci_err_en;
43 #endif 43 #endif
44 44
45 static u32 orig_l2_err_disable; 45 static u32 orig_l2_err_disable;
46 #ifdef CONFIG_FSL_SOC_BOOKE 46 #ifdef CONFIG_FSL_SOC_BOOKE
47 static u32 orig_hid1[2]; 47 static u32 orig_hid1[2];
48 #endif 48 #endif
49 49
50 /************************ MC SYSFS parts ***********************************/ 50 /************************ MC SYSFS parts ***********************************/
51 51
52 #define to_mci(k) container_of(k, struct mem_ctl_info, dev) 52 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
53 53
54 static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev, 54 static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
55 struct device_attribute *mattr, 55 struct device_attribute *mattr,
56 char *data) 56 char *data)
57 { 57 {
58 struct mem_ctl_info *mci = to_mci(dev); 58 struct mem_ctl_info *mci = to_mci(dev);
59 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 59 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
60 return sprintf(data, "0x%08x", 60 return sprintf(data, "0x%08x",
61 in_be32(pdata->mc_vbase + 61 in_be32(pdata->mc_vbase +
62 MPC85XX_MC_DATA_ERR_INJECT_HI)); 62 MPC85XX_MC_DATA_ERR_INJECT_HI));
63 } 63 }
64 64
65 static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev, 65 static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
66 struct device_attribute *mattr, 66 struct device_attribute *mattr,
67 char *data) 67 char *data)
68 { 68 {
69 struct mem_ctl_info *mci = to_mci(dev); 69 struct mem_ctl_info *mci = to_mci(dev);
70 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 70 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
71 return sprintf(data, "0x%08x", 71 return sprintf(data, "0x%08x",
72 in_be32(pdata->mc_vbase + 72 in_be32(pdata->mc_vbase +
73 MPC85XX_MC_DATA_ERR_INJECT_LO)); 73 MPC85XX_MC_DATA_ERR_INJECT_LO));
74 } 74 }
75 75
76 static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev, 76 static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
77 struct device_attribute *mattr, 77 struct device_attribute *mattr,
78 char *data) 78 char *data)
79 { 79 {
80 struct mem_ctl_info *mci = to_mci(dev); 80 struct mem_ctl_info *mci = to_mci(dev);
81 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 81 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
82 return sprintf(data, "0x%08x", 82 return sprintf(data, "0x%08x",
83 in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT)); 83 in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
84 } 84 }
85 85
86 static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev, 86 static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
87 struct device_attribute *mattr, 87 struct device_attribute *mattr,
88 const char *data, size_t count) 88 const char *data, size_t count)
89 { 89 {
90 struct mem_ctl_info *mci = to_mci(dev); 90 struct mem_ctl_info *mci = to_mci(dev);
91 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 91 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
92 if (isdigit(*data)) { 92 if (isdigit(*data)) {
93 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI, 93 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
94 simple_strtoul(data, NULL, 0)); 94 simple_strtoul(data, NULL, 0));
95 return count; 95 return count;
96 } 96 }
97 return 0; 97 return 0;
98 } 98 }
99 99
100 static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev, 100 static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
101 struct device_attribute *mattr, 101 struct device_attribute *mattr,
102 const char *data, size_t count) 102 const char *data, size_t count)
103 { 103 {
104 struct mem_ctl_info *mci = to_mci(dev); 104 struct mem_ctl_info *mci = to_mci(dev);
105 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 105 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
106 if (isdigit(*data)) { 106 if (isdigit(*data)) {
107 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO, 107 out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
108 simple_strtoul(data, NULL, 0)); 108 simple_strtoul(data, NULL, 0));
109 return count; 109 return count;
110 } 110 }
111 return 0; 111 return 0;
112 } 112 }
113 113
114 static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev, 114 static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
115 struct device_attribute *mattr, 115 struct device_attribute *mattr,
116 const char *data, size_t count) 116 const char *data, size_t count)
117 { 117 {
118 struct mem_ctl_info *mci = to_mci(dev); 118 struct mem_ctl_info *mci = to_mci(dev);
119 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 119 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
120 if (isdigit(*data)) { 120 if (isdigit(*data)) {
121 out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT, 121 out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
122 simple_strtoul(data, NULL, 0)); 122 simple_strtoul(data, NULL, 0));
123 return count; 123 return count;
124 } 124 }
125 return 0; 125 return 0;
126 } 126 }
127 127
128 DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR, 128 DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
129 mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store); 129 mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
130 DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR, 130 DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
131 mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store); 131 mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
132 DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR, 132 DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
133 mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store); 133 mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
134 134
135 static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci) 135 static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci)
136 { 136 {
137 int rc; 137 int rc;
138 138
139 rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi); 139 rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi);
140 if (rc < 0) 140 if (rc < 0)
141 return rc; 141 return rc;
142 rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo); 142 rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo);
143 if (rc < 0) 143 if (rc < 0)
144 return rc; 144 return rc;
145 rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl); 145 rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl);
146 if (rc < 0) 146 if (rc < 0)
147 return rc; 147 return rc;
148 148
149 return 0; 149 return 0;
150 } 150 }
151 151
152 static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci) 152 static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci)
153 { 153 {
154 device_remove_file(&mci->dev, &dev_attr_inject_data_hi); 154 device_remove_file(&mci->dev, &dev_attr_inject_data_hi);
155 device_remove_file(&mci->dev, &dev_attr_inject_data_lo); 155 device_remove_file(&mci->dev, &dev_attr_inject_data_lo);
156 device_remove_file(&mci->dev, &dev_attr_inject_ctrl); 156 device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
157 } 157 }
158 158
159 /**************************** PCI Err device ***************************/ 159 /**************************** PCI Err device ***************************/
160 #ifdef CONFIG_PCI 160 #ifdef CONFIG_PCI
161 161
162 static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci) 162 static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
163 { 163 {
164 struct mpc85xx_pci_pdata *pdata = pci->pvt_info; 164 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
165 u32 err_detect; 165 u32 err_detect;
166 166
167 err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR); 167 err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
168 168
169 /* master aborts can happen during PCI config cycles */ 169 /* master aborts can happen during PCI config cycles */
170 if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) { 170 if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
171 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect); 171 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
172 return; 172 return;
173 } 173 }
174 174
175 printk(KERN_ERR "PCI error(s) detected\n"); 175 printk(KERN_ERR "PCI error(s) detected\n");
176 printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect); 176 printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
177 177
178 printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n", 178 printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
179 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB)); 179 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
180 printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n", 180 printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
181 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR)); 181 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
182 printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n", 182 printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
183 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR)); 183 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
184 printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n", 184 printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
185 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL)); 185 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
186 printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n", 186 printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
187 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH)); 187 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
188 188
189 /* clear error bits */ 189 /* clear error bits */
190 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect); 190 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
191 191
192 if (err_detect & PCI_EDE_PERR_MASK) 192 if (err_detect & PCI_EDE_PERR_MASK)
193 edac_pci_handle_pe(pci, pci->ctl_name); 193 edac_pci_handle_pe(pci, pci->ctl_name);
194 194
195 if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK) 195 if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
196 edac_pci_handle_npe(pci, pci->ctl_name); 196 edac_pci_handle_npe(pci, pci->ctl_name);
197 } 197 }
198 198
199 static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id) 199 static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
200 { 200 {
201 struct edac_pci_ctl_info *pci = dev_id; 201 struct edac_pci_ctl_info *pci = dev_id;
202 struct mpc85xx_pci_pdata *pdata = pci->pvt_info; 202 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
203 u32 err_detect; 203 u32 err_detect;
204 204
205 err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR); 205 err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
206 206
207 if (!err_detect) 207 if (!err_detect)
208 return IRQ_NONE; 208 return IRQ_NONE;
209 209
210 mpc85xx_pci_check(pci); 210 mpc85xx_pci_check(pci);
211 211
212 return IRQ_HANDLED; 212 return IRQ_HANDLED;
213 } 213 }
214 214
215 static int __devinit mpc85xx_pci_err_probe(struct platform_device *op) 215 static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
216 { 216 {
217 struct edac_pci_ctl_info *pci; 217 struct edac_pci_ctl_info *pci;
218 struct mpc85xx_pci_pdata *pdata; 218 struct mpc85xx_pci_pdata *pdata;
219 struct resource r; 219 struct resource r;
220 int res = 0; 220 int res = 0;
221 221
222 if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL)) 222 if (!devres_open_group(&op->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
223 return -ENOMEM; 223 return -ENOMEM;
224 224
225 pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err"); 225 pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
226 if (!pci) 226 if (!pci)
227 return -ENOMEM; 227 return -ENOMEM;
228 228
229 pdata = pci->pvt_info; 229 pdata = pci->pvt_info;
230 pdata->name = "mpc85xx_pci_err"; 230 pdata->name = "mpc85xx_pci_err";
231 pdata->irq = NO_IRQ; 231 pdata->irq = NO_IRQ;
232 dev_set_drvdata(&op->dev, pci); 232 dev_set_drvdata(&op->dev, pci);
233 pci->dev = &op->dev; 233 pci->dev = &op->dev;
234 pci->mod_name = EDAC_MOD_STR; 234 pci->mod_name = EDAC_MOD_STR;
235 pci->ctl_name = pdata->name; 235 pci->ctl_name = pdata->name;
236 pci->dev_name = dev_name(&op->dev); 236 pci->dev_name = dev_name(&op->dev);
237 237
238 if (edac_op_state == EDAC_OPSTATE_POLL) 238 if (edac_op_state == EDAC_OPSTATE_POLL)
239 pci->edac_check = mpc85xx_pci_check; 239 pci->edac_check = mpc85xx_pci_check;
240 240
241 pdata->edac_idx = edac_pci_idx++; 241 pdata->edac_idx = edac_pci_idx++;
242 242
243 res = of_address_to_resource(op->dev.of_node, 0, &r); 243 res = of_address_to_resource(op->dev.of_node, 0, &r);
244 if (res) { 244 if (res) {
245 printk(KERN_ERR "%s: Unable to get resource for " 245 printk(KERN_ERR "%s: Unable to get resource for "
246 "PCI err regs\n", __func__); 246 "PCI err regs\n", __func__);
247 goto err; 247 goto err;
248 } 248 }
249 249
250 /* we only need the error registers */ 250 /* we only need the error registers */
251 r.start += 0xe00; 251 r.start += 0xe00;
252 252
253 if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r), 253 if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
254 pdata->name)) { 254 pdata->name)) {
255 printk(KERN_ERR "%s: Error while requesting mem region\n", 255 printk(KERN_ERR "%s: Error while requesting mem region\n",
256 __func__); 256 __func__);
257 res = -EBUSY; 257 res = -EBUSY;
258 goto err; 258 goto err;
259 } 259 }
260 260
261 pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r)); 261 pdata->pci_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
262 if (!pdata->pci_vbase) { 262 if (!pdata->pci_vbase) {
263 printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__); 263 printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
264 res = -ENOMEM; 264 res = -ENOMEM;
265 goto err; 265 goto err;
266 } 266 }
267 267
268 orig_pci_err_cap_dr = 268 orig_pci_err_cap_dr =
269 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR); 269 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
270 270
271 /* PCI master abort is expected during config cycles */ 271 /* PCI master abort is expected during config cycles */
272 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40); 272 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
273 273
274 orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN); 274 orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
275 275
276 /* disable master abort reporting */ 276 /* disable master abort reporting */
277 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40); 277 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
278 278
279 /* clear error bits */ 279 /* clear error bits */
280 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); 280 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
281 281
282 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { 282 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
283 debugf3("%s(): failed edac_pci_add_device()\n", __func__); 283 debugf3("%s(): failed edac_pci_add_device()\n", __func__);
284 res = -ENODEV; goto err; 284 res = -ENODEV; goto err;
285 } 285 }
286 286
287 if (edac_op_state == EDAC_OPSTATE_INT) { 287 if (edac_op_state == EDAC_OPSTATE_INT) {
288 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); 288 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
289 res = devm_request_irq(&op->dev, pdata->irq, 289 res = devm_request_irq(&op->dev, pdata->irq,
290 mpc85xx_pci_isr, IRQF_DISABLED, 290 mpc85xx_pci_isr, IRQF_DISABLED,
291 "[EDAC] PCI err", pci); 291 "[EDAC] PCI err", pci);
292 if (res < 0) { 292 if (res < 0) {
293 printk(KERN_ERR 293 printk(KERN_ERR
294 "%s: Unable to requiest irq %d for " 294 "%s: Unable to requiest irq %d for "
295 "MPC85xx PCI err\n", __func__, pdata->irq); 295 "MPC85xx PCI err\n", __func__, pdata->irq);
296 irq_dispose_mapping(pdata->irq); 296 irq_dispose_mapping(pdata->irq);
297 res = -ENODEV; 297 res = -ENODEV;
298 goto err2; 298 goto err2;
299 } 299 }
300 300
301 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n", 301 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
302 pdata->irq); 302 pdata->irq);
303 } 303 }
304 304
305 devres_remove_group(&op->dev, mpc85xx_pci_err_probe); 305 devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
306 debugf3("%s(): success\n", __func__); 306 debugf3("%s(): success\n", __func__);
307 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); 307 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
308 308
309 return 0; 309 return 0;
310 310
311 err2: 311 err2:
312 edac_pci_del_device(&op->dev); 312 edac_pci_del_device(&op->dev);
313 err: 313 err:
314 edac_pci_free_ctl_info(pci); 314 edac_pci_free_ctl_info(pci);
315 devres_release_group(&op->dev, mpc85xx_pci_err_probe); 315 devres_release_group(&op->dev, mpc85xx_pci_err_probe);
316 return res; 316 return res;
317 } 317 }
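
The devres group calls bracketing this probe are what keep its error paths short: every devm_* allocation made after devres_open_group() can be retained as a unit on success or released as a unit on failure. A condensed sketch of the idiom, assuming a fictitious some_err_probe() and an arbitrary register window:

	static int some_err_probe(struct platform_device *op)
	{
		void __iomem *base;

		if (!devres_open_group(&op->dev, some_err_probe, GFP_KERNEL))
			return -ENOMEM;

		/* managed mapping, torn down with the group on failure */
		base = devm_ioremap(&op->dev, 0xe0000000, 0x1000);
		if (!base)
			goto err;

		devres_remove_group(&op->dev, some_err_probe);	/* success: keep */
		return 0;
	err:
		devres_release_group(&op->dev, some_err_probe);	/* failure: free */
		return -ENOMEM;
	}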
318 318
319 static int mpc85xx_pci_err_remove(struct platform_device *op) 319 static int mpc85xx_pci_err_remove(struct platform_device *op)
320 { 320 {
321 struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); 321 struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
322 struct mpc85xx_pci_pdata *pdata = pci->pvt_info; 322 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
323 323
324 debugf0("%s()\n", __func__); 324 debugf0("%s()\n", __func__);
325 325
326 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 326 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
327 orig_pci_err_cap_dr); 327 orig_pci_err_cap_dr);
328 328
329 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en); 329 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
330 330
331 edac_pci_del_device(pci->dev); 331 edac_pci_del_device(pci->dev);
332 332
333 if (edac_op_state == EDAC_OPSTATE_INT) 333 if (edac_op_state == EDAC_OPSTATE_INT)
334 irq_dispose_mapping(pdata->irq); 334 irq_dispose_mapping(pdata->irq);
335 335
336 edac_pci_free_ctl_info(pci); 336 edac_pci_free_ctl_info(pci);
337 337
338 return 0; 338 return 0;
339 } 339 }
340 340
341 static struct of_device_id mpc85xx_pci_err_of_match[] = { 341 static struct of_device_id mpc85xx_pci_err_of_match[] = {
342 { 342 {
343 .compatible = "fsl,mpc8540-pcix", 343 .compatible = "fsl,mpc8540-pcix",
344 }, 344 },
345 { 345 {
346 .compatible = "fsl,mpc8540-pci", 346 .compatible = "fsl,mpc8540-pci",
347 }, 347 },
348 {}, 348 {},
349 }; 349 };
350 MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match); 350 MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
351 351
352 static struct platform_driver mpc85xx_pci_err_driver = { 352 static struct platform_driver mpc85xx_pci_err_driver = {
353 .probe = mpc85xx_pci_err_probe, 353 .probe = mpc85xx_pci_err_probe,
354 .remove = __devexit_p(mpc85xx_pci_err_remove), 354 .remove = __devexit_p(mpc85xx_pci_err_remove),
355 .driver = { 355 .driver = {
356 .name = "mpc85xx_pci_err", 356 .name = "mpc85xx_pci_err",
357 .owner = THIS_MODULE, 357 .owner = THIS_MODULE,
358 .of_match_table = mpc85xx_pci_err_of_match, 358 .of_match_table = mpc85xx_pci_err_of_match,
359 }, 359 },
360 }; 360 };
361 361
362 #endif /* CONFIG_PCI */ 362 #endif /* CONFIG_PCI */
363 363
364 /**************************** L2 Err device ***************************/ 364 /**************************** L2 Err device ***************************/
365 365
366 /************************ L2 SYSFS parts ***********************************/ 366 /************************ L2 SYSFS parts ***********************************/
367 367
368 static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info 368 static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
369 *edac_dev, char *data) 369 *edac_dev, char *data)
370 { 370 {
371 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 371 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
372 return sprintf(data, "0x%08x", 372 return sprintf(data, "0x%08x",
373 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI)); 373 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
374 } 374 }
375 375
376 static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info 376 static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
377 *edac_dev, char *data) 377 *edac_dev, char *data)
378 { 378 {
379 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 379 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
380 return sprintf(data, "0x%08x", 380 return sprintf(data, "0x%08x",
381 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO)); 381 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
382 } 382 }
383 383
384 static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info 384 static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
385 *edac_dev, char *data) 385 *edac_dev, char *data)
386 { 386 {
387 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 387 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
388 return sprintf(data, "0x%08x", 388 return sprintf(data, "0x%08x",
389 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL)); 389 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
390 } 390 }
391 391
392 static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info 392 static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
393 *edac_dev, const char *data, 393 *edac_dev, const char *data,
394 size_t count) 394 size_t count)
395 { 395 {
396 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 396 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
397 if (isdigit(*data)) { 397 if (isdigit(*data)) {
398 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI, 398 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
399 simple_strtoul(data, NULL, 0)); 399 simple_strtoul(data, NULL, 0));
400 return count; 400 return count;
401 } 401 }
402 return 0; 402 return 0;
403 } 403 }
404 404
405 static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info 405 static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
406 *edac_dev, const char *data, 406 *edac_dev, const char *data,
407 size_t count) 407 size_t count)
408 { 408 {
409 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 409 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
410 if (isdigit(*data)) { 410 if (isdigit(*data)) {
411 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO, 411 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
412 simple_strtoul(data, NULL, 0)); 412 simple_strtoul(data, NULL, 0));
413 return count; 413 return count;
414 } 414 }
415 return 0; 415 return 0;
416 } 416 }
417 417
418 static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info 418 static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
419 *edac_dev, const char *data, 419 *edac_dev, const char *data,
420 size_t count) 420 size_t count)
421 { 421 {
422 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 422 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
423 if (isdigit(*data)) { 423 if (isdigit(*data)) {
424 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL, 424 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
425 simple_strtoul(data, NULL, 0)); 425 simple_strtoul(data, NULL, 0));
426 return count; 426 return count;
427 } 427 }
428 return 0; 428 return 0;
429 } 429 }
430 430
431 static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = { 431 static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
432 { 432 {
433 .attr = { 433 .attr = {
434 .name = "inject_data_hi", 434 .name = "inject_data_hi",
435 .mode = (S_IRUGO | S_IWUSR) 435 .mode = (S_IRUGO | S_IWUSR)
436 }, 436 },
437 .show = mpc85xx_l2_inject_data_hi_show, 437 .show = mpc85xx_l2_inject_data_hi_show,
438 .store = mpc85xx_l2_inject_data_hi_store}, 438 .store = mpc85xx_l2_inject_data_hi_store},
439 { 439 {
440 .attr = { 440 .attr = {
441 .name = "inject_data_lo", 441 .name = "inject_data_lo",
442 .mode = (S_IRUGO | S_IWUSR) 442 .mode = (S_IRUGO | S_IWUSR)
443 }, 443 },
444 .show = mpc85xx_l2_inject_data_lo_show, 444 .show = mpc85xx_l2_inject_data_lo_show,
445 .store = mpc85xx_l2_inject_data_lo_store}, 445 .store = mpc85xx_l2_inject_data_lo_store},
446 { 446 {
447 .attr = { 447 .attr = {
448 .name = "inject_ctrl", 448 .name = "inject_ctrl",
449 .mode = (S_IRUGO | S_IWUSR) 449 .mode = (S_IRUGO | S_IWUSR)
450 }, 450 },
451 .show = mpc85xx_l2_inject_ctrl_show, 451 .show = mpc85xx_l2_inject_ctrl_show,
452 .store = mpc85xx_l2_inject_ctrl_store}, 452 .store = mpc85xx_l2_inject_ctrl_store},
453 453
454 /* End of list */ 454 /* End of list */
455 { 455 {
456 .attr = {.name = NULL} 456 .attr = {.name = NULL}
457 } 457 }
458 }; 458 };
459 459
460 static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info 460 static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
461 *edac_dev) 461 *edac_dev)
462 { 462 {
463 edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes; 463 edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
464 } 464 }
465 465
466 /***************************** L2 ops ***********************************/ 466 /***************************** L2 ops ***********************************/
467 467
468 static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev) 468 static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
469 { 469 {
470 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 470 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
471 u32 err_detect; 471 u32 err_detect;
472 472
473 err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET); 473 err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
474 474
475 if (!(err_detect & L2_EDE_MASK)) 475 if (!(err_detect & L2_EDE_MASK))
476 return; 476 return;
477 477
478 printk(KERN_ERR "ECC Error in CPU L2 cache\n"); 478 printk(KERN_ERR "ECC Error in CPU L2 cache\n");
479 printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect); 479 printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
480 printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n", 480 printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
481 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI)); 481 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
482 printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n", 482 printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
483 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO)); 483 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
484 printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n", 484 printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
485 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC)); 485 in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
486 printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n", 486 printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
487 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR)); 487 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
488 printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n", 488 printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
489 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR)); 489 in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
490 490
491 /* clear error detect register */ 491 /* clear error detect register */
492 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect); 492 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
493 493
494 if (err_detect & L2_EDE_CE_MASK) 494 if (err_detect & L2_EDE_CE_MASK)
495 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); 495 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
496 496
497 if (err_detect & L2_EDE_UE_MASK) 497 if (err_detect & L2_EDE_UE_MASK)
498 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); 498 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
499 } 499 }
500 500
501 static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id) 501 static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
502 { 502 {
503 struct edac_device_ctl_info *edac_dev = dev_id; 503 struct edac_device_ctl_info *edac_dev = dev_id;
504 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 504 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
505 u32 err_detect; 505 u32 err_detect;
506 506
507 err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET); 507 err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
508 508
509 if (!(err_detect & L2_EDE_MASK)) 509 if (!(err_detect & L2_EDE_MASK))
510 return IRQ_NONE; 510 return IRQ_NONE;
511 511
512 mpc85xx_l2_check(edac_dev); 512 mpc85xx_l2_check(edac_dev);
513 513
514 return IRQ_HANDLED; 514 return IRQ_HANDLED;
515 } 515 }
516 516
517 static int __devinit mpc85xx_l2_err_probe(struct platform_device *op) 517 static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
518 { 518 {
519 struct edac_device_ctl_info *edac_dev; 519 struct edac_device_ctl_info *edac_dev;
520 struct mpc85xx_l2_pdata *pdata; 520 struct mpc85xx_l2_pdata *pdata;
521 struct resource r; 521 struct resource r;
522 int res; 522 int res;
523 523
524 if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL)) 524 if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
525 return -ENOMEM; 525 return -ENOMEM;
526 526
527 edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata), 527 edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
528 "cpu", 1, "L", 1, 2, NULL, 0, 528 "cpu", 1, "L", 1, 2, NULL, 0,
529 edac_dev_idx); 529 edac_dev_idx);
530 if (!edac_dev) { 530 if (!edac_dev) {
531 devres_release_group(&op->dev, mpc85xx_l2_err_probe); 531 devres_release_group(&op->dev, mpc85xx_l2_err_probe);
532 return -ENOMEM; 532 return -ENOMEM;
533 } 533 }
534 534
535 pdata = edac_dev->pvt_info; 535 pdata = edac_dev->pvt_info;
536 pdata->name = "mpc85xx_l2_err"; 536 pdata->name = "mpc85xx_l2_err";
537 pdata->irq = NO_IRQ; 537 pdata->irq = NO_IRQ;
538 edac_dev->dev = &op->dev; 538 edac_dev->dev = &op->dev;
539 dev_set_drvdata(edac_dev->dev, edac_dev); 539 dev_set_drvdata(edac_dev->dev, edac_dev);
540 edac_dev->ctl_name = pdata->name; 540 edac_dev->ctl_name = pdata->name;
541 edac_dev->dev_name = pdata->name; 541 edac_dev->dev_name = pdata->name;
542 542
543 res = of_address_to_resource(op->dev.of_node, 0, &r); 543 res = of_address_to_resource(op->dev.of_node, 0, &r);
544 if (res) { 544 if (res) {
545 printk(KERN_ERR "%s: Unable to get resource for " 545 printk(KERN_ERR "%s: Unable to get resource for "
546 "L2 err regs\n", __func__); 546 "L2 err regs\n", __func__);
547 goto err; 547 goto err;
548 } 548 }
549 549
550 /* we only need the error registers */ 550 /* we only need the error registers */
551 r.start += 0xe00; 551 r.start += 0xe00;
552 552
553 if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r), 553 if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
554 pdata->name)) { 554 pdata->name)) {
555 printk(KERN_ERR "%s: Error while requesting mem region\n", 555 printk(KERN_ERR "%s: Error while requesting mem region\n",
556 __func__); 556 __func__);
557 res = -EBUSY; 557 res = -EBUSY;
558 goto err; 558 goto err;
559 } 559 }
560 560
561 pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r)); 561 pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
562 if (!pdata->l2_vbase) { 562 if (!pdata->l2_vbase) {
563 printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__); 563 printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
564 res = -ENOMEM; 564 res = -ENOMEM;
565 goto err; 565 goto err;
566 } 566 }
567 567
568 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0); 568 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
569 569
570 orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS); 570 orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
571 571
572 /* clear the err_dis */ 572 /* clear the err_dis */
573 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0); 573 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
574 574
575 edac_dev->mod_name = EDAC_MOD_STR; 575 edac_dev->mod_name = EDAC_MOD_STR;
576 576
577 if (edac_op_state == EDAC_OPSTATE_POLL) 577 if (edac_op_state == EDAC_OPSTATE_POLL)
578 edac_dev->edac_check = mpc85xx_l2_check; 578 edac_dev->edac_check = mpc85xx_l2_check;
579 579
580 mpc85xx_set_l2_sysfs_attributes(edac_dev); 580 mpc85xx_set_l2_sysfs_attributes(edac_dev);
581 581
582 pdata->edac_idx = edac_dev_idx++; 582 pdata->edac_idx = edac_dev_idx++;
583 583
584 if (edac_device_add_device(edac_dev) > 0) { 584 if (edac_device_add_device(edac_dev) > 0) {
585 debugf3("%s(): failed edac_device_add_device()\n", __func__); 585 debugf3("%s(): failed edac_device_add_device()\n", __func__);
586 res = -ENODEV; goto err; 586 res = -ENODEV; goto err;
587 } 587 }
588 588
589 if (edac_op_state == EDAC_OPSTATE_INT) { 589 if (edac_op_state == EDAC_OPSTATE_INT) {
590 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); 590 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
591 res = devm_request_irq(&op->dev, pdata->irq, 591 res = devm_request_irq(&op->dev, pdata->irq,
592 mpc85xx_l2_isr, IRQF_DISABLED, 592 mpc85xx_l2_isr, IRQF_DISABLED,
593 "[EDAC] L2 err", edac_dev); 593 "[EDAC] L2 err", edac_dev);
594 if (res < 0) { 594 if (res < 0) {
595 printk(KERN_ERR 595 printk(KERN_ERR
596 "%s: Unable to requiest irq %d for " 596 "%s: Unable to requiest irq %d for "
597 "MPC85xx L2 err\n", __func__, pdata->irq); 597 "MPC85xx L2 err\n", __func__, pdata->irq);
598 irq_dispose_mapping(pdata->irq); 598 irq_dispose_mapping(pdata->irq);
599 res = -ENODEV; 599 res = -ENODEV;
600 goto err2; 600 goto err2;
601 } 601 }
602 602
603 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n", 603 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
604 pdata->irq); 604 pdata->irq);
605 605
606 edac_dev->op_state = OP_RUNNING_INTERRUPT; 606 edac_dev->op_state = OP_RUNNING_INTERRUPT;
607 607
608 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK); 608 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
609 } 609 }
610 610
611 devres_remove_group(&op->dev, mpc85xx_l2_err_probe); 611 devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
612 612
613 debugf3("%s(): success\n", __func__); 613 debugf3("%s(): success\n", __func__);
614 printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n"); 614 printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
615 615
616 return 0; 616 return 0;
617 617
618 err2: 618 err2:
619 edac_device_del_device(&op->dev); 619 edac_device_del_device(&op->dev);
620 err: 620 err:
621 devres_release_group(&op->dev, mpc85xx_l2_err_probe); 621 devres_release_group(&op->dev, mpc85xx_l2_err_probe);
622 edac_device_free_ctl_info(edac_dev); 622 edac_device_free_ctl_info(edac_dev);
623 return res; 623 return res;
624 } 624 }
625 625
626 static int mpc85xx_l2_err_remove(struct platform_device *op) 626 static int mpc85xx_l2_err_remove(struct platform_device *op)
627 { 627 {
628 struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev); 628 struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
629 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; 629 struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
630 630
631 debugf0("%s()\n", __func__); 631 debugf0("%s()\n", __func__);
632 632
633 if (edac_op_state == EDAC_OPSTATE_INT) { 633 if (edac_op_state == EDAC_OPSTATE_INT) {
634 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0); 634 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
635 irq_dispose_mapping(pdata->irq); 635 irq_dispose_mapping(pdata->irq);
636 } 636 }
637 637
638 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable); 638 out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
639 edac_device_del_device(&op->dev); 639 edac_device_del_device(&op->dev);
640 edac_device_free_ctl_info(edac_dev); 640 edac_device_free_ctl_info(edac_dev);
641 return 0; 641 return 0;
642 } 642 }
643 643
644 static struct of_device_id mpc85xx_l2_err_of_match[] = { 644 static struct of_device_id mpc85xx_l2_err_of_match[] = {
645 /* deprecate the fsl,85.. forms in the future, 2.6.30? */ 645 /* deprecate the fsl,85.. forms in the future, 2.6.30? */
646 { .compatible = "fsl,8540-l2-cache-controller", }, 646 { .compatible = "fsl,8540-l2-cache-controller", },
647 { .compatible = "fsl,8541-l2-cache-controller", }, 647 { .compatible = "fsl,8541-l2-cache-controller", },
648 { .compatible = "fsl,8544-l2-cache-controller", }, 648 { .compatible = "fsl,8544-l2-cache-controller", },
649 { .compatible = "fsl,8548-l2-cache-controller", }, 649 { .compatible = "fsl,8548-l2-cache-controller", },
650 { .compatible = "fsl,8555-l2-cache-controller", }, 650 { .compatible = "fsl,8555-l2-cache-controller", },
651 { .compatible = "fsl,8568-l2-cache-controller", }, 651 { .compatible = "fsl,8568-l2-cache-controller", },
652 { .compatible = "fsl,mpc8536-l2-cache-controller", }, 652 { .compatible = "fsl,mpc8536-l2-cache-controller", },
653 { .compatible = "fsl,mpc8540-l2-cache-controller", }, 653 { .compatible = "fsl,mpc8540-l2-cache-controller", },
654 { .compatible = "fsl,mpc8541-l2-cache-controller", }, 654 { .compatible = "fsl,mpc8541-l2-cache-controller", },
655 { .compatible = "fsl,mpc8544-l2-cache-controller", }, 655 { .compatible = "fsl,mpc8544-l2-cache-controller", },
656 { .compatible = "fsl,mpc8548-l2-cache-controller", }, 656 { .compatible = "fsl,mpc8548-l2-cache-controller", },
657 { .compatible = "fsl,mpc8555-l2-cache-controller", }, 657 { .compatible = "fsl,mpc8555-l2-cache-controller", },
658 { .compatible = "fsl,mpc8560-l2-cache-controller", }, 658 { .compatible = "fsl,mpc8560-l2-cache-controller", },
659 { .compatible = "fsl,mpc8568-l2-cache-controller", }, 659 { .compatible = "fsl,mpc8568-l2-cache-controller", },
660 { .compatible = "fsl,mpc8569-l2-cache-controller", }, 660 { .compatible = "fsl,mpc8569-l2-cache-controller", },
661 { .compatible = "fsl,mpc8572-l2-cache-controller", }, 661 { .compatible = "fsl,mpc8572-l2-cache-controller", },
662 { .compatible = "fsl,p1020-l2-cache-controller", }, 662 { .compatible = "fsl,p1020-l2-cache-controller", },
663 { .compatible = "fsl,p1021-l2-cache-controller", }, 663 { .compatible = "fsl,p1021-l2-cache-controller", },
664 { .compatible = "fsl,p2020-l2-cache-controller", }, 664 { .compatible = "fsl,p2020-l2-cache-controller", },
665 {}, 665 {},
666 }; 666 };
667 MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match); 667 MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
668 668
669 static struct platform_driver mpc85xx_l2_err_driver = { 669 static struct platform_driver mpc85xx_l2_err_driver = {
670 .probe = mpc85xx_l2_err_probe, 670 .probe = mpc85xx_l2_err_probe,
671 .remove = mpc85xx_l2_err_remove, 671 .remove = mpc85xx_l2_err_remove,
672 .driver = { 672 .driver = {
673 .name = "mpc85xx_l2_err", 673 .name = "mpc85xx_l2_err",
674 .owner = THIS_MODULE, 674 .owner = THIS_MODULE,
675 .of_match_table = mpc85xx_l2_err_of_match, 675 .of_match_table = mpc85xx_l2_err_of_match,
676 }, 676 },
677 }; 677 };
678 678
679 /**************************** MC Err device ***************************/ 679 /**************************** MC Err device ***************************/
680 680
681 /* 681 /*
682 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the 682 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
683 * MPC8572 User's Manual. Each line represents a syndrome bit column as a 683 * MPC8572 User's Manual. Each line represents a syndrome bit column as a
684 * 64-bit value, but split into an upper and lower 32-bit chunk. The labels 684 * 64-bit value, but split into an upper and lower 32-bit chunk. The labels
685 * below correspond to Freescale's manuals. 685 * below correspond to Freescale's manuals.
686 */ 686 */
687 static unsigned int ecc_table[16] = { 687 static unsigned int ecc_table[16] = {
688 /* MSB LSB */ 688 /* MSB LSB */
689 /* [0:31] [32:63] */ 689 /* [0:31] [32:63] */
690 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */ 690 0xf00fe11e, 0xc33c0ff7, /* Syndrome bit 7 */
691 0x00ff00ff, 0x00fff0ff, 691 0x00ff00ff, 0x00fff0ff,
692 0x0f0f0f0f, 0x0f0fff00, 692 0x0f0f0f0f, 0x0f0fff00,
693 0x11113333, 0x7777000f, 693 0x11113333, 0x7777000f,
694 0x22224444, 0x8888222f, 694 0x22224444, 0x8888222f,
695 0x44448888, 0xffff4441, 695 0x44448888, 0xffff4441,
696 0x8888ffff, 0x11118882, 696 0x8888ffff, 0x11118882,
697 0xffff1111, 0x22221114, /* Syndrome bit 0 */ 697 0xffff1111, 0x22221114, /* Syndrome bit 0 */
698 }; 698 };
699 699
700 /* 700 /*
701 * Calculate the correct ECC value for a 64-bit value specified by high:low 701 * Calculate the correct ECC value for a 64-bit value specified by high:low
702 */ 702 */
703 static u8 calculate_ecc(u32 high, u32 low) 703 static u8 calculate_ecc(u32 high, u32 low)
704 { 704 {
705 u32 mask_low; 705 u32 mask_low;
706 u32 mask_high; 706 u32 mask_high;
707 int bit_cnt; 707 int bit_cnt;
708 u8 ecc = 0; 708 u8 ecc = 0;
709 int i; 709 int i;
710 int j; 710 int j;
711 711
712 for (i = 0; i < 8; i++) { 712 for (i = 0; i < 8; i++) {
713 mask_high = ecc_table[i * 2]; 713 mask_high = ecc_table[i * 2];
714 mask_low = ecc_table[i * 2 + 1]; 714 mask_low = ecc_table[i * 2 + 1];
715 bit_cnt = 0; 715 bit_cnt = 0;
716 716
717 for (j = 0; j < 32; j++) { 717 for (j = 0; j < 32; j++) {
718 if ((mask_high >> j) & 1) 718 if ((mask_high >> j) & 1)
719 bit_cnt ^= (high >> j) & 1; 719 bit_cnt ^= (high >> j) & 1;
720 if ((mask_low >> j) & 1) 720 if ((mask_low >> j) & 1)
721 bit_cnt ^= (low >> j) & 1; 721 bit_cnt ^= (low >> j) & 1;
722 } 722 }
723 723
724 ecc |= bit_cnt << i; 724 ecc |= bit_cnt << i;
725 } 725 }
726 726
727 return ecc; 727 return ecc;
728 } 728 }
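
Each pass of the outer loop above computes even parity of the 64-bit word masked by one column of ecc_table, so a single check bit flips whenever an odd number of its column's data bits are set. A worked value that is easy to verify by hand, since high == 0 and low == 1 leave only bit 0 of the low word set and ECC bit i reduces to bit 0 of ecc_table[i * 2 + 1]:

	calculate_ecc(0x00000000, 0x00000001);	/* == 0x3b (bits 0, 1, 3, 4, 5) */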
729 729
730 /* 730 /*
731 * Create the syndrome code which is generated if the data line specified by 731 * Create the syndrome code which is generated if the data line specified by
732 * 'bit' failed. E.g. generate the 8-bit codes seen in Table 8-55 in the MPC8641 732 * 'bit' failed. E.g. generate the 8-bit codes seen in Table 8-55 in the MPC8641
733 * User's Manual and 9-61 in the MPC8572 User's Manual. 733 * User's Manual and 9-61 in the MPC8572 User's Manual.
734 */ 734 */
735 static u8 syndrome_from_bit(unsigned int bit) { 735 static u8 syndrome_from_bit(unsigned int bit) {
736 int i; 736 int i;
737 u8 syndrome = 0; 737 u8 syndrome = 0;
738 738
739 /* 739 /*
740 * Cycle through the upper or lower 32-bit portion of each value in 740 * Cycle through the upper or lower 32-bit portion of each value in
741 * ecc_table depending on if 'bit' is in the upper or lower half of 741 * ecc_table depending on if 'bit' is in the upper or lower half of
742 * 64-bit data. 742 * 64-bit data.
743 */ 743 */
744 for (i = bit < 32; i < 16; i += 2) 744 for (i = bit < 32; i < 16; i += 2)
745 syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2); 745 syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
746 746
747 return syndrome; 747 return syndrome;
748 } 748 }
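/*
 * Editorial note: the loop starts at index (bit < 32), i.e. at 1 (the
 * lower 32-bit halves) for bits 0-31 and at 0 (the upper halves) for
 * bits 32-63, stepping by 2 so only the relevant half of each mask
 * pair is consulted. For example, bit 0 tests bit 0 of ecc_table[1],
 * [3], ... [15], and (i / 2) places one syndrome bit per pair.
 */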
749 749
750 /* 750 /*
751 * Decode data and ecc syndrome to determine what went wrong 751 * Decode data and ecc syndrome to determine what went wrong
752 * Note: This can only decode single-bit errors 752 * Note: This can only decode single-bit errors
753 */ 753 */
754 static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc, 754 static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
755 int *bad_data_bit, int *bad_ecc_bit) 755 int *bad_data_bit, int *bad_ecc_bit)
756 { 756 {
757 int i; 757 int i;
758 u8 syndrome; 758 u8 syndrome;
759 759
760 *bad_data_bit = -1; 760 *bad_data_bit = -1;
761 *bad_ecc_bit = -1; 761 *bad_ecc_bit = -1;
762 762
763 /* 763 /*
764 * Calculate the ECC of the captured data and XOR it with the captured 764 * Calculate the ECC of the captured data and XOR it with the captured
765 * ECC to find an ECC syndrome value we can search for 765 * ECC to find an ECC syndrome value we can search for
766 */ 766 */
767 syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc; 767 syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
768 768
769 /* Check if a data line is stuck... */ 769 /* Check if a data line is stuck... */
770 for (i = 0; i < 64; i++) { 770 for (i = 0; i < 64; i++) {
771 if (syndrome == syndrome_from_bit(i)) { 771 if (syndrome == syndrome_from_bit(i)) {
772 *bad_data_bit = i; 772 *bad_data_bit = i;
773 return; 773 return;
774 } 774 }
775 } 775 }
776 776
777 /* If data is correct, check ECC bits for errors... */ 777 /* If data is correct, check ECC bits for errors... */
778 for (i = 0; i < 8; i++) { 778 for (i = 0; i < 8; i++) {
779 if ((syndrome >> i) & 0x1) { 779 if ((syndrome >> i) & 0x1) {
780 *bad_ecc_bit = i; 780 *bad_ecc_bit = i;
781 return; 781 return;
782 } 782 }
783 } 783 }
784 } 784 }
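/*
 * Editorial note: at most one output is set. If the syndrome matches
 * one of the 64 data-column codes, only *bad_data_bit is filled in;
 * otherwise the lowest set syndrome bit, if any, is reported as the
 * faulty ECC line. Callers can therefore test each output against -1
 * independently, as mpc85xx_mc_check() does below.
 */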
785 785
786 static void mpc85xx_mc_check(struct mem_ctl_info *mci) 786 static void mpc85xx_mc_check(struct mem_ctl_info *mci)
787 { 787 {
788 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 788 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
789 struct csrow_info *csrow; 789 struct csrow_info *csrow;
790 u32 bus_width; 790 u32 bus_width;
791 u32 err_detect; 791 u32 err_detect;
792 u32 syndrome; 792 u32 syndrome;
793 u32 err_addr; 793 u32 err_addr;
794 u32 pfn; 794 u32 pfn;
795 int row_index; 795 int row_index;
796 u32 cap_high; 796 u32 cap_high;
797 u32 cap_low; 797 u32 cap_low;
798 int bad_data_bit; 798 int bad_data_bit;
799 int bad_ecc_bit; 799 int bad_ecc_bit;
800 800
801 err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT); 801 err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
802 if (!err_detect) 802 if (!err_detect)
803 return; 803 return;
804 804
805 mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n", 805 mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
806 err_detect); 806 err_detect);
807 807
808 /* no further processing if these are not ECC bit errors */ 808 /* no further processing if these are not ECC bit errors */
809 if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) { 809 if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
810 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); 810 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
811 return; 811 return;
812 } 812 }
813 813
814 syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC); 814 syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
815 815
816 /* Mask off appropriate bits of syndrome based on bus width */ 816 /* Mask off appropriate bits of syndrome based on bus width */
817 bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) & 817 bus_width = (in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG) &
818 DSC_DBW_MASK) ? 32 : 64; 818 DSC_DBW_MASK) ? 32 : 64;
819 if (bus_width == 64) 819 if (bus_width == 64)
820 syndrome &= 0xff; 820 syndrome &= 0xff;
821 else 821 else
822 syndrome &= 0xffff; 822 syndrome &= 0xffff;
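/*
 * Editorial assumption: with a 64-bit bus a single 8-bit ECC value
 * covers the captured data, so only syndrome[7:0] is meaningful; the
 * 16-bit mask for a 32-bit bus presumably reflects two captured 8-bit
 * ECC values (32-bit decode is not implemented; see the TODO below).
 */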
823 823
824 err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS); 824 err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
825 pfn = err_addr >> PAGE_SHIFT; 825 pfn = err_addr >> PAGE_SHIFT;
826 826
827 for (row_index = 0; row_index < mci->nr_csrows; row_index++) { 827 for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
828 csrow = &mci->csrows[row_index]; 828 csrow = mci->csrows[row_index];
829 if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page)) 829 if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
830 break; 830 break;
831 } 831 }
832 832
833 cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI); 833 cap_high = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_HI);
834 cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO); 834 cap_low = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_DATA_LO);
835 835
836 /* 836 /*
837 * Analyze single-bit errors on 64-bit wide buses 837 * Analyze single-bit errors on 64-bit wide buses
838 * TODO: Add support for 32-bit wide buses 838 * TODO: Add support for 32-bit wide buses
839 */ 839 */
840 if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) { 840 if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
841 sbe_ecc_decode(cap_high, cap_low, syndrome, 841 sbe_ecc_decode(cap_high, cap_low, syndrome,
842 &bad_data_bit, &bad_ecc_bit); 842 &bad_data_bit, &bad_ecc_bit);
843 843
844 if (bad_data_bit != -1) 844 if (bad_data_bit != -1)
845 mpc85xx_mc_printk(mci, KERN_ERR, 845 mpc85xx_mc_printk(mci, KERN_ERR,
846 "Faulty Data bit: %d\n", bad_data_bit); 846 "Faulty Data bit: %d\n", bad_data_bit);
847 if (bad_ecc_bit != -1) 847 if (bad_ecc_bit != -1)
848 mpc85xx_mc_printk(mci, KERN_ERR, 848 mpc85xx_mc_printk(mci, KERN_ERR,
849 "Faulty ECC bit: %d\n", bad_ecc_bit); 849 "Faulty ECC bit: %d\n", bad_ecc_bit);
850 850
/* guard the shifts: for a single-bit error at least one of bad_data_bit/bad_ecc_bit is -1, and an unguarded negative shift is undefined */
851 mpc85xx_mc_printk(mci, KERN_ERR, 851 mpc85xx_mc_printk(mci, KERN_ERR,
852 "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n", 852 "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
853 bad_data_bit >= 32 ? cap_high ^ (1 << (bad_data_bit - 32)) : cap_high, 853 bad_data_bit >= 32 ? cap_high ^ (1 << (bad_data_bit - 32)) : cap_high,
854 bad_data_bit >= 0 && bad_data_bit < 32 ? cap_low ^ (1 << bad_data_bit) : cap_low, 854 bad_data_bit >= 0 && bad_data_bit < 32 ? cap_low ^ (1 << bad_data_bit) : cap_low,
855 bad_ecc_bit >= 0 ? syndrome ^ (1 << bad_ecc_bit) : syndrome); 855 bad_ecc_bit >= 0 ? syndrome ^ (1 << bad_ecc_bit) : syndrome);
856 } 856 }
857 857
858 mpc85xx_mc_printk(mci, KERN_ERR, 858 mpc85xx_mc_printk(mci, KERN_ERR,
859 "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n", 859 "Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
860 cap_high, cap_low, syndrome); 860 cap_high, cap_low, syndrome);
861 mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr); 861 mpc85xx_mc_printk(mci, KERN_ERR, "Err addr: %#8.8x\n", err_addr);
862 mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn); 862 mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
863 863
864 /* the PFN did not fall within any csrow's range */ 864 /* the PFN did not fall within any csrow's range */
865 if (row_index == mci->nr_csrows) 865 if (row_index == mci->nr_csrows)
866 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); 866 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
867 867
868 if (err_detect & DDR_EDE_SBE) 868 if (err_detect & DDR_EDE_SBE)
869 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 869 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
870 pfn, err_addr & ~PAGE_MASK, syndrome, 870 pfn, err_addr & ~PAGE_MASK, syndrome,
871 row_index, 0, -1, 871 row_index, 0, -1,
872 mci->ctl_name, "", NULL); 872 mci->ctl_name, "", NULL);
873 873
874 if (err_detect & DDR_EDE_MBE) 874 if (err_detect & DDR_EDE_MBE)
875 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 875 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
876 pfn, err_addr & ~PAGE_MASK, syndrome, 876 pfn, err_addr & ~PAGE_MASK, syndrome,
877 row_index, 0, -1, 877 row_index, 0, -1,
878 mci->ctl_name, "", NULL); 878 mci->ctl_name, "", NULL);
879 879
880 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); 880 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
881 } 881 }
882 882
883 static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id) 883 static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
884 { 884 {
885 struct mem_ctl_info *mci = dev_id; 885 struct mem_ctl_info *mci = dev_id;
886 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 886 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
887 u32 err_detect; 887 u32 err_detect;
888 888
889 err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT); 889 err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
890 if (!err_detect) 890 if (!err_detect)
891 return IRQ_NONE; 891 return IRQ_NONE;
892 892
893 mpc85xx_mc_check(mci); 893 mpc85xx_mc_check(mci);
894 894
895 return IRQ_HANDLED; 895 return IRQ_HANDLED;
896 } 896 }
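/*
 * Editorial note: mpc85xx_mc_check() serves both reporting modes. In
 * EDAC_OPSTATE_POLL the probe routine below installs it as
 * mci->edac_check; in EDAC_OPSTATE_INT the handler above calls it only
 * after the error-detect register confirms the interrupt is ours.
 */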
897 897
898 static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) 898 static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
899 { 899 {
900 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 900 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
901 struct csrow_info *csrow; 901 struct csrow_info *csrow;
902 struct dimm_info *dimm; 902 struct dimm_info *dimm;
903 u32 sdram_ctl; 903 u32 sdram_ctl;
904 u32 sdtype; 904 u32 sdtype;
905 enum mem_type mtype; 905 enum mem_type mtype;
906 u32 cs_bnds; 906 u32 cs_bnds;
907 int index; 907 int index;
908 908
909 sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG); 909 sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
910 910
911 sdtype = sdram_ctl & DSC_SDTYPE_MASK; 911 sdtype = sdram_ctl & DSC_SDTYPE_MASK;
912 if (sdram_ctl & DSC_RD_EN) { 912 if (sdram_ctl & DSC_RD_EN) {
913 switch (sdtype) { 913 switch (sdtype) {
914 case DSC_SDTYPE_DDR: 914 case DSC_SDTYPE_DDR:
915 mtype = MEM_RDDR; 915 mtype = MEM_RDDR;
916 break; 916 break;
917 case DSC_SDTYPE_DDR2: 917 case DSC_SDTYPE_DDR2:
918 mtype = MEM_RDDR2; 918 mtype = MEM_RDDR2;
919 break; 919 break;
920 case DSC_SDTYPE_DDR3: 920 case DSC_SDTYPE_DDR3:
921 mtype = MEM_RDDR3; 921 mtype = MEM_RDDR3;
922 break; 922 break;
923 default: 923 default:
924 mtype = MEM_UNKNOWN; 924 mtype = MEM_UNKNOWN;
925 break; 925 break;
926 } 926 }
927 } else { 927 } else {
928 switch (sdtype) { 928 switch (sdtype) {
929 case DSC_SDTYPE_DDR: 929 case DSC_SDTYPE_DDR:
930 mtype = MEM_DDR; 930 mtype = MEM_DDR;
931 break; 931 break;
932 case DSC_SDTYPE_DDR2: 932 case DSC_SDTYPE_DDR2:
933 mtype = MEM_DDR2; 933 mtype = MEM_DDR2;
934 break; 934 break;
935 case DSC_SDTYPE_DDR3: 935 case DSC_SDTYPE_DDR3:
936 mtype = MEM_DDR3; 936 mtype = MEM_DDR3;
937 break; 937 break;
938 default: 938 default:
939 mtype = MEM_UNKNOWN; 939 mtype = MEM_UNKNOWN;
940 break; 940 break;
941 } 941 }
942 } 942 }
943 943
944 for (index = 0; index < mci->nr_csrows; index++) { 944 for (index = 0; index < mci->nr_csrows; index++) {
945 u32 start; 945 u32 start;
946 u32 end; 946 u32 end;
947 947
948 csrow = &mci->csrows[index]; 948 csrow = mci->csrows[index];
949 dimm = csrow->channels[0].dimm; 949 dimm = csrow->channels[0]->dimm;
950 950
951 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + 951 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
952 (index * MPC85XX_MC_CS_BNDS_OFS)); 952 (index * MPC85XX_MC_CS_BNDS_OFS));
953 953
954 start = (cs_bnds & 0xffff0000) >> 16; 954 start = (cs_bnds & 0xffff0000) >> 16;
955 end = (cs_bnds & 0x0000ffff); 955 end = (cs_bnds & 0x0000ffff);
956 956
957 if (start == end) 957 if (start == end)
958 continue; /* not populated */ 958 continue; /* not populated */
959 959
960 start <<= (24 - PAGE_SHIFT); 960 start <<= (24 - PAGE_SHIFT);
961 end <<= (24 - PAGE_SHIFT); 961 end <<= (24 - PAGE_SHIFT);
962 end |= (1 << (24 - PAGE_SHIFT)) - 1; 962 end |= (1 << (24 - PAGE_SHIFT)) - 1;
963 963
964 csrow->first_page = start; 964 csrow->first_page = start;
965 csrow->last_page = end; 965 csrow->last_page = end;
966 966
967 dimm->nr_pages = end + 1 - start; 967 dimm->nr_pages = end + 1 - start;
968 dimm->grain = 8; 968 dimm->grain = 8;
969 dimm->mtype = mtype; 969 dimm->mtype = mtype;
970 dimm->dtype = DEV_UNKNOWN; 970 dimm->dtype = DEV_UNKNOWN;
971 if (sdram_ctl & DSC_X32_EN) 971 if (sdram_ctl & DSC_X32_EN)
972 dimm->dtype = DEV_X32; 972 dimm->dtype = DEV_X32;
973 dimm->edac_mode = EDAC_SECDED; 973 dimm->edac_mode = EDAC_SECDED;
974 } 974 }
975 } 975 }
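/*
 * Editorial worked example (assuming the CS_BNDS fields are in 16MB
 * units, which is what the shift by 24 implies): with 4KB pages
 * (PAGE_SHIFT == 12), a row with start == 0x0000 and end == 0x0003
 * yields first_page == 0 and last_page == (3 << 12) | 0xfff == 0x3fff,
 * so nr_pages == 0x4000 pages == 64MB -- four 16MB units, as expected.
 */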
976 976
977 static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) 977 static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
978 { 978 {
979 struct mem_ctl_info *mci; 979 struct mem_ctl_info *mci;
980 struct edac_mc_layer layers[2]; 980 struct edac_mc_layer layers[2];
981 struct mpc85xx_mc_pdata *pdata; 981 struct mpc85xx_mc_pdata *pdata;
982 struct resource r; 982 struct resource r;
983 u32 sdram_ctl; 983 u32 sdram_ctl;
984 int res; 984 int res;
985 985
986 if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL)) 986 if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
987 return -ENOMEM; 987 return -ENOMEM;
988 988
989 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 989 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
990 layers[0].size = 4; 990 layers[0].size = 4;
991 layers[0].is_virt_csrow = true; 991 layers[0].is_virt_csrow = true;
992 layers[1].type = EDAC_MC_LAYER_CHANNEL; 992 layers[1].type = EDAC_MC_LAYER_CHANNEL;
993 layers[1].size = 1; 993 layers[1].size = 1;
994 layers[1].is_virt_csrow = false; 994 layers[1].is_virt_csrow = false;
995 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata)); 995 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata));
996 if (!mci) { 996 if (!mci) {
997 devres_release_group(&op->dev, mpc85xx_mc_err_probe); 997 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
998 return -ENOMEM; 998 return -ENOMEM;
999 } 999 }
1000 1000
1001 pdata = mci->pvt_info; 1001 pdata = mci->pvt_info;
1002 pdata->name = "mpc85xx_mc_err"; 1002 pdata->name = "mpc85xx_mc_err";
1003 pdata->irq = NO_IRQ; 1003 pdata->irq = NO_IRQ;
1004 mci->pdev = &op->dev; 1004 mci->pdev = &op->dev;
1005 pdata->edac_idx = edac_mc_idx++; 1005 pdata->edac_idx = edac_mc_idx++;
1006 dev_set_drvdata(mci->pdev, mci); 1006 dev_set_drvdata(mci->pdev, mci);
1007 mci->ctl_name = pdata->name; 1007 mci->ctl_name = pdata->name;
1008 mci->dev_name = pdata->name; 1008 mci->dev_name = pdata->name;
1009 1009
1010 res = of_address_to_resource(op->dev.of_node, 0, &r); 1010 res = of_address_to_resource(op->dev.of_node, 0, &r);
1011 if (res) { 1011 if (res) {
1012 printk(KERN_ERR "%s: Unable to get resource for MC err regs\n", 1012 printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
1013 __func__); 1013 __func__);
1014 goto err; 1014 goto err;
1015 } 1015 }
1016 1016
1017 if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r), 1017 if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
1018 pdata->name)) { 1018 pdata->name)) {
1019 printk(KERN_ERR "%s: Error while requesting mem region\n", 1019 printk(KERN_ERR "%s: Error while requesting mem region\n",
1020 __func__); 1020 __func__);
1021 res = -EBUSY; 1021 res = -EBUSY;
1022 goto err; 1022 goto err;
1023 } 1023 }
1024 1024
1025 pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r)); 1025 pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
1026 if (!pdata->mc_vbase) { 1026 if (!pdata->mc_vbase) {
1027 printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__); 1027 printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
1028 res = -ENOMEM; 1028 res = -ENOMEM;
1029 goto err; 1029 goto err;
1030 } 1030 }
1031 1031
1032 sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG); 1032 sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
1033 if (!(sdram_ctl & DSC_ECC_EN)) { 1033 if (!(sdram_ctl & DSC_ECC_EN)) {
1034 /* no ECC */ 1034 /* no ECC */
1035 printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); 1035 printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
1036 res = -ENODEV; 1036 res = -ENODEV;
1037 goto err; 1037 goto err;
1038 } 1038 }
1039 1039
1040 debugf3("%s(): init mci\n", __func__); 1040 debugf3("%s(): init mci\n", __func__);
1041 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | 1041 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
1042 MEM_FLAG_RDDR3 | MEM_FLAG_DDR | MEM_FLAG_DDR2 | MEM_FLAG_DDR3; 1042 MEM_FLAG_RDDR3 | MEM_FLAG_DDR | MEM_FLAG_DDR2 | MEM_FLAG_DDR3;
1043 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 1043 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
1044 mci->edac_cap = EDAC_FLAG_SECDED; 1044 mci->edac_cap = EDAC_FLAG_SECDED;
1045 mci->mod_name = EDAC_MOD_STR; 1045 mci->mod_name = EDAC_MOD_STR;
1046 mci->mod_ver = MPC85XX_REVISION; 1046 mci->mod_ver = MPC85XX_REVISION;
1047 1047
1048 if (edac_op_state == EDAC_OPSTATE_POLL) 1048 if (edac_op_state == EDAC_OPSTATE_POLL)
1049 mci->edac_check = mpc85xx_mc_check; 1049 mci->edac_check = mpc85xx_mc_check;
1050 1050
1051 mci->ctl_page_to_phys = NULL; 1051 mci->ctl_page_to_phys = NULL;
1052 1052
1053 mci->scrub_mode = SCRUB_SW_SRC; 1053 mci->scrub_mode = SCRUB_SW_SRC;
1054 1054
1055 mpc85xx_init_csrows(mci); 1055 mpc85xx_init_csrows(mci);
1056 1056
1057 /* store the original error disable bits */ 1057 /* store the original error disable bits */
1058 orig_ddr_err_disable = 1058 orig_ddr_err_disable =
1059 in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE); 1059 in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
1060 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0); 1060 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
1061 1061
1062 /* clear all error bits */ 1062 /* clear all error bits */
1063 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0); 1063 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
1064 1064
1065 if (edac_mc_add_mc(mci)) { 1065 if (edac_mc_add_mc(mci)) {
1066 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 1066 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
1067 goto err; 1067 goto err;
1068 } 1068 }
1069 1069
1070 if (mpc85xx_create_sysfs_attributes(mci)) { 1070 if (mpc85xx_create_sysfs_attributes(mci)) {
1071 edac_mc_del_mc(mci->pdev); 1071 edac_mc_del_mc(mci->pdev);
1072 debugf3("%s(): failed mpc85xx_create_sysfs_attributes()\n", __func__); 1072 debugf3("%s(): failed mpc85xx_create_sysfs_attributes()\n", __func__);
1073 goto err; 1073 goto err;
1074 } 1074 }
1075 1075
1076 if (edac_op_state == EDAC_OPSTATE_INT) { 1076 if (edac_op_state == EDAC_OPSTATE_INT) {
1077 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 1077 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
1078 DDR_EIE_MBEE | DDR_EIE_SBEE); 1078 DDR_EIE_MBEE | DDR_EIE_SBEE);
1079 1079
1080 /* store the original error management threshold */ 1080 /* store the original error management threshold */
1081 orig_ddr_err_sbe = in_be32(pdata->mc_vbase + 1081 orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
1082 MPC85XX_MC_ERR_SBE) & 0xff0000; 1082 MPC85XX_MC_ERR_SBE) & 0xff0000;
1083 1083
1084 /* set threshold to 1 error per interrupt */ 1084 /* set threshold to 1 error per interrupt */
1085 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000); 1085 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
1086 1086
1087 /* register interrupts */ 1087 /* register interrupts */
1088 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); 1088 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
1089 res = devm_request_irq(&op->dev, pdata->irq, 1089 res = devm_request_irq(&op->dev, pdata->irq,
1090 mpc85xx_mc_isr, 1090 mpc85xx_mc_isr,
1091 IRQF_DISABLED | IRQF_SHARED, 1091 IRQF_DISABLED | IRQF_SHARED,
1092 "[EDAC] MC err", mci); 1092 "[EDAC] MC err", mci);
1093 if (res < 0) { 1093 if (res < 0) {
1094 printk(KERN_ERR "%s: Unable to request irq %d for " 1094 printk(KERN_ERR "%s: Unable to request irq %d for "
1095 "MPC85xx DRAM ERR\n", __func__, pdata->irq); 1095 "MPC85xx DRAM ERR\n", __func__, pdata->irq);
1096 irq_dispose_mapping(pdata->irq); 1096 irq_dispose_mapping(pdata->irq);
1097 res = -ENODEV; 1097 res = -ENODEV;
1098 goto err2; 1098 goto err2;
1099 } 1099 }
1100 1100
1101 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n", 1101 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
1102 pdata->irq); 1102 pdata->irq);
1103 } 1103 }
1104 1104
1105 devres_remove_group(&op->dev, mpc85xx_mc_err_probe); 1105 devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
1106 debugf3("%s(): success\n", __func__); 1106 debugf3("%s(): success\n", __func__);
1107 printk(KERN_INFO EDAC_MOD_STR " MC err registered\n"); 1107 printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
1108 1108
1109 return 0; 1109 return 0;
1110 1110
1111 err2: 1111 err2:
1112 edac_mc_del_mc(&op->dev); 1112 edac_mc_del_mc(&op->dev);
1113 err: 1113 err:
1114 devres_release_group(&op->dev, mpc85xx_mc_err_probe); 1114 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
1115 edac_mc_free(mci); 1115 edac_mc_free(mci);
1116 return res; 1116 return res;
1117 } 1117 }
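/*
 * Editorial note: the layers passed to edac_mc_alloc() above describe
 * 4 chip-select rows of 1 channel each, and the allocator now hands
 * each row/channel back as a separately allocated object. That is why
 * mpc85xx_mc_check() and mpc85xx_init_csrows() dereference
 * mci->csrows[index] as a pointer rather than taking its address.
 */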
1118 1118
1119 static int mpc85xx_mc_err_remove(struct platform_device *op) 1119 static int mpc85xx_mc_err_remove(struct platform_device *op)
1120 { 1120 {
1121 struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); 1121 struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
1122 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 1122 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
1123 1123
1124 debugf0("%s()\n", __func__); 1124 debugf0("%s()\n", __func__);
1125 1125
1126 if (edac_op_state == EDAC_OPSTATE_INT) { 1126 if (edac_op_state == EDAC_OPSTATE_INT) {
1127 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0); 1127 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
1128 irq_dispose_mapping(pdata->irq); 1128 irq_dispose_mapping(pdata->irq);
1129 } 1129 }
1130 1130
1131 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 1131 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
1132 orig_ddr_err_disable); 1132 orig_ddr_err_disable);
1133 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe); 1133 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
1134 1134
1135 mpc85xx_remove_sysfs_attributes(mci); 1135 mpc85xx_remove_sysfs_attributes(mci);
1136 edac_mc_del_mc(&op->dev); 1136 edac_mc_del_mc(&op->dev);
1137 edac_mc_free(mci); 1137 edac_mc_free(mci);
1138 return 0; 1138 return 0;
1139 } 1139 }
1140 1140
1141 static struct of_device_id mpc85xx_mc_err_of_match[] = { 1141 static struct of_device_id mpc85xx_mc_err_of_match[] = {
1142 /* deprecate the fsl,85.. forms in the future, 2.6.30? */ 1142 /* deprecate the fsl,85.. forms in the future, 2.6.30? */
1143 { .compatible = "fsl,8540-memory-controller", }, 1143 { .compatible = "fsl,8540-memory-controller", },
1144 { .compatible = "fsl,8541-memory-controller", }, 1144 { .compatible = "fsl,8541-memory-controller", },
1145 { .compatible = "fsl,8544-memory-controller", }, 1145 { .compatible = "fsl,8544-memory-controller", },
1146 { .compatible = "fsl,8548-memory-controller", }, 1146 { .compatible = "fsl,8548-memory-controller", },
1147 { .compatible = "fsl,8555-memory-controller", }, 1147 { .compatible = "fsl,8555-memory-controller", },
1148 { .compatible = "fsl,8568-memory-controller", }, 1148 { .compatible = "fsl,8568-memory-controller", },
1149 { .compatible = "fsl,mpc8536-memory-controller", }, 1149 { .compatible = "fsl,mpc8536-memory-controller", },
1150 { .compatible = "fsl,mpc8540-memory-controller", }, 1150 { .compatible = "fsl,mpc8540-memory-controller", },
1151 { .compatible = "fsl,mpc8541-memory-controller", }, 1151 { .compatible = "fsl,mpc8541-memory-controller", },
1152 { .compatible = "fsl,mpc8544-memory-controller", }, 1152 { .compatible = "fsl,mpc8544-memory-controller", },
1153 { .compatible = "fsl,mpc8548-memory-controller", }, 1153 { .compatible = "fsl,mpc8548-memory-controller", },
1154 { .compatible = "fsl,mpc8555-memory-controller", }, 1154 { .compatible = "fsl,mpc8555-memory-controller", },
1155 { .compatible = "fsl,mpc8560-memory-controller", }, 1155 { .compatible = "fsl,mpc8560-memory-controller", },
1156 { .compatible = "fsl,mpc8568-memory-controller", }, 1156 { .compatible = "fsl,mpc8568-memory-controller", },
1157 { .compatible = "fsl,mpc8569-memory-controller", }, 1157 { .compatible = "fsl,mpc8569-memory-controller", },
1158 { .compatible = "fsl,mpc8572-memory-controller", }, 1158 { .compatible = "fsl,mpc8572-memory-controller", },
1159 { .compatible = "fsl,mpc8349-memory-controller", }, 1159 { .compatible = "fsl,mpc8349-memory-controller", },
1160 { .compatible = "fsl,p1020-memory-controller", }, 1160 { .compatible = "fsl,p1020-memory-controller", },
1161 { .compatible = "fsl,p1021-memory-controller", }, 1161 { .compatible = "fsl,p1021-memory-controller", },
1162 { .compatible = "fsl,p2020-memory-controller", }, 1162 { .compatible = "fsl,p2020-memory-controller", },
1163 { .compatible = "fsl,qoriq-memory-controller", }, 1163 { .compatible = "fsl,qoriq-memory-controller", },
1164 {}, 1164 {},
1165 }; 1165 };
1166 MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); 1166 MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
1167 1167
1168 static struct platform_driver mpc85xx_mc_err_driver = { 1168 static struct platform_driver mpc85xx_mc_err_driver = {
1169 .probe = mpc85xx_mc_err_probe, 1169 .probe = mpc85xx_mc_err_probe,
1170 .remove = mpc85xx_mc_err_remove, 1170 .remove = mpc85xx_mc_err_remove,
1171 .driver = { 1171 .driver = {
1172 .name = "mpc85xx_mc_err", 1172 .name = "mpc85xx_mc_err",
1173 .owner = THIS_MODULE, 1173 .owner = THIS_MODULE,
1174 .of_match_table = mpc85xx_mc_err_of_match, 1174 .of_match_table = mpc85xx_mc_err_of_match,
1175 }, 1175 },
1176 }; 1176 };
1177 1177
1178 #ifdef CONFIG_FSL_SOC_BOOKE 1178 #ifdef CONFIG_FSL_SOC_BOOKE
1179 static void __init mpc85xx_mc_clear_rfxe(void *data) 1179 static void __init mpc85xx_mc_clear_rfxe(void *data)
1180 { 1180 {
1181 orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1); 1181 orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
1182 mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE)); 1182 mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~HID1_RFXE));
1183 } 1183 }
1184 #endif 1184 #endif
1185 1185
1186 static int __init mpc85xx_mc_init(void) 1186 static int __init mpc85xx_mc_init(void)
1187 { 1187 {
1188 int res = 0; 1188 int res = 0;
1189 u32 pvr = 0; 1189 u32 pvr = 0;
1190 1190
1191 printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, " 1191 printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
1192 "(C) 2006 Montavista Software\n"); 1192 "(C) 2006 Montavista Software\n");
1193 1193
1194 /* make sure error reporting method is sane */ 1194 /* make sure error reporting method is sane */
1195 switch (edac_op_state) { 1195 switch (edac_op_state) {
1196 case EDAC_OPSTATE_POLL: 1196 case EDAC_OPSTATE_POLL:
1197 case EDAC_OPSTATE_INT: 1197 case EDAC_OPSTATE_INT:
1198 break; 1198 break;
1199 default: 1199 default:
1200 edac_op_state = EDAC_OPSTATE_INT; 1200 edac_op_state = EDAC_OPSTATE_INT;
1201 break; 1201 break;
1202 } 1202 }
1203 1203
1204 res = platform_driver_register(&mpc85xx_mc_err_driver); 1204 res = platform_driver_register(&mpc85xx_mc_err_driver);
1205 if (res) 1205 if (res)
1206 printk(KERN_WARNING EDAC_MOD_STR " MC fails to register\n"); 1206 printk(KERN_WARNING EDAC_MOD_STR " MC fails to register\n");
1207 1207
1208 res = platform_driver_register(&mpc85xx_l2_err_driver); 1208 res = platform_driver_register(&mpc85xx_l2_err_driver);
1209 if (res) 1209 if (res)
1210 printk(KERN_WARNING EDAC_MOD_STR " L2 fails to register\n"); 1210 printk(KERN_WARNING EDAC_MOD_STR " L2 fails to register\n");
1211 1211
1212 #ifdef CONFIG_PCI 1212 #ifdef CONFIG_PCI
1213 res = platform_driver_register(&mpc85xx_pci_err_driver); 1213 res = platform_driver_register(&mpc85xx_pci_err_driver);
1214 if (res) 1214 if (res)
1215 printk(KERN_WARNING EDAC_MOD_STR " PCI fails to register\n"); 1215 printk(KERN_WARNING EDAC_MOD_STR " PCI fails to register\n");
1216 #endif 1216 #endif
1217 1217
1218 #ifdef CONFIG_FSL_SOC_BOOKE 1218 #ifdef CONFIG_FSL_SOC_BOOKE
1219 pvr = mfspr(SPRN_PVR); 1219 pvr = mfspr(SPRN_PVR);
1220 1220
1221 if ((PVR_VER(pvr) == PVR_VER_E500V1) || 1221 if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
1222 (PVR_VER(pvr) == PVR_VER_E500V2)) { 1222 (PVR_VER(pvr) == PVR_VER_E500V2)) {
1223 /* 1223 /*
1224 * need to clear HID1[RFXE] to disable machine check int 1224 * need to clear HID1[RFXE] to disable machine check int
1225 * so we can catch it 1225 * so we can catch it
1226 */ 1226 */
1227 if (edac_op_state == EDAC_OPSTATE_INT) 1227 if (edac_op_state == EDAC_OPSTATE_INT)
1228 on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0); 1228 on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
1229 } 1229 }
1230 #endif 1230 #endif
1231 1231
1232 return 0; 1232 return 0;
1233 } 1233 }
1234 1234
1235 module_init(mpc85xx_mc_init); 1235 module_init(mpc85xx_mc_init);
1236 1236
1237 #ifdef CONFIG_FSL_SOC_BOOKE 1237 #ifdef CONFIG_FSL_SOC_BOOKE
1238 static void __exit mpc85xx_mc_restore_hid1(void *data) 1238 static void __exit mpc85xx_mc_restore_hid1(void *data)
1239 { 1239 {
1240 mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]); 1240 mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
1241 } 1241 }
1242 #endif 1242 #endif
1243 1243
1244 static void __exit mpc85xx_mc_exit(void) 1244 static void __exit mpc85xx_mc_exit(void)
1245 { 1245 {
1246 #ifdef CONFIG_FSL_SOC_BOOKE 1246 #ifdef CONFIG_FSL_SOC_BOOKE
1247 u32 pvr = mfspr(SPRN_PVR); 1247 u32 pvr = mfspr(SPRN_PVR);
1248 1248
1249 if ((PVR_VER(pvr) == PVR_VER_E500V1) || 1249 if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
1250 (PVR_VER(pvr) == PVR_VER_E500V2)) { 1250 (PVR_VER(pvr) == PVR_VER_E500V2)) {
1251 on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); 1251 on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
1252 } 1252 }
1253 #endif 1253 #endif
1254 #ifdef CONFIG_PCI 1254 #ifdef CONFIG_PCI
1255 platform_driver_unregister(&mpc85xx_pci_err_driver); 1255 platform_driver_unregister(&mpc85xx_pci_err_driver);
1256 #endif 1256 #endif
1257 platform_driver_unregister(&mpc85xx_l2_err_driver); 1257 platform_driver_unregister(&mpc85xx_l2_err_driver);
1258 platform_driver_unregister(&mpc85xx_mc_err_driver); 1258 platform_driver_unregister(&mpc85xx_mc_err_driver);
1259 } 1259 }
1260 1260
1261 module_exit(mpc85xx_mc_exit); 1261 module_exit(mpc85xx_mc_exit);
1262 1262
1263 MODULE_LICENSE("GPL"); 1263 MODULE_LICENSE("GPL");
1264 MODULE_AUTHOR("Montavista Software, Inc."); 1264 MODULE_AUTHOR("Montavista Software, Inc.");
1265 module_param(edac_op_state, int, 0444); 1265 module_param(edac_op_state, int, 0444);
1266 MODULE_PARM_DESC(edac_op_state, 1266 MODULE_PARM_DESC(edac_op_state,
1267 "EDAC Error Reporting state: 0=Poll, 2=Interrupt"); 1267 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
1268 1268
drivers/edac/mv64x60_edac.c
1 /* 1 /*
2 * Marvell MV64x60 Memory Controller kernel module for PPC platforms 2 * Marvell MV64x60 Memory Controller kernel module for PPC platforms
3 * 3 *
4 * Author: Dave Jiang <djiang@mvista.com> 4 * Author: Dave Jiang <djiang@mvista.com>
5 * 5 *
6 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under 6 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program 7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express 8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied. 9 * or implied.
10 * 10 *
11 */ 11 */
12 12
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/interrupt.h> 15 #include <linux/interrupt.h>
16 #include <linux/io.h> 16 #include <linux/io.h>
17 #include <linux/edac.h> 17 #include <linux/edac.h>
18 #include <linux/gfp.h> 18 #include <linux/gfp.h>
19 19
20 #include "edac_core.h" 20 #include "edac_core.h"
21 #include "edac_module.h" 21 #include "edac_module.h"
22 #include "mv64x60_edac.h" 22 #include "mv64x60_edac.h"
23 23
24 static const char *mv64x60_ctl_name = "MV64x60"; 24 static const char *mv64x60_ctl_name = "MV64x60";
25 static int edac_dev_idx; 25 static int edac_dev_idx;
26 static int edac_pci_idx; 26 static int edac_pci_idx;
27 static int edac_mc_idx; 27 static int edac_mc_idx;
28 28
29 /*********************** PCI err device **********************************/ 29 /*********************** PCI err device **********************************/
30 #ifdef CONFIG_PCI 30 #ifdef CONFIG_PCI
31 static void mv64x60_pci_check(struct edac_pci_ctl_info *pci) 31 static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
32 { 32 {
33 struct mv64x60_pci_pdata *pdata = pci->pvt_info; 33 struct mv64x60_pci_pdata *pdata = pci->pvt_info;
34 u32 cause; 34 u32 cause;
35 35
36 cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE); 36 cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
37 if (!cause) 37 if (!cause)
38 return; 38 return;
39 39
40 printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose); 40 printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
41 printk(KERN_ERR "Cause register: 0x%08x\n", cause); 41 printk(KERN_ERR "Cause register: 0x%08x\n", cause);
42 printk(KERN_ERR "Address Low: 0x%08x\n", 42 printk(KERN_ERR "Address Low: 0x%08x\n",
43 in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO)); 43 in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
44 printk(KERN_ERR "Address High: 0x%08x\n", 44 printk(KERN_ERR "Address High: 0x%08x\n",
45 in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI)); 45 in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
46 printk(KERN_ERR "Attribute: 0x%08x\n", 46 printk(KERN_ERR "Attribute: 0x%08x\n",
47 in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR)); 47 in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
48 printk(KERN_ERR "Command: 0x%08x\n", 48 printk(KERN_ERR "Command: 0x%08x\n",
49 in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD)); 49 in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
50 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause); 50 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);
51 51
52 if (cause & MV64X60_PCI_PE_MASK) 52 if (cause & MV64X60_PCI_PE_MASK)
53 edac_pci_handle_pe(pci, pci->ctl_name); 53 edac_pci_handle_pe(pci, pci->ctl_name);
55 else 55 else
56 edac_pci_handle_npe(pci, pci->ctl_name); 56 edac_pci_handle_npe(pci, pci->ctl_name);
57 } 57 }
58 58
59 static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id) 59 static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
60 { 60 {
61 struct edac_pci_ctl_info *pci = dev_id; 61 struct edac_pci_ctl_info *pci = dev_id;
62 struct mv64x60_pci_pdata *pdata = pci->pvt_info; 62 struct mv64x60_pci_pdata *pdata = pci->pvt_info;
63 u32 val; 63 u32 val;
64 64
65 val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE); 65 val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
66 if (!val) 66 if (!val)
67 return IRQ_NONE; 67 return IRQ_NONE;
68 68
69 mv64x60_pci_check(pci); 69 mv64x60_pci_check(pci);
70 70
71 return IRQ_HANDLED; 71 return IRQ_HANDLED;
72 } 72 }
73 73
74 /* 74 /*
75 * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of 75 * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
76 * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as 76 * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
77 * well. IOW, don't set bit 0. 77 * well. IOW, don't set bit 0.
78 */ 78 */
79 79
80 /* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */ 80 /* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
81 static int __init mv64x60_pci_fixup(struct platform_device *pdev) 81 static int __init mv64x60_pci_fixup(struct platform_device *pdev)
82 { 82 {
83 struct resource *r; 83 struct resource *r;
84 void __iomem *pci_serr; 84 void __iomem *pci_serr;
85 85
86 r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 86 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
87 if (!r) { 87 if (!r) {
88 printk(KERN_ERR "%s: Unable to get resource for " 88 printk(KERN_ERR "%s: Unable to get resource for "
89 "PCI err regs\n", __func__); 89 "PCI err regs\n", __func__);
90 return -ENOENT; 90 return -ENOENT;
91 } 91 }
92 92
93 pci_serr = ioremap(r->start, resource_size(r)); 93 pci_serr = ioremap(r->start, resource_size(r));
94 if (!pci_serr) 94 if (!pci_serr)
95 return -ENOMEM; 95 return -ENOMEM;
96 96
97 out_le32(pci_serr, in_le32(pci_serr) & ~0x1); 97 out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
98 iounmap(pci_serr); 98 iounmap(pci_serr);
99 99
100 return 0; 100 return 0;
101 } 101 }
102 102
103 static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev) 103 static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
104 { 104 {
105 struct edac_pci_ctl_info *pci; 105 struct edac_pci_ctl_info *pci;
106 struct mv64x60_pci_pdata *pdata; 106 struct mv64x60_pci_pdata *pdata;
107 struct resource *r; 107 struct resource *r;
108 int res = 0; 108 int res = 0;
109 109
110 if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL)) 110 if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
111 return -ENOMEM; 111 return -ENOMEM;
112 112
113 pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err"); 113 pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
114 if (!pci) 114 if (!pci)
115 return -ENOMEM; 115 return -ENOMEM;
116 116
117 pdata = pci->pvt_info; 117 pdata = pci->pvt_info;
118 118
119 pdata->pci_hose = pdev->id; 119 pdata->pci_hose = pdev->id;
120 pdata->name = "mv64x60_pci_err"; 120 pdata->name = "mv64x60_pci_err";
121 pdata->irq = NO_IRQ; 121 pdata->irq = NO_IRQ;
122 platform_set_drvdata(pdev, pci); 122 platform_set_drvdata(pdev, pci);
123 pci->dev = &pdev->dev; 123 pci->dev = &pdev->dev;
124 pci->dev_name = dev_name(&pdev->dev); 124 pci->dev_name = dev_name(&pdev->dev);
125 pci->mod_name = EDAC_MOD_STR; 125 pci->mod_name = EDAC_MOD_STR;
126 pci->ctl_name = pdata->name; 126 pci->ctl_name = pdata->name;
127 127
128 if (edac_op_state == EDAC_OPSTATE_POLL) 128 if (edac_op_state == EDAC_OPSTATE_POLL)
129 pci->edac_check = mv64x60_pci_check; 129 pci->edac_check = mv64x60_pci_check;
130 130
131 pdata->edac_idx = edac_pci_idx++; 131 pdata->edac_idx = edac_pci_idx++;
132 132
133 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 133 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
134 if (!r) { 134 if (!r) {
135 printk(KERN_ERR "%s: Unable to get resource for " 135 printk(KERN_ERR "%s: Unable to get resource for "
136 "PCI err regs\n", __func__); 136 "PCI err regs\n", __func__);
137 res = -ENOENT; 137 res = -ENOENT;
138 goto err; 138 goto err;
139 } 139 }
140 140
141 if (!devm_request_mem_region(&pdev->dev, 141 if (!devm_request_mem_region(&pdev->dev,
142 r->start, 142 r->start,
143 resource_size(r), 143 resource_size(r),
144 pdata->name)) { 144 pdata->name)) {
145 printk(KERN_ERR "%s: Error while requesting mem region\n", 145 printk(KERN_ERR "%s: Error while requesting mem region\n",
146 __func__); 146 __func__);
147 res = -EBUSY; 147 res = -EBUSY;
148 goto err; 148 goto err;
149 } 149 }
150 150
151 pdata->pci_vbase = devm_ioremap(&pdev->dev, 151 pdata->pci_vbase = devm_ioremap(&pdev->dev,
152 r->start, 152 r->start,
153 resource_size(r)); 153 resource_size(r));
154 if (!pdata->pci_vbase) { 154 if (!pdata->pci_vbase) {
155 printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__); 155 printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
156 res = -ENOMEM; 156 res = -ENOMEM;
157 goto err; 157 goto err;
158 } 158 }
159 159
160 res = mv64x60_pci_fixup(pdev); 160 res = mv64x60_pci_fixup(pdev);
161 if (res < 0) { 161 if (res < 0) {
162 printk(KERN_ERR "%s: PCI fixup failed\n", __func__); 162 printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
163 goto err; 163 goto err;
164 } 164 }
165 165
166 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0); 166 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
167 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0); 167 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
168 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 168 out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
169 MV64X60_PCIx_ERR_MASK_VAL); 169 MV64X60_PCIx_ERR_MASK_VAL);
170 170
171 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { 171 if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
172 debugf3("%s(): failed edac_pci_add_device()\n", __func__); 172 debugf3("%s(): failed edac_pci_add_device()\n", __func__);
173 goto err; 173 goto err;
174 } 174 }
175 175
176 if (edac_op_state == EDAC_OPSTATE_INT) { 176 if (edac_op_state == EDAC_OPSTATE_INT) {
177 pdata->irq = platform_get_irq(pdev, 0); 177 pdata->irq = platform_get_irq(pdev, 0);
178 res = devm_request_irq(&pdev->dev, 178 res = devm_request_irq(&pdev->dev,
179 pdata->irq, 179 pdata->irq,
180 mv64x60_pci_isr, 180 mv64x60_pci_isr,
181 IRQF_DISABLED, 181 IRQF_DISABLED,
182 "[EDAC] PCI err", 182 "[EDAC] PCI err",
183 pci); 183 pci);
184 if (res < 0) { 184 if (res < 0) {
185 printk(KERN_ERR "%s: Unable to request irq %d for " 185 printk(KERN_ERR "%s: Unable to request irq %d for "
186 "MV64x60 PCI ERR\n", __func__, pdata->irq); 186 "MV64x60 PCI ERR\n", __func__, pdata->irq);
187 res = -ENODEV; 187 res = -ENODEV;
188 goto err2; 188 goto err2;
189 } 189 }
190 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n", 190 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
191 pdata->irq); 191 pdata->irq);
192 } 192 }
193 193
194 devres_remove_group(&pdev->dev, mv64x60_pci_err_probe); 194 devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
195 195
196 /* if we get this far, the probe succeeded */ 196 /* if we get this far, the probe succeeded */
197 debugf3("%s(): success\n", __func__); 197 debugf3("%s(): success\n", __func__);
198 198
199 return 0; 199 return 0;
200 200
201 err2: 201 err2:
202 edac_pci_del_device(&pdev->dev); 202 edac_pci_del_device(&pdev->dev);
203 err: 203 err:
204 edac_pci_free_ctl_info(pci); 204 edac_pci_free_ctl_info(pci);
205 devres_release_group(&pdev->dev, mv64x60_pci_err_probe); 205 devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
206 return res; 206 return res;
207 } 207 }
208 208
209 static int mv64x60_pci_err_remove(struct platform_device *pdev) 209 static int mv64x60_pci_err_remove(struct platform_device *pdev)
210 { 210 {
211 struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); 211 struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
212 212
213 debugf0("%s()\n", __func__); 213 debugf0("%s()\n", __func__);
214 214
215 edac_pci_del_device(&pdev->dev); 215 edac_pci_del_device(&pdev->dev);
216 216
217 edac_pci_free_ctl_info(pci); 217 edac_pci_free_ctl_info(pci);
218 218
219 return 0; 219 return 0;
220 } 220 }
221 221
222 static struct platform_driver mv64x60_pci_err_driver = { 222 static struct platform_driver mv64x60_pci_err_driver = {
223 .probe = mv64x60_pci_err_probe, 223 .probe = mv64x60_pci_err_probe,
224 .remove = __devexit_p(mv64x60_pci_err_remove), 224 .remove = __devexit_p(mv64x60_pci_err_remove),
225 .driver = { 225 .driver = {
226 .name = "mv64x60_pci_err", 226 .name = "mv64x60_pci_err",
227 } 227 }
228 }; 228 };
229 229
230 #endif /* CONFIG_PCI */ 230 #endif /* CONFIG_PCI */
231 231
232 /*********************** SRAM err device **********************************/ 232 /*********************** SRAM err device **********************************/
233 static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev) 233 static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
234 { 234 {
235 struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info; 235 struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
236 u32 cause; 236 u32 cause;
237 237
238 cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE); 238 cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
239 if (!cause) 239 if (!cause)
240 return; 240 return;
241 241
242 printk(KERN_ERR "Error in internal SRAM\n"); 242 printk(KERN_ERR "Error in internal SRAM\n");
243 printk(KERN_ERR "Cause register: 0x%08x\n", cause); 243 printk(KERN_ERR "Cause register: 0x%08x\n", cause);
244 printk(KERN_ERR "Address Low: 0x%08x\n", 244 printk(KERN_ERR "Address Low: 0x%08x\n",
245 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO)); 245 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
246 printk(KERN_ERR "Address High: 0x%08x\n", 246 printk(KERN_ERR "Address High: 0x%08x\n",
247 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI)); 247 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
248 printk(KERN_ERR "Data Low: 0x%08x\n", 248 printk(KERN_ERR "Data Low: 0x%08x\n",
249 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO)); 249 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
250 printk(KERN_ERR "Data High: 0x%08x\n", 250 printk(KERN_ERR "Data High: 0x%08x\n",
251 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI)); 251 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
252 printk(KERN_ERR "Parity: 0x%08x\n", 252 printk(KERN_ERR "Parity: 0x%08x\n",
253 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY)); 253 in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
254 out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0); 254 out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
255 255
256 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); 256 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
257 } 257 }
258 258
259 static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id) 259 static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
260 { 260 {
261 struct edac_device_ctl_info *edac_dev = dev_id; 261 struct edac_device_ctl_info *edac_dev = dev_id;
262 struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info; 262 struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
263 u32 cause; 263 u32 cause;
264 264
265 cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE); 265 cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
266 if (!cause) 266 if (!cause)
267 return IRQ_NONE; 267 return IRQ_NONE;
268 268
269 mv64x60_sram_check(edac_dev); 269 mv64x60_sram_check(edac_dev);
270 270
271 return IRQ_HANDLED; 271 return IRQ_HANDLED;
272 } 272 }
273 273
274 static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev) 274 static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
275 { 275 {
276 struct edac_device_ctl_info *edac_dev; 276 struct edac_device_ctl_info *edac_dev;
277 struct mv64x60_sram_pdata *pdata; 277 struct mv64x60_sram_pdata *pdata;
278 struct resource *r; 278 struct resource *r;
279 int res = 0; 279 int res = 0;
280 280
281 if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL)) 281 if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
282 return -ENOMEM; 282 return -ENOMEM;
283 283
284 edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata), 284 edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
285 "sram", 1, NULL, 0, 0, NULL, 0, 285 "sram", 1, NULL, 0, 0, NULL, 0,
286 edac_dev_idx); 286 edac_dev_idx);
287 if (!edac_dev) { 287 if (!edac_dev) {
288 devres_release_group(&pdev->dev, mv64x60_sram_err_probe); 288 devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
289 return -ENOMEM; 289 return -ENOMEM;
290 } 290 }
291 291
292 pdata = edac_dev->pvt_info; 292 pdata = edac_dev->pvt_info;
293 pdata->name = "mv64x60_sram_err"; 293 pdata->name = "mv64x60_sram_err";
294 pdata->irq = NO_IRQ; 294 pdata->irq = NO_IRQ;
295 edac_dev->dev = &pdev->dev; 295 edac_dev->dev = &pdev->dev;
296 platform_set_drvdata(pdev, edac_dev); 296 platform_set_drvdata(pdev, edac_dev);
297 edac_dev->dev_name = dev_name(&pdev->dev); 297 edac_dev->dev_name = dev_name(&pdev->dev);
298 298
299 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 299 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
300 if (!r) { 300 if (!r) {
301 printk(KERN_ERR "%s: Unable to get resource for " 301 printk(KERN_ERR "%s: Unable to get resource for "
302 "SRAM err regs\n", __func__); 302 "SRAM err regs\n", __func__);
303 res = -ENOENT; 303 res = -ENOENT;
304 goto err; 304 goto err;
305 } 305 }
306 306
307 if (!devm_request_mem_region(&pdev->dev, 307 if (!devm_request_mem_region(&pdev->dev,
308 r->start, 308 r->start,
309 resource_size(r), 309 resource_size(r),
310 pdata->name)) { 310 pdata->name)) {
311 printk(KERN_ERR "%s: Error while requesting mem region\n", 311 printk(KERN_ERR "%s: Error while requesting mem region\n",
312 __func__); 312 __func__);
313 res = -EBUSY; 313 res = -EBUSY;
314 goto err; 314 goto err;
315 } 315 }
316 316
317 pdata->sram_vbase = devm_ioremap(&pdev->dev, 317 pdata->sram_vbase = devm_ioremap(&pdev->dev,
318 r->start, 318 r->start,
319 resource_size(r)); 319 resource_size(r));
320 if (!pdata->sram_vbase) { 320 if (!pdata->sram_vbase) {
321 printk(KERN_ERR "%s: Unable to setup SRAM err regs\n", 321 printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
322 __func__); 322 __func__);
323 res = -ENOMEM; 323 res = -ENOMEM;
324 goto err; 324 goto err;
325 } 325 }
326 326
327 /* setup SRAM err registers */ 327 /* setup SRAM err registers */
328 out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0); 328 out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
329 329
330 edac_dev->mod_name = EDAC_MOD_STR; 330 edac_dev->mod_name = EDAC_MOD_STR;
331 edac_dev->ctl_name = pdata->name; 331 edac_dev->ctl_name = pdata->name;
332 332
333 if (edac_op_state == EDAC_OPSTATE_POLL) 333 if (edac_op_state == EDAC_OPSTATE_POLL)
334 edac_dev->edac_check = mv64x60_sram_check; 334 edac_dev->edac_check = mv64x60_sram_check;
335 335
336 pdata->edac_idx = edac_dev_idx++; 336 pdata->edac_idx = edac_dev_idx++;
337 337
338 if (edac_device_add_device(edac_dev) > 0) { 338 if (edac_device_add_device(edac_dev) > 0) {
339 debugf3("%s(): failed edac_device_add_device()\n", __func__); 339 debugf3("%s(): failed edac_device_add_device()\n", __func__);
340 goto err; 340 goto err;
341 } 341 }
342 342
343 if (edac_op_state == EDAC_OPSTATE_INT) { 343 if (edac_op_state == EDAC_OPSTATE_INT) {
344 pdata->irq = platform_get_irq(pdev, 0); 344 pdata->irq = platform_get_irq(pdev, 0);
345 res = devm_request_irq(&pdev->dev, 345 res = devm_request_irq(&pdev->dev,
346 pdata->irq, 346 pdata->irq,
347 mv64x60_sram_isr, 347 mv64x60_sram_isr,
348 IRQF_DISABLED, 348 IRQF_DISABLED,
349 "[EDAC] SRAM err", 349 "[EDAC] SRAM err",
350 edac_dev); 350 edac_dev);
351 if (res < 0) { 351 if (res < 0) {
352 printk(KERN_ERR 352 printk(KERN_ERR
353 "%s: Unable to request irq %d for " 353 "%s: Unable to request irq %d for "
354 "MV64x60 SRAM ERR\n", __func__, pdata->irq); 354 "MV64x60 SRAM ERR\n", __func__, pdata->irq);
355 res = -ENODEV; 355 res = -ENODEV;
356 goto err2; 356 goto err2;
357 } 357 }
358 358
359 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n", 359 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
360 pdata->irq); 360 pdata->irq);
361 } 361 }
362 362
363 devres_remove_group(&pdev->dev, mv64x60_sram_err_probe); 363 devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
364 364
365 /* if we get this far, the probe succeeded */ 365 /* if we get this far, the probe succeeded */
366 debugf3("%s(): success\n", __func__); 366 debugf3("%s(): success\n", __func__);
367 367
368 return 0; 368 return 0;
369 369
370 err2: 370 err2:
371 edac_device_del_device(&pdev->dev); 371 edac_device_del_device(&pdev->dev);
372 err: 372 err:
373 devres_release_group(&pdev->dev, mv64x60_sram_err_probe); 373 devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
374 edac_device_free_ctl_info(edac_dev); 374 edac_device_free_ctl_info(edac_dev);
375 return res; 375 return res;
376 } 376 }
377 377
378 static int mv64x60_sram_err_remove(struct platform_device *pdev) 378 static int mv64x60_sram_err_remove(struct platform_device *pdev)
379 { 379 {
380 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); 380 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
381 381
382 debugf0("%s()\n", __func__); 382 debugf0("%s()\n", __func__);
383 383
384 edac_device_del_device(&pdev->dev); 384 edac_device_del_device(&pdev->dev);
385 edac_device_free_ctl_info(edac_dev); 385 edac_device_free_ctl_info(edac_dev);
386 386
387 return 0; 387 return 0;
388 } 388 }
389 389
390 static struct platform_driver mv64x60_sram_err_driver = { 390 static struct platform_driver mv64x60_sram_err_driver = {
391 .probe = mv64x60_sram_err_probe, 391 .probe = mv64x60_sram_err_probe,
392 .remove = mv64x60_sram_err_remove, 392 .remove = mv64x60_sram_err_remove,
393 .driver = { 393 .driver = {
394 .name = "mv64x60_sram_err", 394 .name = "mv64x60_sram_err",
395 } 395 }
396 }; 396 };
397 397
398 /*********************** CPU err device **********************************/ 398 /*********************** CPU err device **********************************/
399 static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev) 399 static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
400 { 400 {
401 struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info; 401 struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
402 u32 cause; 402 u32 cause;
403 403
404 cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) & 404 cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
405 MV64x60_CPU_CAUSE_MASK; 405 MV64x60_CPU_CAUSE_MASK;
406 if (!cause) 406 if (!cause)
407 return; 407 return;
408 408
409 printk(KERN_ERR "Error on CPU interface\n"); 409 printk(KERN_ERR "Error on CPU interface\n");
410 printk(KERN_ERR "Cause register: 0x%08x\n", cause); 410 printk(KERN_ERR "Cause register: 0x%08x\n", cause);
411 printk(KERN_ERR "Address Low: 0x%08x\n", 411 printk(KERN_ERR "Address Low: 0x%08x\n",
412 in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO)); 412 in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
413 printk(KERN_ERR "Address High: 0x%08x\n", 413 printk(KERN_ERR "Address High: 0x%08x\n",
414 in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI)); 414 in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
415 printk(KERN_ERR "Data Low: 0x%08x\n", 415 printk(KERN_ERR "Data Low: 0x%08x\n",
416 in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO)); 416 in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
417 printk(KERN_ERR "Data High: 0x%08x\n", 417 printk(KERN_ERR "Data High: 0x%08x\n",
418 in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI)); 418 in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
419 printk(KERN_ERR "Parity: 0x%08x\n", 419 printk(KERN_ERR "Parity: 0x%08x\n",
420 in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY)); 420 in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
421 out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0); 421 out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
422 422
423 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); 423 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
424 } 424 }
425 425
426 static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id) 426 static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
427 { 427 {
428 struct edac_device_ctl_info *edac_dev = dev_id; 428 struct edac_device_ctl_info *edac_dev = dev_id;
429 struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info; 429 struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
430 u32 cause; 430 u32 cause;
431 431
432 cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) & 432 cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
433 MV64x60_CPU_CAUSE_MASK; 433 MV64x60_CPU_CAUSE_MASK;
434 if (!cause) 434 if (!cause)
435 return IRQ_NONE; 435 return IRQ_NONE;
436 436
437 mv64x60_cpu_check(edac_dev); 437 mv64x60_cpu_check(edac_dev);
438 438
439 return IRQ_HANDLED; 439 return IRQ_HANDLED;
440 } 440 }
441 441
442 static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev) 442 static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
443 { 443 {
444 struct edac_device_ctl_info *edac_dev; 444 struct edac_device_ctl_info *edac_dev;
445 struct resource *r; 445 struct resource *r;
446 struct mv64x60_cpu_pdata *pdata; 446 struct mv64x60_cpu_pdata *pdata;
447 int res = 0; 447 int res = 0;
448 448
449 if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL)) 449 if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
450 return -ENOMEM; 450 return -ENOMEM;
451 451
452 edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata), 452 edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
453 "cpu", 1, NULL, 0, 0, NULL, 0, 453 "cpu", 1, NULL, 0, 0, NULL, 0,
454 edac_dev_idx); 454 edac_dev_idx);
455 if (!edac_dev) { 455 if (!edac_dev) {
456 devres_release_group(&pdev->dev, mv64x60_cpu_err_probe); 456 devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
457 return -ENOMEM; 457 return -ENOMEM;
458 } 458 }
459 459
460 pdata = edac_dev->pvt_info; 460 pdata = edac_dev->pvt_info;
461 pdata->name = "mv64x60_cpu_err"; 461 pdata->name = "mv64x60_cpu_err";
462 pdata->irq = NO_IRQ; 462 pdata->irq = NO_IRQ;
463 edac_dev->dev = &pdev->dev; 463 edac_dev->dev = &pdev->dev;
464 platform_set_drvdata(pdev, edac_dev); 464 platform_set_drvdata(pdev, edac_dev);
465 edac_dev->dev_name = dev_name(&pdev->dev); 465 edac_dev->dev_name = dev_name(&pdev->dev);
466 466
467 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 467 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
468 if (!r) { 468 if (!r) {
469 printk(KERN_ERR "%s: Unable to get resource for " 469 printk(KERN_ERR "%s: Unable to get resource for "
470 "CPU err regs\n", __func__); 470 "CPU err regs\n", __func__);
471 res = -ENOENT; 471 res = -ENOENT;
472 goto err; 472 goto err;
473 } 473 }
474 474
475 if (!devm_request_mem_region(&pdev->dev, 475 if (!devm_request_mem_region(&pdev->dev,
476 r->start, 476 r->start,
477 resource_size(r), 477 resource_size(r),
478 pdata->name)) { 478 pdata->name)) {
479 printk(KERN_ERR "%s: Error while requesting mem region\n", 479 printk(KERN_ERR "%s: Error while requesting mem region\n",
480 __func__); 480 __func__);
481 res = -EBUSY; 481 res = -EBUSY;
482 goto err; 482 goto err;
483 } 483 }
484 484
485 pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev, 485 pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
486 r->start, 486 r->start,
487 resource_size(r)); 487 resource_size(r));
488 if (!pdata->cpu_vbase[0]) { 488 if (!pdata->cpu_vbase[0]) {
489 printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__); 489 printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
490 res = -ENOMEM; 490 res = -ENOMEM;
491 goto err; 491 goto err;
492 } 492 }
493 493
494 r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 494 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
495 if (!r) { 495 if (!r) {
496 printk(KERN_ERR "%s: Unable to get resource for " 496 printk(KERN_ERR "%s: Unable to get resource for "
497 "CPU err regs\n", __func__); 497 "CPU err regs\n", __func__);
498 res = -ENOENT; 498 res = -ENOENT;
499 goto err; 499 goto err;
500 } 500 }
501 501
502 if (!devm_request_mem_region(&pdev->dev, 502 if (!devm_request_mem_region(&pdev->dev,
503 r->start, 503 r->start,
504 resource_size(r), 504 resource_size(r),
505 pdata->name)) { 505 pdata->name)) {
506 printk(KERN_ERR "%s: Error while requesting mem region\n", 506 printk(KERN_ERR "%s: Error while requesting mem region\n",
507 __func__); 507 __func__);
508 res = -EBUSY; 508 res = -EBUSY;
509 goto err; 509 goto err;
510 } 510 }
511 511
512 pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev, 512 pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
513 r->start, 513 r->start,
514 resource_size(r)); 514 resource_size(r));
515 if (!pdata->cpu_vbase[1]) { 515 if (!pdata->cpu_vbase[1]) {
516 printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__); 516 printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
517 res = -ENOMEM; 517 res = -ENOMEM;
518 goto err; 518 goto err;
519 } 519 }
520 520
521 /* setup CPU err registers */ 521 /* setup CPU err registers */
522 out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0); 522 out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
523 out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0); 523 out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
524 out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff); 524 out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);
525 525
526 edac_dev->mod_name = EDAC_MOD_STR; 526 edac_dev->mod_name = EDAC_MOD_STR;
527 edac_dev->ctl_name = pdata->name; 527 edac_dev->ctl_name = pdata->name;
528 if (edac_op_state == EDAC_OPSTATE_POLL) 528 if (edac_op_state == EDAC_OPSTATE_POLL)
529 edac_dev->edac_check = mv64x60_cpu_check; 529 edac_dev->edac_check = mv64x60_cpu_check;
530 530
531 pdata->edac_idx = edac_dev_idx++; 531 pdata->edac_idx = edac_dev_idx++;
532 532
533 if (edac_device_add_device(edac_dev) > 0) { 533 if (edac_device_add_device(edac_dev) > 0) {
534 debugf3("%s(): failed edac_device_add_device()\n", __func__); 534 debugf3("%s(): failed edac_device_add_device()\n", __func__);
535 goto err; 535 goto err;
536 } 536 }
537 537
538 if (edac_op_state == EDAC_OPSTATE_INT) { 538 if (edac_op_state == EDAC_OPSTATE_INT) {
539 pdata->irq = platform_get_irq(pdev, 0); 539 pdata->irq = platform_get_irq(pdev, 0);
540 res = devm_request_irq(&pdev->dev, 540 res = devm_request_irq(&pdev->dev,
541 pdata->irq, 541 pdata->irq,
542 mv64x60_cpu_isr, 542 mv64x60_cpu_isr,
543 IRQF_DISABLED, 543 IRQF_DISABLED,
544 "[EDAC] CPU err", 544 "[EDAC] CPU err",
545 edac_dev); 545 edac_dev);
546 if (res < 0) { 546 if (res < 0) {
547 printk(KERN_ERR 547 printk(KERN_ERR
548 "%s: Unable to request irq %d for MV64x60 " 548 "%s: Unable to request irq %d for MV64x60 "
549 "CPU ERR\n", __func__, pdata->irq); 549 "CPU ERR\n", __func__, pdata->irq);
550 res = -ENODEV; 550 res = -ENODEV;
551 goto err2; 551 goto err2;
552 } 552 }
553 553
554 printk(KERN_INFO EDAC_MOD_STR 554 printk(KERN_INFO EDAC_MOD_STR
555 " acquired irq %d for CPU Err\n", pdata->irq); 555 " acquired irq %d for CPU Err\n", pdata->irq);
556 } 556 }
557 557
558 devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe); 558 devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
559 559
560 /* get this far and it's successful */ 560 /* get this far and it's successful */
561 debugf3("%s(): success\n", __func__); 561 debugf3("%s(): success\n", __func__);
562 562
563 return 0; 563 return 0;
564 564
565 err2: 565 err2:
566 edac_device_del_device(&pdev->dev); 566 edac_device_del_device(&pdev->dev);
567 err: 567 err:
568 devres_release_group(&pdev->dev, mv64x60_cpu_err_probe); 568 devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
569 edac_device_free_ctl_info(edac_dev); 569 edac_device_free_ctl_info(edac_dev);
570 return res; 570 return res;
571 } 571 }
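
Both probe routines in this file bracket their devm_* allocations with devres_open_group() / devres_remove_group(), so every failure path can undo all of them at once via devres_release_group(). A minimal sketch of that pattern (example_probe and the devm_kzalloc size are invented for illustration):

static int example_probe(struct platform_device *pdev)
{
        void *buf;

        /* all devm_* allocations from here on land in this group */
        if (!devres_open_group(&pdev->dev, example_probe, GFP_KERNEL))
                return -ENOMEM;

        buf = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
        if (!buf) {
                /* one call frees everything acquired since open_group */
                devres_release_group(&pdev->dev, example_probe);
                return -ENOMEM;
        }

        /* success: dissolve the group but keep its resources */
        devres_remove_group(&pdev->dev, example_probe);
        return 0;
}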
572 572
573 static int mv64x60_cpu_err_remove(struct platform_device *pdev) 573 static int mv64x60_cpu_err_remove(struct platform_device *pdev)
574 { 574 {
575 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); 575 struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
576 576
577 debugf0("%s()\n", __func__); 577 debugf0("%s()\n", __func__);
578 578
579 edac_device_del_device(&pdev->dev); 579 edac_device_del_device(&pdev->dev);
580 edac_device_free_ctl_info(edac_dev); 580 edac_device_free_ctl_info(edac_dev);
581 return 0; 581 return 0;
582 } 582 }
583 583
584 static struct platform_driver mv64x60_cpu_err_driver = { 584 static struct platform_driver mv64x60_cpu_err_driver = {
585 .probe = mv64x60_cpu_err_probe, 585 .probe = mv64x60_cpu_err_probe,
586 .remove = mv64x60_cpu_err_remove, 586 .remove = mv64x60_cpu_err_remove,
587 .driver = { 587 .driver = {
588 .name = "mv64x60_cpu_err", 588 .name = "mv64x60_cpu_err",
589 } 589 }
590 }; 590 };
591 591
592 /*********************** DRAM err device **********************************/ 592 /*********************** DRAM err device **********************************/
593 593
594 static void mv64x60_mc_check(struct mem_ctl_info *mci) 594 static void mv64x60_mc_check(struct mem_ctl_info *mci)
595 { 595 {
596 struct mv64x60_mc_pdata *pdata = mci->pvt_info; 596 struct mv64x60_mc_pdata *pdata = mci->pvt_info;
597 u32 reg; 597 u32 reg;
598 u32 err_addr; 598 u32 err_addr;
599 u32 sdram_ecc; 599 u32 sdram_ecc;
600 u32 comp_ecc; 600 u32 comp_ecc;
601 u32 syndrome; 601 u32 syndrome;
602 602
603 reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR); 603 reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
604 if (!reg) 604 if (!reg)
605 return; 605 return;
606 606
607 err_addr = reg & ~0x3; 607 err_addr = reg & ~0x3;
608 sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD); 608 sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
609 comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC); 609 comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
610 syndrome = sdram_ecc ^ comp_ecc; 610 syndrome = sdram_ecc ^ comp_ecc;
611 611
612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ 612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
613 if (!(reg & 0x1)) 613 if (!(reg & 0x1))
614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
615 err_addr >> PAGE_SHIFT, 615 err_addr >> PAGE_SHIFT,
616 err_addr & PAGE_MASK, syndrome, 616 err_addr & PAGE_MASK, syndrome,
617 0, 0, -1, 617 0, 0, -1,
618 mci->ctl_name, "", NULL); 618 mci->ctl_name, "", NULL);
619 else /* 2 bit error, UE */ 619 else /* 2 bit error, UE */
620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
621 err_addr >> PAGE_SHIFT, 621 err_addr >> PAGE_SHIFT,
622 err_addr & PAGE_MASK, 0, 622 err_addr & PAGE_MASK, 0,
623 0, 0, -1, 623 0, 0, -1,
624 mci->ctl_name, "", NULL); 624 mci->ctl_name, "", NULL);
625 625
626 /* clear the error */ 626 /* clear the error */
627 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); 627 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
628 } 628 }
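
mv64x60_mc_check() derives the syndrome by XORing the ECC bits the controller latched against the ones it recomputed, and uses bit 0 of the error-address register to separate a hardware-corrected single-bit error from a two-bit UE. The decode logic in a standalone fragment (register values are invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t reg       = 0x00100004;  /* hypothetical ERR_ADDR readback */
        uint32_t sdram_ecc = 0x5a;        /* ECC bits as received */
        uint32_t comp_ecc  = 0x5b;        /* ECC bits as recomputed */

        uint32_t err_addr = reg & ~0x3u;           /* bits 1:0 carry status */
        uint32_t syndrome = sdram_ecc ^ comp_ecc;  /* set bits = flipped bits */

        if (!(reg & 0x1))   /* bit 0 clear: single-bit, corrected by HW */
                printf("CE at 0x%08x, syndrome 0x%02x\n",
                       (unsigned)err_addr, (unsigned)syndrome);
        else                /* bit 0 set: two-bit, uncorrectable */
                printf("UE at 0x%08x\n", (unsigned)err_addr);
        return 0;
}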
629 629
630 static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id) 630 static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
631 { 631 {
632 struct mem_ctl_info *mci = dev_id; 632 struct mem_ctl_info *mci = dev_id;
633 struct mv64x60_mc_pdata *pdata = mci->pvt_info; 633 struct mv64x60_mc_pdata *pdata = mci->pvt_info;
634 u32 reg; 634 u32 reg;
635 635
636 reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR); 636 reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
637 if (!reg) 637 if (!reg)
638 return IRQ_NONE; 638 return IRQ_NONE;
639 639
640 /* writing 0's to the ECC err addr in check function clears irq */ 640 /* writing 0's to the ECC err addr in check function clears irq */
641 mv64x60_mc_check(mci); 641 mv64x60_mc_check(mci);
642 642
643 return IRQ_HANDLED; 643 return IRQ_HANDLED;
644 } 644 }
645 645
646 static void get_total_mem(struct mv64x60_mc_pdata *pdata) 646 static void get_total_mem(struct mv64x60_mc_pdata *pdata)
647 { 647 {
648 struct device_node *np = NULL; 648 struct device_node *np = NULL;
649 const unsigned int *reg; 649 const unsigned int *reg;
650 650
651 np = of_find_node_by_type(NULL, "memory"); 651 np = of_find_node_by_type(NULL, "memory");
652 if (!np) 652 if (!np)
653 return; 653 return;
654 654
655 reg = of_get_property(np, "reg", NULL); 655 reg = of_get_property(np, "reg", NULL);
656 656
657 pdata->total_mem = reg[1]; 657 pdata->total_mem = reg[1];
658 } 658 }
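
get_total_mem() dereferences the "reg" property without a NULL or length check, assumes the second 32-bit cell is the size (one address cell plus one size cell), and never drops the node reference. A more defensive sketch under the same cell-layout assumption (not part of the driver):

static void get_total_mem_checked(struct mv64x60_mc_pdata *pdata)
{
        struct device_node *np;
        const __be32 *reg;
        int len;

        np = of_find_node_by_type(NULL, "memory");
        if (!np)
                return;

        /* expect at least <address size>, one 32-bit cell each */
        reg = of_get_property(np, "reg", &len);
        if (reg && len >= 2 * sizeof(*reg))
                pdata->total_mem = be32_to_cpup(reg + 1);

        of_node_put(np);
}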
659 659
660 static void mv64x60_init_csrows(struct mem_ctl_info *mci, 660 static void mv64x60_init_csrows(struct mem_ctl_info *mci,
661 struct mv64x60_mc_pdata *pdata) 661 struct mv64x60_mc_pdata *pdata)
662 { 662 {
663 struct csrow_info *csrow; 663 struct csrow_info *csrow;
664 struct dimm_info *dimm; 664 struct dimm_info *dimm;
665 665
666 u32 devtype; 666 u32 devtype;
667 u32 ctl; 667 u32 ctl;
668 668
669 get_total_mem(pdata); 669 get_total_mem(pdata);
670 670
671 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); 671 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
672 672
673 csrow = &mci->csrows[0]; 673 csrow = mci->csrows[0];
674 dimm = csrow->channels[0].dimm; 674 dimm = csrow->channels[0]->dimm;
675 675
676 dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT; 676 dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
677 dimm->grain = 8; 677 dimm->grain = 8;
678 678
679 dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR; 679 dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
680 680
681 devtype = (ctl >> 20) & 0x3; 681 devtype = (ctl >> 20) & 0x3;
682 switch (devtype) { 682 switch (devtype) {
683 case 0x0: 683 case 0x0:
684 dimm->dtype = DEV_X32; 684 dimm->dtype = DEV_X32;
685 break; 685 break;
686 case 0x2: /* could be X8 too, but no way to tell */ 686 case 0x2: /* could be X8 too, but no way to tell */
687 dimm->dtype = DEV_X16; 687 dimm->dtype = DEV_X16;
688 break; 688 break;
689 case 0x3: 689 case 0x3:
690 dimm->dtype = DEV_X4; 690 dimm->dtype = DEV_X4;
691 break; 691 break;
692 default: 692 default:
693 dimm->dtype = DEV_UNKNOWN; 693 dimm->dtype = DEV_UNKNOWN;
694 break; 694 break;
695 } 695 }
696 696
697 dimm->edac_mode = EDAC_SECDED; 697 dimm->edac_mode = EDAC_SECDED;
698 } 698 }
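
The hunks above drop the '&' and turn '.' into '->' on the way to the first DIMM: csrows[] and channels[] now hold pointers to separately allocated objects instead of embedded structs. A simplified sketch of the resulting access chain (struct names and fields are invented stand-ins for the real EDAC types):

struct ex_dimm    { unsigned long grain; };
struct ex_channel { struct ex_dimm *dimm; };
struct ex_csrow   { struct ex_channel **channels; };
struct ex_mci     { struct ex_csrow **csrows; };

static void ex_touch_first_dimm(struct ex_mci *mci)
{
        /* was:  &mci->csrows[0] and channels[0].dimm  (embedded objects) */
        /* now:  every level is a pointer dereference                     */
        struct ex_dimm *dimm = mci->csrows[0]->channels[0]->dimm;

        dimm->grain = 8;
}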
699 699
700 static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) 700 static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
701 { 701 {
702 struct mem_ctl_info *mci; 702 struct mem_ctl_info *mci;
703 struct edac_mc_layer layers[2]; 703 struct edac_mc_layer layers[2];
704 struct mv64x60_mc_pdata *pdata; 704 struct mv64x60_mc_pdata *pdata;
705 struct resource *r; 705 struct resource *r;
706 u32 ctl; 706 u32 ctl;
707 int res = 0; 707 int res = 0;
708 708
709 if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL)) 709 if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
710 return -ENOMEM; 710 return -ENOMEM;
711 711
712 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 712 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
713 layers[0].size = 1; 713 layers[0].size = 1;
714 layers[0].is_virt_csrow = true; 714 layers[0].is_virt_csrow = true;
715 layers[1].type = EDAC_MC_LAYER_CHANNEL; 715 layers[1].type = EDAC_MC_LAYER_CHANNEL;
716 layers[1].size = 1; 716 layers[1].size = 1;
717 layers[1].is_virt_csrow = false; 717 layers[1].is_virt_csrow = false;
718 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, 718 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
719 sizeof(struct mv64x60_mc_pdata)); 719 sizeof(struct mv64x60_mc_pdata));
720 if (!mci) { 720 if (!mci) {
721 printk(KERN_ERR "%s: No memory for MC err\n", __func__); 721 printk(KERN_ERR "%s: No memory for MC err\n", __func__);
722 devres_release_group(&pdev->dev, mv64x60_mc_err_probe); 722 devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
723 return -ENOMEM; 723 return -ENOMEM;
724 } 724 }
725 725
726 pdata = mci->pvt_info; 726 pdata = mci->pvt_info;
727 mci->pdev = &pdev->dev; 727 mci->pdev = &pdev->dev;
728 platform_set_drvdata(pdev, mci); 728 platform_set_drvdata(pdev, mci);
729 pdata->name = "mv64x60_mc_err"; 729 pdata->name = "mv64x60_mc_err";
730 pdata->irq = NO_IRQ; 730 pdata->irq = NO_IRQ;
731 mci->dev_name = dev_name(&pdev->dev); 731 mci->dev_name = dev_name(&pdev->dev);
732 pdata->edac_idx = edac_mc_idx++; 732 pdata->edac_idx = edac_mc_idx++;
733 733
734 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 734 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
735 if (!r) { 735 if (!r) {
736 printk(KERN_ERR "%s: Unable to get resource for " 736 printk(KERN_ERR "%s: Unable to get resource for "
737 "MC err regs\n", __func__); 737 "MC err regs\n", __func__);
738 res = -ENOENT; 738 res = -ENOENT;
739 goto err; 739 goto err;
740 } 740 }
741 741
742 if (!devm_request_mem_region(&pdev->dev, 742 if (!devm_request_mem_region(&pdev->dev,
743 r->start, 743 r->start,
744 resource_size(r), 744 resource_size(r),
745 pdata->name)) { 745 pdata->name)) {
746 printk(KERN_ERR "%s: Error while requesting mem region\n", 746 printk(KERN_ERR "%s: Error while requesting mem region\n",
747 __func__); 747 __func__);
748 res = -EBUSY; 748 res = -EBUSY;
749 goto err; 749 goto err;
750 } 750 }
751 751
752 pdata->mc_vbase = devm_ioremap(&pdev->dev, 752 pdata->mc_vbase = devm_ioremap(&pdev->dev,
753 r->start, 753 r->start,
754 resource_size(r)); 754 resource_size(r));
755 if (!pdata->mc_vbase) { 755 if (!pdata->mc_vbase) {
756 printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__); 756 printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
757 res = -ENOMEM; 757 res = -ENOMEM;
758 goto err; 758 goto err;
759 } 759 }
760 760
761 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); 761 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
762 if (!(ctl & MV64X60_SDRAM_ECC)) { 762 if (!(ctl & MV64X60_SDRAM_ECC)) {
763 /* Non-ECC RAM? */ 763 /* Non-ECC RAM? */
764 printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); 764 printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
765 res = -ENODEV; 765 res = -ENODEV;
766 goto err2; 766 goto err2;
767 } 767 }
768 768
769 debugf3("%s(): init mci\n", __func__); 769 debugf3("%s(): init mci\n", __func__);
770 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 770 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
771 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 771 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
772 mci->edac_cap = EDAC_FLAG_SECDED; 772 mci->edac_cap = EDAC_FLAG_SECDED;
773 mci->mod_name = EDAC_MOD_STR; 773 mci->mod_name = EDAC_MOD_STR;
774 mci->mod_ver = MV64x60_REVISION; 774 mci->mod_ver = MV64x60_REVISION;
775 mci->ctl_name = mv64x60_ctl_name; 775 mci->ctl_name = mv64x60_ctl_name;
776 776
777 if (edac_op_state == EDAC_OPSTATE_POLL) 777 if (edac_op_state == EDAC_OPSTATE_POLL)
778 mci->edac_check = mv64x60_mc_check; 778 mci->edac_check = mv64x60_mc_check;
779 779
780 mci->ctl_page_to_phys = NULL; 780 mci->ctl_page_to_phys = NULL;
781 781
782 mci->scrub_mode = SCRUB_SW_SRC; 782 mci->scrub_mode = SCRUB_SW_SRC;
783 783
784 mv64x60_init_csrows(mci, pdata); 784 mv64x60_init_csrows(mci, pdata);
785 785
786 /* setup MC registers */ 786 /* setup MC registers */
787 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); 787 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
788 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL); 788 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
789 ctl = (ctl & 0xff00ffff) | 0x10000; 789 ctl = (ctl & 0xff00ffff) | 0x10000;
790 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); 790 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
791 791
792 if (edac_mc_add_mc(mci)) { 792 if (edac_mc_add_mc(mci)) {
793 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 793 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
794 goto err; 794 goto err;
795 } 795 }
796 796
797 if (edac_op_state == EDAC_OPSTATE_INT) { 797 if (edac_op_state == EDAC_OPSTATE_INT) {
798 /* acquire interrupt that reports errors */ 798 /* acquire interrupt that reports errors */
799 pdata->irq = platform_get_irq(pdev, 0); 799 pdata->irq = platform_get_irq(pdev, 0);
800 res = devm_request_irq(&pdev->dev, 800 res = devm_request_irq(&pdev->dev,
801 pdata->irq, 801 pdata->irq,
802 mv64x60_mc_isr, 802 mv64x60_mc_isr,
803 IRQF_DISABLED, 803 IRQF_DISABLED,
804 "[EDAC] MC err", 804 "[EDAC] MC err",
805 mci); 805 mci);
806 if (res < 0) { 806 if (res < 0) {
807 printk(KERN_ERR "%s: Unable to request irq %d for " 807 printk(KERN_ERR "%s: Unable to request irq %d for "
808 "MV64x60 DRAM ERR\n", __func__, pdata->irq); 808 "MV64x60 DRAM ERR\n", __func__, pdata->irq);
809 res = -ENODEV; 809 res = -ENODEV;
810 goto err2; 810 goto err2;
811 } 811 }
812 812
813 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n", 813 printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
814 pdata->irq); 814 pdata->irq);
815 } 815 }
816 816
817 /* get this far and it's successful */ 817 /* get this far and it's successful */
818 debugf3("%s(): success\n", __func__); 818 debugf3("%s(): success\n", __func__);
819 819
820 return 0; 820 return 0;
821 821
822 err2: 822 err2:
823 edac_mc_del_mc(&pdev->dev); 823 edac_mc_del_mc(&pdev->dev);
824 err: 824 err:
825 devres_release_group(&pdev->dev, mv64x60_mc_err_probe); 825 devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
826 edac_mc_free(mci); 826 edac_mc_free(mci);
827 return res; 827 return res;
828 } 828 }
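
Arming ECC reporting above is a read-modify-write: '(ctl & 0xff00ffff) | 0x10000' clears bits 23:16 of the control word and then sets bit 16. The same masking in isolation (readback value invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t ctl = 0x12ab34cd;              /* hypothetical readback */

        ctl = (ctl & 0xff00ffff) | 0x10000;     /* bits 23:16 become 0x01 */
        printf("new ctl = 0x%08x\n", (unsigned)ctl);  /* prints 0x120134cd */
        return 0;
}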
829 829
830 static int mv64x60_mc_err_remove(struct platform_device *pdev) 830 static int mv64x60_mc_err_remove(struct platform_device *pdev)
831 { 831 {
832 struct mem_ctl_info *mci = platform_get_drvdata(pdev); 832 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
833 833
834 debugf0("%s()\n", __func__); 834 debugf0("%s()\n", __func__);
835 835
836 edac_mc_del_mc(&pdev->dev); 836 edac_mc_del_mc(&pdev->dev);
837 edac_mc_free(mci); 837 edac_mc_free(mci);
838 return 0; 838 return 0;
839 } 839 }
840 840
841 static struct platform_driver mv64x60_mc_err_driver = { 841 static struct platform_driver mv64x60_mc_err_driver = {
842 .probe = mv64x60_mc_err_probe, 842 .probe = mv64x60_mc_err_probe,
843 .remove = mv64x60_mc_err_remove, 843 .remove = mv64x60_mc_err_remove,
844 .driver = { 844 .driver = {
845 .name = "mv64x60_mc_err", 845 .name = "mv64x60_mc_err",
846 } 846 }
847 }; 847 };
848 848
849 static int __init mv64x60_edac_init(void) 849 static int __init mv64x60_edac_init(void)
850 { 850 {
851 int ret = 0; 851 int ret = 0;
852 852
853 printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n"); 853 printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
854 printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n"); 854 printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");
855 /* make sure error reporting method is sane */ 855 /* make sure error reporting method is sane */
856 switch (edac_op_state) { 856 switch (edac_op_state) {
857 case EDAC_OPSTATE_POLL: 857 case EDAC_OPSTATE_POLL:
858 case EDAC_OPSTATE_INT: 858 case EDAC_OPSTATE_INT:
859 break; 859 break;
860 default: 860 default:
861 edac_op_state = EDAC_OPSTATE_INT; 861 edac_op_state = EDAC_OPSTATE_INT;
862 break; 862 break;
863 } 863 }
864 864
865 ret = platform_driver_register(&mv64x60_mc_err_driver); 865 ret = platform_driver_register(&mv64x60_mc_err_driver);
866 if (ret) 866 if (ret)
867 printk(KERN_WARNING EDAC_MOD_STR "MC err failed to register\n"); 867 printk(KERN_WARNING EDAC_MOD_STR "MC err failed to register\n");
868 868
869 ret = platform_driver_register(&mv64x60_cpu_err_driver); 869 ret = platform_driver_register(&mv64x60_cpu_err_driver);
870 if (ret) 870 if (ret)
871 printk(KERN_WARNING EDAC_MOD_STR 871 printk(KERN_WARNING EDAC_MOD_STR
872 "CPU err failed to register\n"); 872 "CPU err failed to register\n");
873 873
874 ret = platform_driver_register(&mv64x60_sram_err_driver); 874 ret = platform_driver_register(&mv64x60_sram_err_driver);
875 if (ret) 875 if (ret)
876 printk(KERN_WARNING EDAC_MOD_STR 876 printk(KERN_WARNING EDAC_MOD_STR
877 "SRAM err failed to register\n"); 877 "SRAM err failed to register\n");
878 878
879 #ifdef CONFIG_PCI 879 #ifdef CONFIG_PCI
880 ret = platform_driver_register(&mv64x60_pci_err_driver); 880 ret = platform_driver_register(&mv64x60_pci_err_driver);
881 if (ret) 881 if (ret)
882 printk(KERN_WARNING EDAC_MOD_STR 882 printk(KERN_WARNING EDAC_MOD_STR
883 "PCI err failed to register\n"); 883 "PCI err failed to register\n");
884 #endif 884 #endif
885 885
886 return ret; 886 return ret;
887 } 887 }
888 module_init(mv64x60_edac_init); 888 module_init(mv64x60_edac_init);
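
Note that mv64x60_edac_init() reuses 'ret' for each platform_driver_register() call, so the module's return value only reflects the last registration; an earlier failure is reduced to a warning. If that is not intended, a variant can latch the first error (hypothetical sketch, not in the driver):

static int __init mv64x60_edac_init_strict(void)
{
        int first = 0, ret;

        ret = platform_driver_register(&mv64x60_mc_err_driver);
        if (ret && !first)
                first = ret;

        ret = platform_driver_register(&mv64x60_cpu_err_driver);
        if (ret && !first)
                first = ret;

        /* ... remaining drivers, same pattern ... */

        return first;   /* 0 only if every registration succeeded */
}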
889 889
890 static void __exit mv64x60_edac_exit(void) 890 static void __exit mv64x60_edac_exit(void)
891 { 891 {
892 #ifdef CONFIG_PCI 892 #ifdef CONFIG_PCI
893 platform_driver_unregister(&mv64x60_pci_err_driver); 893 platform_driver_unregister(&mv64x60_pci_err_driver);
894 #endif 894 #endif
895 platform_driver_unregister(&mv64x60_sram_err_driver); 895 platform_driver_unregister(&mv64x60_sram_err_driver);
896 platform_driver_unregister(&mv64x60_cpu_err_driver); 896 platform_driver_unregister(&mv64x60_cpu_err_driver);
897 platform_driver_unregister(&mv64x60_mc_err_driver); 897 platform_driver_unregister(&mv64x60_mc_err_driver);
898 } 898 }
899 module_exit(mv64x60_edac_exit); 899 module_exit(mv64x60_edac_exit);
900 900
901 MODULE_LICENSE("GPL"); 901 MODULE_LICENSE("GPL");
902 MODULE_AUTHOR("Montavista Software, Inc."); 902 MODULE_AUTHOR("Montavista Software, Inc.");
903 module_param(edac_op_state, int, 0444); 903 module_param(edac_op_state, int, 0444);
904 MODULE_PARM_DESC(edac_op_state, 904 MODULE_PARM_DESC(edac_op_state,
905 "EDAC Error Reporting state: 0=Poll, 2=Interrupt"); 905 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
906 906
drivers/edac/pasemi_edac.c
1 /* 1 /*
2 * Copyright (C) 2006-2007 PA Semi, Inc 2 * Copyright (C) 2006-2007 PA Semi, Inc
3 * 3 *
4 * Author: Egor Martovetsky <egor@pasemi.com> 4 * Author: Egor Martovetsky <egor@pasemi.com>
5 * Maintained by: Olof Johansson <olof@lixom.net> 5 * Maintained by: Olof Johansson <olof@lixom.net>
6 * 6 *
7 * Driver for the PWRficient onchip memory controllers 7 * Driver for the PWRficient onchip memory controllers
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22 22
23 23
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <linux/init.h> 25 #include <linux/init.h>
26 #include <linux/pci.h> 26 #include <linux/pci.h>
27 #include <linux/pci_ids.h> 27 #include <linux/pci_ids.h>
28 #include <linux/edac.h> 28 #include <linux/edac.h>
29 #include "edac_core.h" 29 #include "edac_core.h"
30 30
31 #define MODULE_NAME "pasemi_edac" 31 #define MODULE_NAME "pasemi_edac"
32 32
33 #define MCCFG_MCEN 0x300 33 #define MCCFG_MCEN 0x300
34 #define MCCFG_MCEN_MMC_EN 0x00000001 34 #define MCCFG_MCEN_MMC_EN 0x00000001
35 #define MCCFG_ERRCOR 0x388 35 #define MCCFG_ERRCOR 0x388
36 #define MCCFG_ERRCOR_RNK_FAIL_DET_EN 0x00000100 36 #define MCCFG_ERRCOR_RNK_FAIL_DET_EN 0x00000100
37 #define MCCFG_ERRCOR_ECC_GEN_EN 0x00000010 37 #define MCCFG_ERRCOR_ECC_GEN_EN 0x00000010
38 #define MCCFG_ERRCOR_ECC_CRR_EN 0x00000001 38 #define MCCFG_ERRCOR_ECC_CRR_EN 0x00000001
39 #define MCCFG_SCRUB 0x384 39 #define MCCFG_SCRUB 0x384
40 #define MCCFG_SCRUB_RGLR_SCRB_EN 0x00000001 40 #define MCCFG_SCRUB_RGLR_SCRB_EN 0x00000001
41 #define MCDEBUG_ERRCTL1 0x728 41 #define MCDEBUG_ERRCTL1 0x728
42 #define MCDEBUG_ERRCTL1_RFL_LOG_EN 0x00080000 42 #define MCDEBUG_ERRCTL1_RFL_LOG_EN 0x00080000
43 #define MCDEBUG_ERRCTL1_MBE_LOG_EN 0x00040000 43 #define MCDEBUG_ERRCTL1_MBE_LOG_EN 0x00040000
44 #define MCDEBUG_ERRCTL1_SBE_LOG_EN 0x00020000 44 #define MCDEBUG_ERRCTL1_SBE_LOG_EN 0x00020000
45 #define MCDEBUG_ERRSTA 0x730 45 #define MCDEBUG_ERRSTA 0x730
46 #define MCDEBUG_ERRSTA_RFL_STATUS 0x00000004 46 #define MCDEBUG_ERRSTA_RFL_STATUS 0x00000004
47 #define MCDEBUG_ERRSTA_MBE_STATUS 0x00000002 47 #define MCDEBUG_ERRSTA_MBE_STATUS 0x00000002
48 #define MCDEBUG_ERRSTA_SBE_STATUS 0x00000001 48 #define MCDEBUG_ERRSTA_SBE_STATUS 0x00000001
49 #define MCDEBUG_ERRCNT1 0x734 49 #define MCDEBUG_ERRCNT1 0x734
50 #define MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO 0x00000080 50 #define MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO 0x00000080
51 #define MCDEBUG_ERRLOG1A 0x738 51 #define MCDEBUG_ERRLOG1A 0x738
52 #define MCDEBUG_ERRLOG1A_MERR_TYPE_M 0x30000000 52 #define MCDEBUG_ERRLOG1A_MERR_TYPE_M 0x30000000
53 #define MCDEBUG_ERRLOG1A_MERR_TYPE_NONE 0x00000000 53 #define MCDEBUG_ERRLOG1A_MERR_TYPE_NONE 0x00000000
54 #define MCDEBUG_ERRLOG1A_MERR_TYPE_SBE 0x10000000 54 #define MCDEBUG_ERRLOG1A_MERR_TYPE_SBE 0x10000000
55 #define MCDEBUG_ERRLOG1A_MERR_TYPE_MBE 0x20000000 55 #define MCDEBUG_ERRLOG1A_MERR_TYPE_MBE 0x20000000
56 #define MCDEBUG_ERRLOG1A_MERR_TYPE_RFL 0x30000000 56 #define MCDEBUG_ERRLOG1A_MERR_TYPE_RFL 0x30000000
57 #define MCDEBUG_ERRLOG1A_MERR_BA_M 0x00700000 57 #define MCDEBUG_ERRLOG1A_MERR_BA_M 0x00700000
58 #define MCDEBUG_ERRLOG1A_MERR_BA_S 20 58 #define MCDEBUG_ERRLOG1A_MERR_BA_S 20
59 #define MCDEBUG_ERRLOG1A_MERR_CS_M 0x00070000 59 #define MCDEBUG_ERRLOG1A_MERR_CS_M 0x00070000
60 #define MCDEBUG_ERRLOG1A_MERR_CS_S 16 60 #define MCDEBUG_ERRLOG1A_MERR_CS_S 16
61 #define MCDEBUG_ERRLOG1A_SYNDROME_M 0x0000ffff 61 #define MCDEBUG_ERRLOG1A_SYNDROME_M 0x0000ffff
62 #define MCDRAM_RANKCFG 0x114 62 #define MCDRAM_RANKCFG 0x114
63 #define MCDRAM_RANKCFG_EN 0x00000001 63 #define MCDRAM_RANKCFG_EN 0x00000001
64 #define MCDRAM_RANKCFG_TYPE_SIZE_M 0x000001c0 64 #define MCDRAM_RANKCFG_TYPE_SIZE_M 0x000001c0
65 #define MCDRAM_RANKCFG_TYPE_SIZE_S 6 65 #define MCDRAM_RANKCFG_TYPE_SIZE_S 6
66 66
67 #define PASEMI_EDAC_NR_CSROWS 8 67 #define PASEMI_EDAC_NR_CSROWS 8
68 #define PASEMI_EDAC_NR_CHANS 1 68 #define PASEMI_EDAC_NR_CHANS 1
69 #define PASEMI_EDAC_ERROR_GRAIN 64 69 #define PASEMI_EDAC_ERROR_GRAIN 64
70 70
71 static int last_page_in_mmc; 71 static int last_page_in_mmc;
72 static int system_mmc_id; 72 static int system_mmc_id;
73 73
74 74
75 static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) 75 static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
76 { 76 {
77 struct pci_dev *pdev = to_pci_dev(mci->pdev); 77 struct pci_dev *pdev = to_pci_dev(mci->pdev);
78 u32 tmp; 78 u32 tmp;
79 79
80 pci_read_config_dword(pdev, MCDEBUG_ERRSTA, 80 pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
81 &tmp); 81 &tmp);
82 82
83 tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS 83 tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS
84 | MCDEBUG_ERRSTA_SBE_STATUS); 84 | MCDEBUG_ERRSTA_SBE_STATUS);
85 85
86 if (tmp) { 86 if (tmp) {
87 if (tmp & MCDEBUG_ERRSTA_SBE_STATUS) 87 if (tmp & MCDEBUG_ERRSTA_SBE_STATUS)
88 pci_write_config_dword(pdev, MCDEBUG_ERRCNT1, 88 pci_write_config_dword(pdev, MCDEBUG_ERRCNT1,
89 MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO); 89 MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO);
90 pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp); 90 pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp);
91 } 91 }
92 92
93 return tmp; 93 return tmp;
94 } 94 }
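
pasemi_edac_get_error_info() acknowledges latched status by writing back exactly the bits it found set (write-one-to-clear), resetting the SBE counter first when its overflow bit is up. The ack pattern in a standalone fragment (the register emulation is invented):

#include <stdio.h>
#include <stdint.h>

#define ERRSTA_RFL 0x4u
#define ERRSTA_MBE 0x2u
#define ERRSTA_SBE 0x1u

static uint32_t errsta = ERRSTA_SBE | ERRSTA_MBE;   /* fake latched status */

static void w1c(uint32_t bits)      /* write-one-to-clear semantics */
{
        errsta &= ~bits;
}

int main(void)
{
        uint32_t tmp = errsta & (ERRSTA_RFL | ERRSTA_MBE | ERRSTA_SBE);

        if (tmp)
                w1c(tmp);           /* ack exactly what was observed */

        printf("seen=0x%x, left=0x%x\n", (unsigned)tmp, (unsigned)errsta);
        return 0;
}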
95 95
96 static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) 96 static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
97 { 97 {
98 struct pci_dev *pdev = to_pci_dev(mci->pdev); 98 struct pci_dev *pdev = to_pci_dev(mci->pdev);
99 u32 errlog1a; 99 u32 errlog1a;
100 u32 cs; 100 u32 cs;
101 101
102 if (!errsta) 102 if (!errsta)
103 return; 103 return;
104 104
105 pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a); 105 pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a);
106 106
107 cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >> 107 cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >>
108 MCDEBUG_ERRLOG1A_MERR_CS_S; 108 MCDEBUG_ERRLOG1A_MERR_CS_S;
109 109
110 /* uncorrectable/multi-bit errors */ 110 /* uncorrectable/multi-bit errors */
111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | 111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
112 MCDEBUG_ERRSTA_RFL_STATUS)) { 112 MCDEBUG_ERRSTA_RFL_STATUS)) {
113 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 113 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
114 mci->csrows[cs].first_page, 0, 0, 114 mci->csrows[cs]->first_page, 0, 0,
115 cs, 0, -1, mci->ctl_name, "", NULL); 115 cs, 0, -1, mci->ctl_name, "", NULL);
116 } 116 }
117 117
118 /* correctable/single-bit errors */ 118 /* correctable/single-bit errors */
119 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) 119 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
120 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 120 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
121 mci->csrows[cs].first_page, 0, 0, 121 mci->csrows[cs]->first_page, 0, 0,
122 cs, 0, -1, mci->ctl_name, "", NULL); 122 cs, 0, -1, mci->ctl_name, "", NULL);
123 } 123 }
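
The chip-select lookup above is a classic mask-and-shift using the MCDEBUG_ERRLOG1A_MERR_CS_M/_S definitions. The same extraction in isolation (log word invented):

#include <stdio.h>
#include <stdint.h>

#define MERR_CS_M 0x00070000u   /* chip-select field mask (bits 18:16) */
#define MERR_CS_S 16            /* chip-select field shift */

int main(void)
{
        uint32_t errlog1a = 0x10051234;   /* hypothetical ERRLOG1A word */
        uint32_t cs = (errlog1a & MERR_CS_M) >> MERR_CS_S;

        printf("failing chip select: %u\n", (unsigned)cs);   /* prints 5 */
        return 0;
}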
124 124
125 static void pasemi_edac_check(struct mem_ctl_info *mci) 125 static void pasemi_edac_check(struct mem_ctl_info *mci)
126 { 126 {
127 u32 errsta; 127 u32 errsta;
128 128
129 errsta = pasemi_edac_get_error_info(mci); 129 errsta = pasemi_edac_get_error_info(mci);
130 if (errsta) 130 if (errsta)
131 pasemi_edac_process_error_info(mci, errsta); 131 pasemi_edac_process_error_info(mci, errsta);
132 } 132 }
133 133
134 static int pasemi_edac_init_csrows(struct mem_ctl_info *mci, 134 static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
135 struct pci_dev *pdev, 135 struct pci_dev *pdev,
136 enum edac_type edac_mode) 136 enum edac_type edac_mode)
137 { 137 {
138 struct csrow_info *csrow; 138 struct csrow_info *csrow;
139 struct dimm_info *dimm; 139 struct dimm_info *dimm;
140 u32 rankcfg; 140 u32 rankcfg;
141 int index; 141 int index;
142 142
143 for (index = 0; index < mci->nr_csrows; index++) { 143 for (index = 0; index < mci->nr_csrows; index++) {
144 csrow = &mci->csrows[index]; 144 csrow = mci->csrows[index];
145 dimm = csrow->channels[0].dimm; 145 dimm = csrow->channels[0]->dimm;
146 146
147 pci_read_config_dword(pdev, 147 pci_read_config_dword(pdev,
148 MCDRAM_RANKCFG + (index * 12), 148 MCDRAM_RANKCFG + (index * 12),
149 &rankcfg); 149 &rankcfg);
150 150
151 if (!(rankcfg & MCDRAM_RANKCFG_EN)) 151 if (!(rankcfg & MCDRAM_RANKCFG_EN))
152 continue; 152 continue;
153 153
154 switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >> 154 switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
155 MCDRAM_RANKCFG_TYPE_SIZE_S) { 155 MCDRAM_RANKCFG_TYPE_SIZE_S) {
156 case 0: 156 case 0:
157 dimm->nr_pages = 128 << (20 - PAGE_SHIFT); 157 dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
158 break; 158 break;
159 case 1: 159 case 1:
160 dimm->nr_pages = 256 << (20 - PAGE_SHIFT); 160 dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
161 break; 161 break;
162 case 2: 162 case 2:
163 case 3: 163 case 3:
164 dimm->nr_pages = 512 << (20 - PAGE_SHIFT); 164 dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
165 break; 165 break;
166 case 4: 166 case 4:
167 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT); 167 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
168 break; 168 break;
169 case 5: 169 case 5:
170 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT); 170 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
171 break; 171 break;
172 default: 172 default:
173 edac_mc_printk(mci, KERN_ERR, 173 edac_mc_printk(mci, KERN_ERR,
174 "Unrecognized Rank Config. rankcfg=%u\n", 174 "Unrecognized Rank Config. rankcfg=%u\n",
175 rankcfg); 175 rankcfg);
176 return -EINVAL; 176 return -EINVAL;
177 } 177 }
178 178
179 csrow->first_page = last_page_in_mmc; 179 csrow->first_page = last_page_in_mmc;
180 csrow->last_page = csrow->first_page + dimm->nr_pages - 1; 180 csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
181 last_page_in_mmc += dimm->nr_pages; 181 last_page_in_mmc += dimm->nr_pages;
182 csrow->page_mask = 0; 182 csrow->page_mask = 0;
183 dimm->grain = PASEMI_EDAC_ERROR_GRAIN; 183 dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
184 dimm->mtype = MEM_DDR; 184 dimm->mtype = MEM_DDR;
185 dimm->dtype = DEV_UNKNOWN; 185 dimm->dtype = DEV_UNKNOWN;
186 dimm->edac_mode = edac_mode; 186 dimm->edac_mode = edac_mode;
187 } 187 }
188 return 0; 188 return 0;
189 } 189 }
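
The rank-size switch above turns megabytes into page counts with 'MB << (20 - PAGE_SHIFT)': multiply by 2^20 bytes, divide by the page size. Spelled out for the common 4 KiB page (PAGE_SHIFT == 12 is an assumption of this sketch):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumes 4 KiB pages */

int main(void)
{
        unsigned int mb = 512;                           /* rank size in MiB */
        unsigned int nr_pages = mb << (20 - PAGE_SHIFT); /* = mb * 256 */

        printf("%u MiB -> %u pages\n", mb, nr_pages);    /* 512 MiB -> 131072 */
        return 0;
}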
190 190
191 static int __devinit pasemi_edac_probe(struct pci_dev *pdev, 191 static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
192 const struct pci_device_id *ent) 192 const struct pci_device_id *ent)
193 { 193 {
194 struct mem_ctl_info *mci = NULL; 194 struct mem_ctl_info *mci = NULL;
195 struct edac_mc_layer layers[2]; 195 struct edac_mc_layer layers[2];
196 u32 errctl1, errcor, scrub, mcen; 196 u32 errctl1, errcor, scrub, mcen;
197 197
198 pci_read_config_dword(pdev, MCCFG_MCEN, &mcen); 198 pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
199 if (!(mcen & MCCFG_MCEN_MMC_EN)) 199 if (!(mcen & MCCFG_MCEN_MMC_EN))
200 return -ENODEV; 200 return -ENODEV;
201 201
202 /* 202 /*
203 * We should think about enabling other error detection later on 203 * We should think about enabling other error detection later on
204 */ 204 */
205 205
206 pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1); 206 pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1);
207 errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN | 207 errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN |
208 MCDEBUG_ERRCTL1_MBE_LOG_EN | 208 MCDEBUG_ERRCTL1_MBE_LOG_EN |
209 MCDEBUG_ERRCTL1_RFL_LOG_EN; 209 MCDEBUG_ERRCTL1_RFL_LOG_EN;
210 pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1); 210 pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
211 211
212 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 212 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
213 layers[0].size = PASEMI_EDAC_NR_CSROWS; 213 layers[0].size = PASEMI_EDAC_NR_CSROWS;
214 layers[0].is_virt_csrow = true; 214 layers[0].is_virt_csrow = true;
215 layers[1].type = EDAC_MC_LAYER_CHANNEL; 215 layers[1].type = EDAC_MC_LAYER_CHANNEL;
216 layers[1].size = PASEMI_EDAC_NR_CHANS; 216 layers[1].size = PASEMI_EDAC_NR_CHANS;
217 layers[1].is_virt_csrow = false; 217 layers[1].is_virt_csrow = false;
218 mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers, 218 mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
219 0); 219 0);
220 if (mci == NULL) 220 if (mci == NULL)
221 return -ENOMEM; 221 return -ENOMEM;
222 222
223 pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor); 223 pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor);
224 errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN | 224 errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN |
225 MCCFG_ERRCOR_ECC_GEN_EN | 225 MCCFG_ERRCOR_ECC_GEN_EN |
226 MCCFG_ERRCOR_ECC_CRR_EN; 226 MCCFG_ERRCOR_ECC_CRR_EN;
227 227
228 mci->pdev = &pdev->dev; 228 mci->pdev = &pdev->dev;
229 mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR; 229 mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
230 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 230 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
231 mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ? 231 mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
232 ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? 232 ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ?
233 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) : 233 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) :
234 EDAC_FLAG_NONE; 234 EDAC_FLAG_NONE;
235 mci->mod_name = MODULE_NAME; 235 mci->mod_name = MODULE_NAME;
236 mci->dev_name = pci_name(pdev); 236 mci->dev_name = pci_name(pdev);
237 mci->ctl_name = "pasemi,pwrficient-mc"; 237 mci->ctl_name = "pasemi,pwrficient-mc";
238 mci->edac_check = pasemi_edac_check; 238 mci->edac_check = pasemi_edac_check;
239 mci->ctl_page_to_phys = NULL; 239 mci->ctl_page_to_phys = NULL;
240 pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub); 240 pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub);
241 mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC; 241 mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC;
242 mci->scrub_mode = 242 mci->scrub_mode =
243 ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) | 243 ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) |
244 ((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0); 244 ((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0);
245 245
246 if (pasemi_edac_init_csrows(mci, pdev, 246 if (pasemi_edac_init_csrows(mci, pdev,
247 (mci->edac_cap & EDAC_FLAG_SECDED) ? 247 (mci->edac_cap & EDAC_FLAG_SECDED) ?
248 EDAC_SECDED : 248 EDAC_SECDED :
249 ((mci->edac_cap & EDAC_FLAG_EC) ? 249 ((mci->edac_cap & EDAC_FLAG_EC) ?
250 EDAC_EC : EDAC_NONE))) 250 EDAC_EC : EDAC_NONE)))
251 goto fail; 251 goto fail;
252 252
253 /* 253 /*
254 * Clear status 254 * Clear status
255 */ 255 */
256 pasemi_edac_get_error_info(mci); 256 pasemi_edac_get_error_info(mci);
257 257
258 if (edac_mc_add_mc(mci)) 258 if (edac_mc_add_mc(mci))
259 goto fail; 259 goto fail;
260 260
261 /* get this far and it's successful */ 261 /* get this far and it's successful */
262 return 0; 262 return 0;
263 263
264 fail: 264 fail:
265 edac_mc_free(mci); 265 edac_mc_free(mci);
266 return -ENODEV; 266 return -ENODEV;
267 } 267 }
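
The nested conditional assigning mci->edac_cap is easier to follow unrolled into an if/else chain; an equivalent helper (same logic, only reshaped):

static unsigned long pasemi_edac_cap(u32 errcor)
{
        if (!(errcor & MCCFG_ERRCOR_ECC_GEN_EN))
                return EDAC_FLAG_NONE;                  /* ECC not generated */
        if (errcor & MCCFG_ERRCOR_ECC_CRR_EN)
                return EDAC_FLAG_EC | EDAC_FLAG_SECDED; /* detect and correct */
        return EDAC_FLAG_EC;                            /* detect only */
}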
268 268
269 static void __devexit pasemi_edac_remove(struct pci_dev *pdev) 269 static void __devexit pasemi_edac_remove(struct pci_dev *pdev)
270 { 270 {
271 struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); 271 struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
272 272
273 if (!mci) 273 if (!mci)
274 return; 274 return;
275 275
276 edac_mc_free(mci); 276 edac_mc_free(mci);
277 } 277 }
278 278
279 279
280 static const struct pci_device_id pasemi_edac_pci_tbl[] = { 280 static const struct pci_device_id pasemi_edac_pci_tbl[] = {
281 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) }, 281 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) },
282 { } 282 { }
283 }; 283 };
284 284
285 MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl); 285 MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
286 286
287 static struct pci_driver pasemi_edac_driver = { 287 static struct pci_driver pasemi_edac_driver = {
288 .name = MODULE_NAME, 288 .name = MODULE_NAME,
289 .probe = pasemi_edac_probe, 289 .probe = pasemi_edac_probe,
290 .remove = __devexit_p(pasemi_edac_remove), 290 .remove = __devexit_p(pasemi_edac_remove),
291 .id_table = pasemi_edac_pci_tbl, 291 .id_table = pasemi_edac_pci_tbl,
292 }; 292 };
293 293
294 static int __init pasemi_edac_init(void) 294 static int __init pasemi_edac_init(void)
295 { 295 {
296 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 296 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
297 opstate_init(); 297 opstate_init();
298 298
299 return pci_register_driver(&pasemi_edac_driver); 299 return pci_register_driver(&pasemi_edac_driver);
300 } 300 }
301 301
302 static void __exit pasemi_edac_exit(void) 302 static void __exit pasemi_edac_exit(void)
303 { 303 {
304 pci_unregister_driver(&pasemi_edac_driver); 304 pci_unregister_driver(&pasemi_edac_driver);
305 } 305 }
306 306
307 module_init(pasemi_edac_init); 307 module_init(pasemi_edac_init);
308 module_exit(pasemi_edac_exit); 308 module_exit(pasemi_edac_exit);
309 309
310 MODULE_LICENSE("GPL"); 310 MODULE_LICENSE("GPL");
311 MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); 311 MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
312 MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller"); 312 MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
313 module_param(edac_op_state, int, 0444); 313 module_param(edac_op_state, int, 0444);
314 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 1=NMI"); 314 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 1=NMI");
315 315
316 316
drivers/edac/r82600_edac.c
1 /* 1 /*
2 * Radisys 82600 Embedded chipset Memory Controller kernel module 2 * Radisys 82600 Embedded chipset Memory Controller kernel module
3 * (C) 2005 EADS Astrium 3 * (C) 2005 EADS Astrium
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne 7 * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
8 * Harbaugh, Dan Hollis <goemon at anime dot net> and others. 8 * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
9 * 9 *
10 * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $ 10 * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
11 * 11 *
12 * Written with reference to 82600 High Integration Dual PCI System 12 * Written with reference to 82600 High Integration Dual PCI System
13 * Controller Data Book: 13 * Controller Data Book:
14 * www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf 14 * www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
15 * references to this document given in [] 15 * references to this document given in []
16 */ 16 */
17 17
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/pci.h> 20 #include <linux/pci.h>
21 #include <linux/pci_ids.h> 21 #include <linux/pci_ids.h>
22 #include <linux/edac.h> 22 #include <linux/edac.h>
23 #include "edac_core.h" 23 #include "edac_core.h"
24 24
25 #define R82600_REVISION " Ver: 2.0.2" 25 #define R82600_REVISION " Ver: 2.0.2"
26 #define EDAC_MOD_STR "r82600_edac" 26 #define EDAC_MOD_STR "r82600_edac"
27 27
28 #define r82600_printk(level, fmt, arg...) \ 28 #define r82600_printk(level, fmt, arg...) \
29 edac_printk(level, "r82600", fmt, ##arg) 29 edac_printk(level, "r82600", fmt, ##arg)
30 30
31 #define r82600_mc_printk(mci, level, fmt, arg...) \ 31 #define r82600_mc_printk(mci, level, fmt, arg...) \
32 edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg) 32 edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg)
33 33
34 /* Radisys say "The 82600 integrates a main memory SDRAM controller that 34 /* Radisys say "The 82600 integrates a main memory SDRAM controller that
35 * supports up to four banks of memory. The four banks can support a mix of 35 * supports up to four banks of memory. The four banks can support a mix of
36 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs, 36 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
37 * each of which can be any size from 16MB to 512MB. Both registered (control 37 * each of which can be any size from 16MB to 512MB. Both registered (control
38 * signals buffered) and unbuffered DIMM types are supported. Mixing of 38 * signals buffered) and unbuffered DIMM types are supported. Mixing of
39 * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs 39 * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
40 * is not allowed. The 82600 SDRAM interface operates at the same frequency as 40 * is not allowed. The 82600 SDRAM interface operates at the same frequency as
41 * the CPU bus, 66MHz, 100MHz or 133MHz." 41 * the CPU bus, 66MHz, 100MHz or 133MHz."
42 */ 42 */
43 43
44 #define R82600_NR_CSROWS 4 44 #define R82600_NR_CSROWS 4
45 #define R82600_NR_CHANS 1 45 #define R82600_NR_CHANS 1
46 #define R82600_NR_DIMMS 4 46 #define R82600_NR_DIMMS 4
47 47
48 #define R82600_BRIDGE_ID 0x8200 48 #define R82600_BRIDGE_ID 0x8200
49 49
50 /* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */ 50 /* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
51 #define R82600_DRAMC 0x57 /* Various SDRAM related control bits 51 #define R82600_DRAMC 0x57 /* Various SDRAM related control bits
52 * all bits are R/W 52 * all bits are R/W
53 * 53 *
54 * 7 SDRAM ISA Hole Enable 54 * 7 SDRAM ISA Hole Enable
55 * 6 Flash Page Mode Enable 55 * 6 Flash Page Mode Enable
56 * 5 ECC Enable: 1=ECC 0=noECC 56 * 5 ECC Enable: 1=ECC 0=noECC
57 * 4 DRAM DIMM Type: 1= 57 * 4 DRAM DIMM Type: 1=
58 * 3 BIOS Alias Disable 58 * 3 BIOS Alias Disable
59 * 2 SDRAM BIOS Flash Write Enable 59 * 2 SDRAM BIOS Flash Write Enable
60 * 1:0 SDRAM Refresh Rate: 00=Disabled 60 * 1:0 SDRAM Refresh Rate: 00=Disabled
61 * 01=7.8usec (256Mbit SDRAMs) 61 * 01=7.8usec (256Mbit SDRAMs)
62 * 10=15.6us 11=125usec 62 * 10=15.6us 11=125usec
63 */ 63 */
64 64
65 #define R82600_SDRAMC 0x76 /* "SDRAM Control Register" 65 #define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
66 * More SDRAM related control bits 66 * More SDRAM related control bits
67 * all bits are R/W 67 * all bits are R/W
68 * 68 *
69 * 15:8 Reserved. 69 * 15:8 Reserved.
70 * 70 *
71 * 7:5 Special SDRAM Mode Select 71 * 7:5 Special SDRAM Mode Select
72 * 72 *
73 * 4 Force ECC 73 * 4 Force ECC
74 * 74 *
75 * 1=Drive ECC bits to 0 during 75 * 1=Drive ECC bits to 0 during
76 * write cycles (i.e. ECC test mode) 76 * write cycles (i.e. ECC test mode)
77 * 77 *
78 * 0=Normal ECC functioning 78 * 0=Normal ECC functioning
79 * 79 *
80 * 3 Enhanced Paging Enable 80 * 3 Enhanced Paging Enable
81 * 81 *
82 * 2 CAS# Latency 0=3clks 1=2clks 82 * 2 CAS# Latency 0=3clks 1=2clks
83 * 83 *
84 * 1 RAS# to CAS# Delay 0=3 1=2 84 * 1 RAS# to CAS# Delay 0=3 1=2
85 * 85 *
86 * 0 RAS# Precharge 0=3 1=2 86 * 0 RAS# Precharge 0=3 1=2
87 */ 87 */
88 88
89 #define R82600_EAP 0x80 /* ECC Error Address Pointer Register 89 #define R82600_EAP 0x80 /* ECC Error Address Pointer Register
90 * 90 *
91 * 31 Disable Hardware Scrubbing (RW) 91 * 31 Disable Hardware Scrubbing (RW)
92 * 0=Scrub on corrected read 92 * 0=Scrub on corrected read
93 * 1=Don't scrub on corrected read 93 * 1=Don't scrub on corrected read
94 * 94 *
95 * 30:12 Error Address Pointer (RO) 95 * 30:12 Error Address Pointer (RO)
96 * Upper 19 bits of error address 96 * Upper 19 bits of error address
97 * 97 *
98 * 11:4 Syndrome Bits (RO) 98 * 11:4 Syndrome Bits (RO)
99 * 99 *
100 * 3 BSERR# on multibit error (RW) 100 * 3 BSERR# on multibit error (RW)
101 * 1=enable 0=disable 101 * 1=enable 0=disable
102 * 102 *
103 * 2 NMI on Single Bit Error (RW) 103 * 2 NMI on Single Bit Error (RW)
104 * 1=NMI triggered by SBE n.b. other 104 * 1=NMI triggered by SBE n.b. other
105 * prerequisites 105 * prerequisites
106 * 0=NMI not triggered 106 * 0=NMI not triggered
107 * 107 *
108 * 1 MBE (R/WC) 108 * 1 MBE (R/WC)
109 * read 1=MBE at EAP (see above) 109 * read 1=MBE at EAP (see above)
110 * read 0=no MBE, or SBE occurred first 110 * read 0=no MBE, or SBE occurred first
111 * write 1=Clear MBE status (must also 111 * write 1=Clear MBE status (must also
112 * clear SBE) 112 * clear SBE)
113 * write 0=NOP 113 * write 0=NOP
114 * 114 *
115 * 0 SBE (R/WC) 115 * 0 SBE (R/WC)
116 * read 1=SBE at EAP (see above) 116 * read 1=SBE at EAP (see above)
117 * read 0=no SBE, or MBE occurred first 117 * read 0=no SBE, or MBE occurred first
118 * write 1=Clear SBE status (must also 118 * write 1=Clear SBE status (must also
119 * clear MBE) 119 * clear MBE)
120 * write 0=NOP 120 * write 0=NOP
121 */ 121 */
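
Translating the EAP layout documented above into code: a standalone decode of the scrub-disable flag, error address, syndrome, and status bits (the register value is invented; field positions are taken from the comment):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t eap = 0x80123455;                    /* hypothetical readback */

        unsigned scrub_off = (eap >> 31) & 0x1;       /* bit 31 */
        uint32_t err_addr  = eap & 0x7FFFF000;        /* bits 30:12 */
        unsigned syndrome  = (eap >> 4) & 0xFF;       /* bits 11:4 */
        unsigned mbe       = (eap >> 1) & 0x1;        /* bit 1 */
        unsigned sbe       = eap & 0x1;               /* bit 0 */

        printf("scrub off=%u addr=0x%08x syndrome=0x%02x mbe=%u sbe=%u\n",
               scrub_off, (unsigned)err_addr, syndrome, mbe, sbe);
        return 0;
}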
122 122
123 #define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundary Address 123 #define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundary Address
124 * Registers 124 * Registers
125 * 125 *
126 * 7:0 Address lines 30:24 - upper limit of 126 * 7:0 Address lines 30:24 - upper limit of
127 * each row [p57] 127 * each row [p57]
128 */ 128 */
129 129
130 struct r82600_error_info { 130 struct r82600_error_info {
131 u32 eapr; 131 u32 eapr;
132 }; 132 };
133 133
134 static bool disable_hardware_scrub; 134 static bool disable_hardware_scrub;
135 135
136 static struct edac_pci_ctl_info *r82600_pci; 136 static struct edac_pci_ctl_info *r82600_pci;
137 137
138 static void r82600_get_error_info(struct mem_ctl_info *mci, 138 static void r82600_get_error_info(struct mem_ctl_info *mci,
139 struct r82600_error_info *info) 139 struct r82600_error_info *info)
140 { 140 {
141 struct pci_dev *pdev; 141 struct pci_dev *pdev;
142 142
143 pdev = to_pci_dev(mci->pdev); 143 pdev = to_pci_dev(mci->pdev);
144 pci_read_config_dword(pdev, R82600_EAP, &info->eapr); 144 pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
145 145
146 if (info->eapr & BIT(0)) 146 if (info->eapr & BIT(0))
147 /* Clear error to allow next error to be reported [p.62] */ 147 /* Clear error to allow next error to be reported [p.62] */
148 pci_write_bits32(pdev, R82600_EAP, 148 pci_write_bits32(pdev, R82600_EAP,
149 ((u32) BIT(0) | (u32) BIT(1)), 149 ((u32) BIT(0) | (u32) BIT(1)),
150 ((u32) BIT(0) | (u32) BIT(1))); 150 ((u32) BIT(0) | (u32) BIT(1)));
151 151
152 if (info->eapr & BIT(1)) 152 if (info->eapr & BIT(1))
153 /* Clear error to allow next error to be reported [p.62] */ 153 /* Clear error to allow next error to be reported [p.62] */
154 pci_write_bits32(pdev, R82600_EAP, 154 pci_write_bits32(pdev, R82600_EAP,
155 ((u32) BIT(0) | (u32) BIT(1)), 155 ((u32) BIT(0) | (u32) BIT(1)),
156 ((u32) BIT(0) | (u32) BIT(1))); 156 ((u32) BIT(0) | (u32) BIT(1)));
157 } 157 }
158 158
159 static int r82600_process_error_info(struct mem_ctl_info *mci, 159 static int r82600_process_error_info(struct mem_ctl_info *mci,
160 struct r82600_error_info *info, 160 struct r82600_error_info *info,
161 int handle_errors) 161 int handle_errors)
162 { 162 {
163 int error_found; 163 int error_found;
164 u32 eapaddr, page; 164 u32 eapaddr, page;
165 u32 syndrome; 165 u32 syndrome;
166 166
167 error_found = 0; 167 error_found = 0;
168 168
169 /* bits 30:12 store the upper 19 bits of the 32 bit error address */ 169 /* bits 30:12 store the upper 19 bits of the 32 bit error address */
170 eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13; 170 eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13;
171 /* Syndrome in bits 11:4 [p.62] */ 171 /* Syndrome in bits 11:4 [p.62] */
172 syndrome = (info->eapr >> 4) & 0xFF; 172 syndrome = (info->eapr >> 4) & 0xFF;
173 173
174 /* the R82600 reports at less than page * 174 /* the R82600 reports at less than page *
175 * granularity (upper 19 bits only) */ 175 * granularity (upper 19 bits only) */
176 page = eapaddr >> PAGE_SHIFT; 176 page = eapaddr >> PAGE_SHIFT;
177 177
178 if (info->eapr & BIT(0)) { /* CE? */ 178 if (info->eapr & BIT(0)) { /* CE? */
179 error_found = 1; 179 error_found = 1;
180 180
181 if (handle_errors) 181 if (handle_errors)
182 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 182 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
183 page, 0, syndrome, 183 page, 0, syndrome,
184 edac_mc_find_csrow_by_page(mci, page), 184 edac_mc_find_csrow_by_page(mci, page),
185 0, -1, 185 0, -1,
186 mci->ctl_name, "", NULL); 186 mci->ctl_name, "", NULL);
187 } 187 }
188 188
189 if (info->eapr & BIT(1)) { /* UE? */ 189 if (info->eapr & BIT(1)) { /* UE? */
190 error_found = 1; 190 error_found = 1;
191 191
192 if (handle_errors) 192 if (handle_errors)
193 /* 82600 doesn't give enough info */ 193 /* 82600 doesn't give enough info */
194 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 194 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
195 page, 0, 0, 195 page, 0, 0,
196 edac_mc_find_csrow_by_page(mci, page), 196 edac_mc_find_csrow_by_page(mci, page),
197 0, -1, 197 0, -1,
198 mci->ctl_name, "", NULL); 198 mci->ctl_name, "", NULL);
199 } 199 }
200 200
201 return error_found; 201 return error_found;
202 } 202 }
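
r82600_process_error_info() rebuilds a coarse error address with '((eapr >> 12) & 0x7FFF) << 13' and reduces it to a page number, so anything below the reported granularity is unavailable. The arithmetic in isolation (mask and shift copied from the function; the EAP value is invented):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* assumes 4 KiB pages */

int main(void)
{
        uint32_t eapr = 0x12345670;                        /* hypothetical EAP */
        uint32_t eapaddr = ((eapr >> 12) & 0x7FFF) << 13;  /* coarse address */
        uint32_t page = eapaddr >> PAGE_SHIFT;

        printf("eapaddr=0x%08x page=0x%x\n",
               (unsigned)eapaddr, (unsigned)page);
        return 0;
}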
203 203
204 static void r82600_check(struct mem_ctl_info *mci) 204 static void r82600_check(struct mem_ctl_info *mci)
205 { 205 {
206 struct r82600_error_info info; 206 struct r82600_error_info info;
207 207
208 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 208 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
209 r82600_get_error_info(mci, &info); 209 r82600_get_error_info(mci, &info);
210 r82600_process_error_info(mci, &info, 1); 210 r82600_process_error_info(mci, &info, 1);
211 } 211 }
212 212
213 static inline int ecc_enabled(u8 dramcr) 213 static inline int ecc_enabled(u8 dramcr)
214 { 214 {
215 return dramcr & BIT(5); 215 return dramcr & BIT(5);
216 } 216 }
217 217
218 static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, 218 static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
219 u8 dramcr) 219 u8 dramcr)
220 { 220 {
221 struct csrow_info *csrow; 221 struct csrow_info *csrow;
222 struct dimm_info *dimm; 222 struct dimm_info *dimm;
223 int index; 223 int index;
224 u8 drbar; /* SDRAM Row Boundary Address Register */ 224 u8 drbar; /* SDRAM Row Boundary Address Register */
225 u32 row_high_limit, row_high_limit_last; 225 u32 row_high_limit, row_high_limit_last;
226 u32 reg_sdram, ecc_on, row_base; 226 u32 reg_sdram, ecc_on, row_base;
227 227
228 ecc_on = ecc_enabled(dramcr); 228 ecc_on = ecc_enabled(dramcr);
229 reg_sdram = dramcr & BIT(4); 229 reg_sdram = dramcr & BIT(4);
230 row_high_limit_last = 0; 230 row_high_limit_last = 0;
231 231
232 for (index = 0; index < mci->nr_csrows; index++) { 232 for (index = 0; index < mci->nr_csrows; index++) {
233 csrow = &mci->csrows[index]; 233 csrow = mci->csrows[index];
234 dimm = csrow->channels[0].dimm; 234 dimm = csrow->channels[0]->dimm;
235 235
236 /* find the DRAM Chip Select Base address and mask */ 236 /* find the DRAM Chip Select Base address and mask */
237 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); 237 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
238 238
239 debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar); 239 debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
240 240
241 row_high_limit = ((u32) drbar << 24); 241 row_high_limit = ((u32) drbar << 24);
242 /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ 242 /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
243 243
244 debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n", 244 debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n",
245 __func__, index, row_high_limit, row_high_limit_last); 245 __func__, index, row_high_limit, row_high_limit_last);
246 246
247 /* Empty row [p.57] */ 247 /* Empty row [p.57] */
248 if (row_high_limit == row_high_limit_last) 248 if (row_high_limit == row_high_limit_last)
249 continue; 249 continue;
250 250
251 row_base = row_high_limit_last; 251 row_base = row_high_limit_last;
252 252
253 csrow->first_page = row_base >> PAGE_SHIFT; 253 csrow->first_page = row_base >> PAGE_SHIFT;
254 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; 254 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
255 255
256 dimm->nr_pages = csrow->last_page - csrow->first_page + 1; 256 dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
257 /* Error address is top 18 bits - so granularity is * 257 /* Error address is top 18 bits - so granularity is *
258 * 14 bits (2^14 bytes) */ 258 * 14 bits (2^14 bytes) */
259 dimm->grain = 1 << 14; 259 dimm->grain = 1 << 14;
260 dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; 260 dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
261 /* FIXME - check that this is unknowable with this chipset */ 261 /* FIXME - check that this is unknowable with this chipset */
262 dimm->dtype = DEV_UNKNOWN; 262 dimm->dtype = DEV_UNKNOWN;
263 263
264 /* Mode is global on 82600 */ 264 /* Mode is global on 82600 */
265 dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; 265 dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
266 row_high_limit_last = row_high_limit; 266 row_high_limit_last = row_high_limit;
267 } 267 }
268 } 268 }
269 269
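The changes from &mci->csrows[index] and csrow->channels[0].dimm to mci->csrows[index] and csrow->channels[0]->dimm in the hunk above are the visible half of the new allocation scheme: csrows and channels are now arrays of pointers to individually allocated objects rather than flat arrays embedded in one big kzalloc. A minimal sketch of the structure edac_mc_alloc() now builds (hypothetical and simplified; the real code in drivers/edac/edac_mc.c also wires up the dimms, the layer bookkeeping, and error unwinding):

	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	for (row = 0; row < tot_csrows; row++) {
		struct csrow_info *csr = kzalloc(sizeof(*csr), GFP_KERNEL);

		mci->csrows[row] = csr;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		for (chn = 0; chn < tot_channels; chn++)
			csr->channels[chn] = kzalloc(sizeof(struct rank_info),
						     GFP_KERNEL);
			/* rank_info is the channel type at this point
			 * in the tree */
	}

With this layout each csrow and channel object can carry its own kobject/device and be released independently.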
270 static int r82600_probe1(struct pci_dev *pdev, int dev_idx) 270 static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
271 { 271 {
272 struct mem_ctl_info *mci; 272 struct mem_ctl_info *mci;
273 struct edac_mc_layer layers[2]; 273 struct edac_mc_layer layers[2];
274 u8 dramcr; 274 u8 dramcr;
275 u32 eapr; 275 u32 eapr;
276 u32 scrub_disabled; 276 u32 scrub_disabled;
277 u32 sdram_refresh_rate; 277 u32 sdram_refresh_rate;
278 struct r82600_error_info discard; 278 struct r82600_error_info discard;
279 279
280 debugf0("%s()\n", __func__); 280 debugf0("%s()\n", __func__);
281 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); 281 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
282 pci_read_config_dword(pdev, R82600_EAP, &eapr); 282 pci_read_config_dword(pdev, R82600_EAP, &eapr);
283 scrub_disabled = eapr & BIT(31); 283 scrub_disabled = eapr & BIT(31);
284 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); 284 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
285 debugf2("%s(): sdram refresh rate = %#0x\n", __func__, 285 debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
286 sdram_refresh_rate); 286 sdram_refresh_rate);
287 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); 287 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
288 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 288 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
289 layers[0].size = R82600_NR_CSROWS; 289 layers[0].size = R82600_NR_CSROWS;
290 layers[0].is_virt_csrow = true; 290 layers[0].is_virt_csrow = true;
291 layers[1].type = EDAC_MC_LAYER_CHANNEL; 291 layers[1].type = EDAC_MC_LAYER_CHANNEL;
292 layers[1].size = R82600_NR_CHANS; 292 layers[1].size = R82600_NR_CHANS;
293 layers[1].is_virt_csrow = false; 293 layers[1].is_virt_csrow = false;
294 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); 294 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
295 if (mci == NULL) 295 if (mci == NULL)
296 return -ENOMEM; 296 return -ENOMEM;
297 297
298 debugf0("%s(): mci = %p\n", __func__, mci); 298 debugf0("%s(): mci = %p\n", __func__, mci);
299 mci->pdev = &pdev->dev; 299 mci->pdev = &pdev->dev;
300 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 300 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
301 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 301 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
302 /* FIXME: work out whether the chip leads have been used for COM2 302 /* FIXME: work out whether the chip leads have been used for COM2
303 * instead on this board [MA6?]. 303 * instead on this board [MA6?].
304 */ 304 */
305 305
306 /* On the R82600, the pins for memory bits 72:65 - i.e. the * 306 /* On the R82600, the pins for memory bits 72:65 - i.e. the *
307 * EC bits are shared with the pins for COM2 (!), so if COM2 * 307 * EC bits are shared with the pins for COM2 (!), so if COM2 *
308 * is enabled, we assume COM2 is wired up, and thus no EDAC * 308 * is enabled, we assume COM2 is wired up, and thus no EDAC *
309 * is possible. */ 309 * is possible. */
310 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 310 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
311 311
312 if (ecc_enabled(dramcr)) { 312 if (ecc_enabled(dramcr)) {
313 if (scrub_disabled) 313 if (scrub_disabled)
314 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " 314 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
315 "%#0x\n", __func__, mci, eapr); 315 "%#0x\n", __func__, mci, eapr);
316 } else 316 } else
317 mci->edac_cap = EDAC_FLAG_NONE; 317 mci->edac_cap = EDAC_FLAG_NONE;
318 318
319 mci->mod_name = EDAC_MOD_STR; 319 mci->mod_name = EDAC_MOD_STR;
320 mci->mod_ver = R82600_REVISION; 320 mci->mod_ver = R82600_REVISION;
321 mci->ctl_name = "R82600"; 321 mci->ctl_name = "R82600";
322 mci->dev_name = pci_name(pdev); 322 mci->dev_name = pci_name(pdev);
323 mci->edac_check = r82600_check; 323 mci->edac_check = r82600_check;
324 mci->ctl_page_to_phys = NULL; 324 mci->ctl_page_to_phys = NULL;
325 r82600_init_csrows(mci, pdev, dramcr); 325 r82600_init_csrows(mci, pdev, dramcr);
326 r82600_get_error_info(mci, &discard); /* clear counters */ 326 r82600_get_error_info(mci, &discard); /* clear counters */
327 327
328 /* Here we assume that we will never see multiple instances of this 328 /* Here we assume that we will never see multiple instances of this
329 * type of memory controller. The ID is therefore hardcoded to 0. 329 * type of memory controller. The ID is therefore hardcoded to 0.
330 */ 330 */
331 if (edac_mc_add_mc(mci)) { 331 if (edac_mc_add_mc(mci)) {
332 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 332 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
333 goto fail; 333 goto fail;
334 } 334 }
335 335
336 /* get this far and it's successful */ 336 /* get this far and it's successful */
337 337
338 if (disable_hardware_scrub) { 338 if (disable_hardware_scrub) {
339 debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", 339 debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
340 __func__); 340 __func__);
341 pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); 341 pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
342 } 342 }
343 343
344 /* allocating generic PCI control info */ 344 /* allocating generic PCI control info */
345 r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 345 r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
346 if (!r82600_pci) { 346 if (!r82600_pci) {
347 printk(KERN_WARNING 347 printk(KERN_WARNING
348 "%s(): Unable to create PCI control\n", 348 "%s(): Unable to create PCI control\n",
349 __func__); 349 __func__);
350 printk(KERN_WARNING 350 printk(KERN_WARNING
351 "%s(): PCI error report via EDAC not setup\n", 351 "%s(): PCI error report via EDAC not setup\n",
352 __func__); 352 __func__);
353 } 353 }
354 354
355 debugf3("%s(): success\n", __func__); 355 debugf3("%s(): success\n", __func__);
356 return 0; 356 return 0;
357 357
358 fail: 358 fail:
359 edac_mc_free(mci); 359 edac_mc_free(mci);
360 return -ENODEV; 360 return -ENODEV;
361 } 361 }
362 362
363 /* returns count (>= 0), or negative on error */ 363 /* returns count (>= 0), or negative on error */
364 static int __devinit r82600_init_one(struct pci_dev *pdev, 364 static int __devinit r82600_init_one(struct pci_dev *pdev,
365 const struct pci_device_id *ent) 365 const struct pci_device_id *ent)
366 { 366 {
367 debugf0("%s()\n", __func__); 367 debugf0("%s()\n", __func__);
368 368
369 /* don't need to call pci_enable_device() */ 369 /* don't need to call pci_enable_device() */
370 return r82600_probe1(pdev, ent->driver_data); 370 return r82600_probe1(pdev, ent->driver_data);
371 } 371 }
372 372
373 static void __devexit r82600_remove_one(struct pci_dev *pdev) 373 static void __devexit r82600_remove_one(struct pci_dev *pdev)
374 { 374 {
375 struct mem_ctl_info *mci; 375 struct mem_ctl_info *mci;
376 376
377 debugf0("%s()\n", __func__); 377 debugf0("%s()\n", __func__);
378 378
379 if (r82600_pci) 379 if (r82600_pci)
380 edac_pci_release_generic_ctl(r82600_pci); 380 edac_pci_release_generic_ctl(r82600_pci);
381 381
382 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) 382 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
383 return; 383 return;
384 384
385 edac_mc_free(mci); 385 edac_mc_free(mci);
386 } 386 }
387 387
388 static DEFINE_PCI_DEVICE_TABLE(r82600_pci_tbl) = { 388 static DEFINE_PCI_DEVICE_TABLE(r82600_pci_tbl) = {
389 { 389 {
390 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) 390 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
391 }, 391 },
392 { 392 {
393 0, 393 0,
394 } /* 0 terminated list. */ 394 } /* 0 terminated list. */
395 }; 395 };
396 396
397 MODULE_DEVICE_TABLE(pci, r82600_pci_tbl); 397 MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
398 398
399 static struct pci_driver r82600_driver = { 399 static struct pci_driver r82600_driver = {
400 .name = EDAC_MOD_STR, 400 .name = EDAC_MOD_STR,
401 .probe = r82600_init_one, 401 .probe = r82600_init_one,
402 .remove = __devexit_p(r82600_remove_one), 402 .remove = __devexit_p(r82600_remove_one),
403 .id_table = r82600_pci_tbl, 403 .id_table = r82600_pci_tbl,
404 }; 404 };
405 405
406 static int __init r82600_init(void) 406 static int __init r82600_init(void)
407 { 407 {
408 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 408 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
409 opstate_init(); 409 opstate_init();
410 410
411 return pci_register_driver(&r82600_driver); 411 return pci_register_driver(&r82600_driver);
412 } 412 }
413 413
414 static void __exit r82600_exit(void) 414 static void __exit r82600_exit(void)
415 { 415 {
416 pci_unregister_driver(&r82600_driver); 416 pci_unregister_driver(&r82600_driver);
417 } 417 }
418 418
419 module_init(r82600_init); 419 module_init(r82600_init);
420 module_exit(r82600_exit); 420 module_exit(r82600_exit);
421 421
422 MODULE_LICENSE("GPL"); 422 MODULE_LICENSE("GPL");
423 MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. " 423 MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
424 "on behalf of EADS Astrium"); 424 "on behalf of EADS Astrium");
425 MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); 425 MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
426 426
427 module_param(disable_hardware_scrub, bool, 0644); 427 module_param(disable_hardware_scrub, bool, 0644);
428 MODULE_PARM_DESC(disable_hardware_scrub, 428 MODULE_PARM_DESC(disable_hardware_scrub,
429 "If set, disable the chipset's automatic scrub for CEs"); 429 "If set, disable the chipset's automatic scrub for CEs");
430 430
431 module_param(edac_op_state, int, 0444); 431 module_param(edac_op_state, int, 0444);
432 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 432 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
433 433
drivers/edac/tile_edac.c
1 /* 1 /*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved. 2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2. 6 * as published by the Free Software Foundation, version 2.
7 * 7 *
8 * This program is distributed in the hope that it will be useful, but 8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of 9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for 11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details. 12 * more details.
13 * Tilera-specific EDAC driver. 13 * Tilera-specific EDAC driver.
14 * 14 *
15 * This source code is derived from the following driver: 15 * This source code is derived from the following driver:
16 * 16 *
17 * Cell MIC driver for ECC counting 17 * Cell MIC driver for ECC counting
18 * 18 *
19 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. 19 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
20 * <benh@kernel.crashing.org> 20 * <benh@kernel.crashing.org>
21 * 21 *
22 */ 22 */
23 23
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <linux/init.h> 25 #include <linux/init.h>
26 #include <linux/platform_device.h> 26 #include <linux/platform_device.h>
27 #include <linux/io.h> 27 #include <linux/io.h>
28 #include <linux/uaccess.h> 28 #include <linux/uaccess.h>
29 #include <linux/edac.h> 29 #include <linux/edac.h>
30 #include <hv/hypervisor.h> 30 #include <hv/hypervisor.h>
31 #include <hv/drv_mshim_intf.h> 31 #include <hv/drv_mshim_intf.h>
32 32
33 #include "edac_core.h" 33 #include "edac_core.h"
34 34
35 #define DRV_NAME "tile-edac" 35 #define DRV_NAME "tile-edac"
36 36
37 /* Number of cs_rows needed per memory controller on TILEPro. */ 37 /* Number of cs_rows needed per memory controller on TILEPro. */
38 #define TILE_EDAC_NR_CSROWS 1 38 #define TILE_EDAC_NR_CSROWS 1
39 39
40 /* Number of channels per memory controller on TILEPro. */ 40 /* Number of channels per memory controller on TILEPro. */
41 #define TILE_EDAC_NR_CHANS 1 41 #define TILE_EDAC_NR_CHANS 1
42 42
43 /* Granularity of reported error in bytes on TILEPro. */ 43 /* Granularity of reported error in bytes on TILEPro. */
44 #define TILE_EDAC_ERROR_GRAIN 8 44 #define TILE_EDAC_ERROR_GRAIN 8
45 45
46 /* TILE processor has multiple independent memory controllers. */ 46 /* TILE processor has multiple independent memory controllers. */
47 struct platform_device *mshim_pdev[TILE_MAX_MSHIMS]; 47 struct platform_device *mshim_pdev[TILE_MAX_MSHIMS];
48 48
49 struct tile_edac_priv { 49 struct tile_edac_priv {
50 int hv_devhdl; /* Hypervisor device handle. */ 50 int hv_devhdl; /* Hypervisor device handle. */
51 int node; /* Memory controller instance #. */ 51 int node; /* Memory controller instance #. */
52 unsigned int ce_count; /* 52 unsigned int ce_count; /*
53 * Correctable-error counter 53 * Correctable-error counter
54 * kept by the driver. 54 * kept by the driver.
55 */ 55 */
56 }; 56 };
57 57
58 static void tile_edac_check(struct mem_ctl_info *mci) 58 static void tile_edac_check(struct mem_ctl_info *mci)
59 { 59 {
60 struct tile_edac_priv *priv = mci->pvt_info; 60 struct tile_edac_priv *priv = mci->pvt_info;
61 struct mshim_mem_error mem_error; 61 struct mshim_mem_error mem_error;
62 62
63 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error, 63 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error,
64 sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) != 64 sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) !=
65 sizeof(struct mshim_mem_error)) { 65 sizeof(struct mshim_mem_error)) {
66 pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n"); 66 pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n");
67 return; 67 return;
68 } 68 }
69 69
70 /* Check if the current error count is different from the saved one. */ 70 /* Check if the current error count is different from the saved one. */
71 if (mem_error.sbe_count != priv->ce_count) { 71 if (mem_error.sbe_count != priv->ce_count) {
72 dev_dbg(mci->pdev, "ECC CE err on node %d\n", priv->node); 72 dev_dbg(mci->pdev, "ECC CE err on node %d\n", priv->node);
73 priv->ce_count = mem_error.sbe_count; 73 priv->ce_count = mem_error.sbe_count;
74 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 74 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
75 0, 0, 0, 75 0, 0, 0,
76 0, 0, -1, 76 0, 0, -1,
77 mci->ctl_name, "", NULL); 77 mci->ctl_name, "", NULL);
78 } 78 }
79 } 79 }
80 80
81 /* 81 /*
82 * Initialize the 'csrows' table within the mci control structure with the 82 * Initialize the 'csrows' table within the mci control structure with the
83 * addressing of memory. 83 * addressing of memory.
84 */ 84 */
85 static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci) 85 static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
86 { 86 {
87 struct csrow_info *csrow = &mci->csrows[0]; 87 struct csrow_info *csrow = mci->csrows[0];
88 struct tile_edac_priv *priv = mci->pvt_info; 88 struct tile_edac_priv *priv = mci->pvt_info;
89 struct mshim_mem_info mem_info; 89 struct mshim_mem_info mem_info;
90 struct dimm_info *dimm = csrow->channels[0].dimm; 90 struct dimm_info *dimm = csrow->channels[0]->dimm;
91 91
92 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, 92 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
93 sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != 93 sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
94 sizeof(struct mshim_mem_info)) { 94 sizeof(struct mshim_mem_info)) {
95 pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n"); 95 pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n");
96 return -1; 96 return -1;
97 } 97 }
98 98
99 if (mem_info.mem_ecc) 99 if (mem_info.mem_ecc)
100 dimm->edac_mode = EDAC_SECDED; 100 dimm->edac_mode = EDAC_SECDED;
101 else 101 else
102 dimm->edac_mode = EDAC_NONE; 102 dimm->edac_mode = EDAC_NONE;
103 switch (mem_info.mem_type) { 103 switch (mem_info.mem_type) {
104 case DDR2: 104 case DDR2:
105 dimm->mtype = MEM_DDR2; 105 dimm->mtype = MEM_DDR2;
106 break; 106 break;
107 107
108 case DDR3: 108 case DDR3:
109 dimm->mtype = MEM_DDR3; 109 dimm->mtype = MEM_DDR3;
110 break; 110 break;
111 111
112 default: 112 default:
113 return -1; 113 return -1;
114 } 114 }
115 115
116 dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT; 116 dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
117 dimm->grain = TILE_EDAC_ERROR_GRAIN; 117 dimm->grain = TILE_EDAC_ERROR_GRAIN;
118 dimm->dtype = DEV_UNKNOWN; 118 dimm->dtype = DEV_UNKNOWN;
119 119
120 return 0; 120 return 0;
121 } 121 }
122 122
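The size-to-pages conversion above is a plain shift. As an illustration (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12; the actual TILE page size may differ), a 4 GiB controller reports:

	nr_pages = mem_size >> PAGE_SHIFT
	         = 0x100000000 >> 12
	         = 1048576 pages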
123 static int __devinit tile_edac_mc_probe(struct platform_device *pdev) 123 static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
124 { 124 {
125 char hv_file[32]; 125 char hv_file[32];
126 int hv_devhdl; 126 int hv_devhdl;
127 struct mem_ctl_info *mci; 127 struct mem_ctl_info *mci;
128 struct edac_mc_layer layers[2]; 128 struct edac_mc_layer layers[2];
129 struct tile_edac_priv *priv; 129 struct tile_edac_priv *priv;
130 int rc; 130 int rc;
131 131
132 sprintf(hv_file, "mshim/%d", pdev->id); 132 sprintf(hv_file, "mshim/%d", pdev->id);
133 hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0); 133 hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0);
134 if (hv_devhdl < 0) 134 if (hv_devhdl < 0)
135 return -EINVAL; 135 return -EINVAL;
136 136
137 /* A TILE MC has a single channel and one chip-select row. */ 137 /* A TILE MC has a single channel and one chip-select row. */
138 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 138 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
139 layers[0].size = TILE_EDAC_NR_CSROWS; 139 layers[0].size = TILE_EDAC_NR_CSROWS;
140 layers[0].is_virt_csrow = true; 140 layers[0].is_virt_csrow = true;
141 layers[1].type = EDAC_MC_LAYER_CHANNEL; 141 layers[1].type = EDAC_MC_LAYER_CHANNEL;
142 layers[1].size = TILE_EDAC_NR_CHANS; 142 layers[1].size = TILE_EDAC_NR_CHANS;
143 layers[1].is_virt_csrow = false; 143 layers[1].is_virt_csrow = false;
144 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers, 144 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
145 sizeof(struct tile_edac_priv)); 145 sizeof(struct tile_edac_priv));
146 if (mci == NULL) 146 if (mci == NULL)
147 return -ENOMEM; 147 return -ENOMEM;
148 priv = mci->pvt_info; 148 priv = mci->pvt_info;
149 priv->node = pdev->id; 149 priv->node = pdev->id;
150 priv->hv_devhdl = hv_devhdl; 150 priv->hv_devhdl = hv_devhdl;
151 151
152 mci->pdev = &pdev->dev; 152 mci->pdev = &pdev->dev;
153 mci->mtype_cap = MEM_FLAG_DDR2; 153 mci->mtype_cap = MEM_FLAG_DDR2;
154 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 154 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
155 155
156 mci->mod_name = DRV_NAME; 156 mci->mod_name = DRV_NAME;
157 #ifdef __tilegx__ 157 #ifdef __tilegx__
158 mci->ctl_name = "TILEGx_Memory_Controller"; 158 mci->ctl_name = "TILEGx_Memory_Controller";
159 #else 159 #else
160 mci->ctl_name = "TILEPro_Memory_Controller"; 160 mci->ctl_name = "TILEPro_Memory_Controller";
161 #endif 161 #endif
162 mci->dev_name = dev_name(&pdev->dev); 162 mci->dev_name = dev_name(&pdev->dev);
163 mci->edac_check = tile_edac_check; 163 mci->edac_check = tile_edac_check;
164 164
165 /* 165 /*
166 * Initialize the MC control structure 'csrows' table 166 * Initialize the MC control structure 'csrows' table
167 * with the mapping and control information. 167 * with the mapping and control information.
168 */ 168 */
169 if (tile_edac_init_csrows(mci)) { 169 if (tile_edac_init_csrows(mci)) {
170 /* No csrows found. */ 170 /* No csrows found. */
171 mci->edac_cap = EDAC_FLAG_NONE; 171 mci->edac_cap = EDAC_FLAG_NONE;
172 } else { 172 } else {
173 mci->edac_cap = EDAC_FLAG_SECDED; 173 mci->edac_cap = EDAC_FLAG_SECDED;
174 } 174 }
175 175
176 platform_set_drvdata(pdev, mci); 176 platform_set_drvdata(pdev, mci);
177 177
178 /* Register with EDAC core */ 178 /* Register with EDAC core */
179 rc = edac_mc_add_mc(mci); 179 rc = edac_mc_add_mc(mci);
180 if (rc) { 180 if (rc) {
181 dev_err(&pdev->dev, "failed to register with EDAC core\n"); 181 dev_err(&pdev->dev, "failed to register with EDAC core\n");
182 edac_mc_free(mci); 182 edac_mc_free(mci);
183 return rc; 183 return rc;
184 } 184 }
185 185
186 return 0; 186 return 0;
187 } 187 }
188 188
189 static int __devexit tile_edac_mc_remove(struct platform_device *pdev) 189 static int __devexit tile_edac_mc_remove(struct platform_device *pdev)
190 { 190 {
191 struct mem_ctl_info *mci = platform_get_drvdata(pdev); 191 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
192 192
193 edac_mc_del_mc(&pdev->dev); 193 edac_mc_del_mc(&pdev->dev);
194 if (mci) 194 if (mci)
195 edac_mc_free(mci); 195 edac_mc_free(mci);
196 return 0; 196 return 0;
197 } 197 }
198 198
199 static struct platform_driver tile_edac_mc_driver = { 199 static struct platform_driver tile_edac_mc_driver = {
200 .driver = { 200 .driver = {
201 .name = DRV_NAME, 201 .name = DRV_NAME,
202 .owner = THIS_MODULE, 202 .owner = THIS_MODULE,
203 }, 203 },
204 .probe = tile_edac_mc_probe, 204 .probe = tile_edac_mc_probe,
205 .remove = __devexit_p(tile_edac_mc_remove), 205 .remove = __devexit_p(tile_edac_mc_remove),
206 }; 206 };
207 207
208 /* 208 /*
209 * Driver init routine. 209 * Driver init routine.
210 */ 210 */
211 static int __init tile_edac_init(void) 211 static int __init tile_edac_init(void)
212 { 212 {
213 char hv_file[32]; 213 char hv_file[32];
214 struct platform_device *pdev; 214 struct platform_device *pdev;
215 int i, err, num = 0; 215 int i, err, num = 0;
216 216
217 /* Only support POLL mode. */ 217 /* Only support POLL mode. */
218 edac_op_state = EDAC_OPSTATE_POLL; 218 edac_op_state = EDAC_OPSTATE_POLL;
219 219
220 err = platform_driver_register(&tile_edac_mc_driver); 220 err = platform_driver_register(&tile_edac_mc_driver);
221 if (err) 221 if (err)
222 return err; 222 return err;
223 223
224 for (i = 0; i < TILE_MAX_MSHIMS; i++) { 224 for (i = 0; i < TILE_MAX_MSHIMS; i++) {
225 /* 225 /*
226 * Not all memory controllers are configured such as in the 226 * Not all memory controllers are configured such as in the
227 * case of a simulator. So we register only those mshims 227 * case of a simulator. So we register only those mshims
228 * that are configured by the hypervisor. 228 * that are configured by the hypervisor.
229 */ 229 */
230 sprintf(hv_file, "mshim/%d", i); 230 sprintf(hv_file, "mshim/%d", i);
231 if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0) 231 if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0)
232 continue; 232 continue;
233 233
234 pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0); 234 pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0);
235 if (IS_ERR(pdev)) 235 if (IS_ERR(pdev))
236 continue; 236 continue;
237 mshim_pdev[i] = pdev; 237 mshim_pdev[i] = pdev;
238 num++; 238 num++;
239 } 239 }
240 240
241 if (num == 0) { 241 if (num == 0) {
242 platform_driver_unregister(&tile_edac_mc_driver); 242 platform_driver_unregister(&tile_edac_mc_driver);
243 return -ENODEV; 243 return -ENODEV;
244 } 244 }
245 return 0; 245 return 0;
246 } 246 }
247 247
248 /* 248 /*
249 * Driver cleanup routine. 249 * Driver cleanup routine.
250 */ 250 */
251 static void __exit tile_edac_exit(void) 251 static void __exit tile_edac_exit(void)
252 { 252 {
253 int i; 253 int i;
254 254
255 for (i = 0; i < TILE_MAX_MSHIMS; i++) { 255 for (i = 0; i < TILE_MAX_MSHIMS; i++) {
256 struct platform_device *pdev = mshim_pdev[i]; 256 struct platform_device *pdev = mshim_pdev[i];
257 if (!pdev) 257 if (!pdev)
258 continue; 258 continue;
259 259
260 platform_set_drvdata(pdev, NULL); 260 platform_set_drvdata(pdev, NULL);
261 platform_device_unregister(pdev); 261 platform_device_unregister(pdev);
262 } 262 }
263 platform_driver_unregister(&tile_edac_mc_driver); 263 platform_driver_unregister(&tile_edac_mc_driver);
264 } 264 }
265 265
266 module_init(tile_edac_init); 266 module_init(tile_edac_init);
267 module_exit(tile_edac_exit); 267 module_exit(tile_edac_exit);
268 268
drivers/edac/x38_edac.c
1 /* 1 /*
2 * Intel X38 Memory Controller kernel module 2 * Intel X38 Memory Controller kernel module
3 * Copyright (C) 2008 Cluster Computing, Inc. 3 * Copyright (C) 2008 Cluster Computing, Inc.
4 * 4 *
5 * This file may be distributed under the terms of the 5 * This file may be distributed under the terms of the
6 * GNU General Public License. 6 * GNU General Public License.
7 * 7 *
8 * This file is based on i3200_edac.c 8 * This file is based on i3200_edac.c
9 * 9 *
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/pci.h> 14 #include <linux/pci.h>
15 #include <linux/pci_ids.h> 15 #include <linux/pci_ids.h>
16 #include <linux/edac.h> 16 #include <linux/edac.h>
17 #include "edac_core.h" 17 #include "edac_core.h"
18 18
19 #define X38_REVISION "1.1" 19 #define X38_REVISION "1.1"
20 20
21 #define EDAC_MOD_STR "x38_edac" 21 #define EDAC_MOD_STR "x38_edac"
22 22
23 #define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0 23 #define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
24 24
25 #define X38_RANKS 8 25 #define X38_RANKS 8
26 #define X38_RANKS_PER_CHANNEL 4 26 #define X38_RANKS_PER_CHANNEL 4
27 #define X38_CHANNELS 2 27 #define X38_CHANNELS 2
28 28
29 /* Intel X38 register addresses - device 0 function 0 - DRAM Controller */ 29 /* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
30 30
31 #define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ 31 #define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
32 #define X38_MCHBAR_HIGH 0x4c 32 #define X38_MCHBAR_HIGH 0x4c
33 #define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ 33 #define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
34 #define X38_MMR_WINDOW_SIZE 16384 34 #define X38_MMR_WINDOW_SIZE 16384
35 35
36 #define X38_TOM 0xa0 /* Top of Memory (16b) 36 #define X38_TOM 0xa0 /* Top of Memory (16b)
37 * 37 *
38 * 15:10 reserved 38 * 15:10 reserved
39 * 9:0 total populated physical memory 39 * 9:0 total populated physical memory
40 */ 40 */
41 #define X38_TOM_MASK 0x3ff /* bits 9:0 */ 41 #define X38_TOM_MASK 0x3ff /* bits 9:0 */
42 #define X38_TOM_SHIFT 26 /* 64MiB grain */ 42 #define X38_TOM_SHIFT 26 /* 64MiB grain */
43 43
44 #define X38_ERRSTS 0xc8 /* Error Status Register (16b) 44 #define X38_ERRSTS 0xc8 /* Error Status Register (16b)
45 * 45 *
46 * 15 reserved 46 * 15 reserved
47 * 14 Isochronous TBWRR Run Behind FIFO Full 47 * 14 Isochronous TBWRR Run Behind FIFO Full
48 * (ITCV) 48 * (ITCV)
49 * 13 Isochronous TBWRR Run Behind FIFO Put 49 * 13 Isochronous TBWRR Run Behind FIFO Put
50 * (ITSTV) 50 * (ITSTV)
51 * 12 reserved 51 * 12 reserved
52 * 11 MCH Thermal Sensor Event 52 * 11 MCH Thermal Sensor Event
53 * for SMI/SCI/SERR (GTSE) 53 * for SMI/SCI/SERR (GTSE)
54 * 10 reserved 54 * 10 reserved
55 * 9 LOCK to non-DRAM Memory Flag (LCKF) 55 * 9 LOCK to non-DRAM Memory Flag (LCKF)
56 * 8 reserved 56 * 8 reserved
57 * 7 DRAM Throttle Flag (DTF) 57 * 7 DRAM Throttle Flag (DTF)
58 * 6:2 reserved 58 * 6:2 reserved
59 * 1 Multi-bit DRAM ECC Error Flag (DMERR) 59 * 1 Multi-bit DRAM ECC Error Flag (DMERR)
60 * 0 Single-bit DRAM ECC Error Flag (DSERR) 60 * 0 Single-bit DRAM ECC Error Flag (DSERR)
61 */ 61 */
62 #define X38_ERRSTS_UE 0x0002 62 #define X38_ERRSTS_UE 0x0002
63 #define X38_ERRSTS_CE 0x0001 63 #define X38_ERRSTS_CE 0x0001
64 #define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE) 64 #define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)
65 65
66 66
67 /* Intel MMIO register space - device 0 function 0 - MMR space */ 67 /* Intel MMIO register space - device 0 function 0 - MMR space */
68 68
69 #define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4) 69 #define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
70 * 70 *
71 * 15:10 reserved 71 * 15:10 reserved
72 * 9:0 Channel 0 DRAM Rank Boundary Address 72 * 9:0 Channel 0 DRAM Rank Boundary Address
73 */ 73 */
74 #define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */ 74 #define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
75 #define X38_DRB_MASK 0x3ff /* bits 9:0 */ 75 #define X38_DRB_MASK 0x3ff /* bits 9:0 */
76 #define X38_DRB_SHIFT 26 /* 64MiB grain */ 76 #define X38_DRB_SHIFT 26 /* 64MiB grain */
77 77
78 #define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b) 78 #define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
79 * 79 *
80 * 63:48 Error Column Address (ERRCOL) 80 * 63:48 Error Column Address (ERRCOL)
81 * 47:32 Error Row Address (ERRROW) 81 * 47:32 Error Row Address (ERRROW)
82 * 31:29 Error Bank Address (ERRBANK) 82 * 31:29 Error Bank Address (ERRBANK)
83 * 28:27 Error Rank Address (ERRRANK) 83 * 28:27 Error Rank Address (ERRRANK)
84 * 26:24 reserved 84 * 26:24 reserved
85 * 23:16 Error Syndrome (ERRSYND) 85 * 23:16 Error Syndrome (ERRSYND)
86 * 15: 2 reserved 86 * 15: 2 reserved
87 * 1 Multiple Bit Error Status (MERRSTS) 87 * 1 Multiple Bit Error Status (MERRSTS)
88 * 0 Correctable Error Status (CERRSTS) 88 * 0 Correctable Error Status (CERRSTS)
89 */ 89 */
90 #define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */ 90 #define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */
91 #define X38_ECCERRLOG_CE 0x1 91 #define X38_ECCERRLOG_CE 0x1
92 #define X38_ECCERRLOG_UE 0x2 92 #define X38_ECCERRLOG_UE 0x2
93 #define X38_ECCERRLOG_RANK_BITS 0x18000000 93 #define X38_ECCERRLOG_RANK_BITS 0x18000000
94 #define X38_ECCERRLOG_SYNDROME_BITS 0xff0000 94 #define X38_ECCERRLOG_SYNDROME_BITS 0xff0000
95 95
96 #define X38_CAPID0 0xe0 /* see P.94 of spec for details */ 96 #define X38_CAPID0 0xe0 /* see P.94 of spec for details */
97 97
98 static int x38_channel_num; 98 static int x38_channel_num;
99 99
100 static int how_many_channel(struct pci_dev *pdev) 100 static int how_many_channel(struct pci_dev *pdev)
101 { 101 {
102 unsigned char capid0_8b; /* 8th byte of CAPID0 */ 102 unsigned char capid0_8b; /* 8th byte of CAPID0 */
103 103
104 pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b); 104 pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
105 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ 105 if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
106 debugf0("In single channel mode.\n"); 106 debugf0("In single channel mode.\n");
107 x38_channel_num = 1; 107 x38_channel_num = 1;
108 } else { 108 } else {
109 debugf0("In dual channel mode.\n"); 109 debugf0("In dual channel mode.\n");
110 x38_channel_num = 2; 110 x38_channel_num = 2;
111 } 111 }
112 112
113 return x38_channel_num; 113 return x38_channel_num;
114 } 114 }
115 115
116 static unsigned long eccerrlog_syndrome(u64 log) 116 static unsigned long eccerrlog_syndrome(u64 log)
117 { 117 {
118 return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16; 118 return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
119 } 119 }
120 120
121 static int eccerrlog_row(int channel, u64 log) 121 static int eccerrlog_row(int channel, u64 log)
122 { 122 {
123 return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) | 123 return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
124 (channel * X38_RANKS_PER_CHANNEL); 124 (channel * X38_RANKS_PER_CHANNEL);
125 } 125 }
126 126
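Worked example of the row mapping above: ERRRANK occupies bits 28:27 of the log, so for an error logged with ERRRANK = 0b11 on channel 1,

	eccerrlog_row(1, log) = 3 | (1 * X38_RANKS_PER_CHANNEL)
	                      = 3 | 4
	                      = 7

i.e. the fourth rank of the second channel, matching the 8-entry (X38_RANKS) virtual csrow table set up in x38_probe1() below.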
127 enum x38_chips { 127 enum x38_chips {
128 X38 = 0, 128 X38 = 0,
129 }; 129 };
130 130
131 struct x38_dev_info { 131 struct x38_dev_info {
132 const char *ctl_name; 132 const char *ctl_name;
133 }; 133 };
134 134
135 struct x38_error_info { 135 struct x38_error_info {
136 u16 errsts; 136 u16 errsts;
137 u16 errsts2; 137 u16 errsts2;
138 u64 eccerrlog[X38_CHANNELS]; 138 u64 eccerrlog[X38_CHANNELS];
139 }; 139 };
140 140
141 static const struct x38_dev_info x38_devs[] = { 141 static const struct x38_dev_info x38_devs[] = {
142 [X38] = { 142 [X38] = {
143 .ctl_name = "x38"}, 143 .ctl_name = "x38"},
144 }; 144 };
145 145
146 static struct pci_dev *mci_pdev; 146 static struct pci_dev *mci_pdev;
147 static int x38_registered = 1; 147 static int x38_registered = 1;
148 148
149 149
150 static void x38_clear_error_info(struct mem_ctl_info *mci) 150 static void x38_clear_error_info(struct mem_ctl_info *mci)
151 { 151 {
152 struct pci_dev *pdev; 152 struct pci_dev *pdev;
153 153
154 pdev = to_pci_dev(mci->pdev); 154 pdev = to_pci_dev(mci->pdev);
155 155
156 /* 156 /*
157 * Clear any error bits. 157 * Clear any error bits.
158 * (Yes, we really clear bits by writing 1 to them.) 158 * (Yes, we really clear bits by writing 1 to them.)
159 */ 159 */
160 pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS, 160 pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
161 X38_ERRSTS_BITS); 161 X38_ERRSTS_BITS);
162 } 162 }
163 163
164 static u64 x38_readq(const void __iomem *addr) 164 static u64 x38_readq(const void __iomem *addr)
165 { 165 {
166 return readl(addr) | (((u64)readl(addr + 4)) << 32); 166 return readl(addr) | (((u64)readl(addr + 4)) << 32);
167 } 167 }
168 168
169 static void x38_get_and_clear_error_info(struct mem_ctl_info *mci, 169 static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
170 struct x38_error_info *info) 170 struct x38_error_info *info)
171 { 171 {
172 struct pci_dev *pdev; 172 struct pci_dev *pdev;
173 void __iomem *window = mci->pvt_info; 173 void __iomem *window = mci->pvt_info;
174 174
175 pdev = to_pci_dev(mci->pdev); 175 pdev = to_pci_dev(mci->pdev);
176 176
177 /* 177 /*
178 * This is a mess because there is no atomic way to read all the 178 * This is a mess because there is no atomic way to read all the
179 * registers at once, and the registers can change under us (e.g. a 179 * registers at once, and the registers can change under us (e.g. a
180 * CE log being overwritten by a UE). 180 * CE log being overwritten by a UE).
181 */ 181 */
182 pci_read_config_word(pdev, X38_ERRSTS, &info->errsts); 182 pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
183 if (!(info->errsts & X38_ERRSTS_BITS)) 183 if (!(info->errsts & X38_ERRSTS_BITS))
184 return; 184 return;
185 185
186 info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG); 186 info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
187 if (x38_channel_num == 2) 187 if (x38_channel_num == 2)
188 info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG); 188 info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
189 189
190 pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2); 190 pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
191 191
192 /* 192 /*
193 * If the error is the same for both reads then the first set 193 * If the error is the same for both reads then the first set
194 * of reads is valid. If there is a change then there is a CE 194 * of reads is valid. If there is a change then there is a CE
195 * with no info and the second set of reads is valid and 195 * with no info and the second set of reads is valid and
196 * should be UE info. 196 * should be UE info.
197 */ 197 */
198 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) { 198 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
199 info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG); 199 info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
200 if (x38_channel_num == 2) 200 if (x38_channel_num == 2)
201 info->eccerrlog[1] = 201 info->eccerrlog[1] =
202 x38_readq(window + X38_C1ECCERRLOG); 202 x38_readq(window + X38_C1ECCERRLOG);
203 } 203 }
204 204
205 x38_clear_error_info(mci); 205 x38_clear_error_info(mci);
206 } 206 }
207 207
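To make the double-read dance concrete, suppose a UE lands between the two ERRSTS reads (values per the X38_ERRSTS bit layout above):

	info->errsts  = 0x0001;  /* first read: DSERR (CE) only */
	info->errsts2 = 0x0003;  /* second read: DMERR (UE) arrived meanwhile */
	/* (0x0001 ^ 0x0003) & X38_ERRSTS_BITS == 0x0002 -> re-read the logs */

The changed bits trigger the second set of eccerrlog reads here, and x38_process_error_info() below reports the "UE overwrote CE" event and continues with errsts2.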
208 static void x38_process_error_info(struct mem_ctl_info *mci, 208 static void x38_process_error_info(struct mem_ctl_info *mci,
209 struct x38_error_info *info) 209 struct x38_error_info *info)
210 { 210 {
211 int channel; 211 int channel;
212 u64 log; 212 u64 log;
213 213
214 if (!(info->errsts & X38_ERRSTS_BITS)) 214 if (!(info->errsts & X38_ERRSTS_BITS))
215 return; 215 return;
216 216
217 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) { 217 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
218 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, 218 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
219 -1, -1, -1, 219 -1, -1, -1,
220 "UE overwrote CE", "", NULL); 220 "UE overwrote CE", "", NULL);
221 info->errsts = info->errsts2; 221 info->errsts = info->errsts2;
222 } 222 }
223 223
224 for (channel = 0; channel < x38_channel_num; channel++) { 224 for (channel = 0; channel < x38_channel_num; channel++) {
225 log = info->eccerrlog[channel]; 225 log = info->eccerrlog[channel];
226 if (log & X38_ECCERRLOG_UE) { 226 if (log & X38_ECCERRLOG_UE) {
227 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 227 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
228 0, 0, 0, 228 0, 0, 0,
229 eccerrlog_row(channel, log), 229 eccerrlog_row(channel, log),
230 -1, -1, 230 -1, -1,
231 "x38 UE", "", NULL); 231 "x38 UE", "", NULL);
232 } else if (log & X38_ECCERRLOG_CE) { 232 } else if (log & X38_ECCERRLOG_CE) {
233 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 233 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
234 0, 0, eccerrlog_syndrome(log), 234 0, 0, eccerrlog_syndrome(log),
235 eccerrlog_row(channel, log), 235 eccerrlog_row(channel, log),
236 -1, -1, 236 -1, -1,
237 "x38 CE", "", NULL); 237 "x38 CE", "", NULL);
238 } 238 }
239 } 239 }
240 } 240 }
241 241
242 static void x38_check(struct mem_ctl_info *mci) 242 static void x38_check(struct mem_ctl_info *mci)
243 { 243 {
244 struct x38_error_info info; 244 struct x38_error_info info;
245 245
246 debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 246 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
247 x38_get_and_clear_error_info(mci, &info); 247 x38_get_and_clear_error_info(mci, &info);
248 x38_process_error_info(mci, &info); 248 x38_process_error_info(mci, &info);
249 } 249 }
250 250
251 251
252 void __iomem *x38_map_mchbar(struct pci_dev *pdev) 252 void __iomem *x38_map_mchbar(struct pci_dev *pdev)
253 { 253 {
254 union { 254 union {
255 u64 mchbar; 255 u64 mchbar;
256 struct { 256 struct {
257 u32 mchbar_low; 257 u32 mchbar_low;
258 u32 mchbar_high; 258 u32 mchbar_high;
259 }; 259 };
260 } u; 260 } u;
261 void __iomem *window; 261 void __iomem *window;
262 262
263 pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low); 263 pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
264 pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1); 264 pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
265 pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high); 265 pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
266 u.mchbar &= X38_MCHBAR_MASK; 266 u.mchbar &= X38_MCHBAR_MASK;
267 267
268 if (u.mchbar != (resource_size_t)u.mchbar) { 268 if (u.mchbar != (resource_size_t)u.mchbar) {
269 printk(KERN_ERR 269 printk(KERN_ERR
270 "x38: mmio space beyond accessible range (0x%llx)\n", 270 "x38: mmio space beyond accessible range (0x%llx)\n",
271 (unsigned long long)u.mchbar); 271 (unsigned long long)u.mchbar);
272 return NULL; 272 return NULL;
273 } 273 }
274 274
275 window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE); 275 window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
276 if (!window) 276 if (!window)
277 printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n", 277 printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
278 (unsigned long long)u.mchbar); 278 (unsigned long long)u.mchbar);
279 279
280 return window; 280 return window;
281 } 281 }
282 282
283 283
284 static void x38_get_drbs(void __iomem *window, 284 static void x38_get_drbs(void __iomem *window,
285 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL]) 285 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
286 { 286 {
287 int i; 287 int i;
288 288
289 for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) { 289 for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
290 drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK; 290 drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
291 drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK; 291 drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
292 } 292 }
293 } 293 }
294 294
295 static bool x38_is_stacked(struct pci_dev *pdev, 295 static bool x38_is_stacked(struct pci_dev *pdev,
296 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL]) 296 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
297 { 297 {
298 u16 tom; 298 u16 tom;
299 299
300 pci_read_config_word(pdev, X38_TOM, &tom); 300 pci_read_config_word(pdev, X38_TOM, &tom);
301 tom &= X38_TOM_MASK; 301 tom &= X38_TOM_MASK;
302 302
303 return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom; 303 return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
304 } 304 }
305 305
306 static unsigned long drb_to_nr_pages( 306 static unsigned long drb_to_nr_pages(
307 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL], 307 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
308 bool stacked, int channel, int rank) 308 bool stacked, int channel, int rank)
309 { 309 {
310 int n; 310 int n;
311 311
312 n = drbs[channel][rank]; 312 n = drbs[channel][rank];
313 if (rank > 0) 313 if (rank > 0)
314 n -= drbs[channel][rank - 1]; 314 n -= drbs[channel][rank - 1];
315 if (stacked && (channel == 1) && drbs[channel][rank] == 315 if (stacked && (channel == 1) && drbs[channel][rank] ==
316 drbs[channel][X38_RANKS_PER_CHANNEL - 1]) { 316 drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
317 n -= drbs[0][X38_RANKS_PER_CHANNEL - 1]; 317 n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
318 } 318 }
319 319
320 n <<= (X38_DRB_SHIFT - PAGE_SHIFT); 320 n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
321 return n; 321 return n;
322 } 322 }
323 323
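Worked example of the DRB arithmetic in the unstacked case: DRB values are cumulative in 64 MiB units (X38_DRB_SHIFT = 26), so for drbs[0] = {4, 8, 8, 8} on channel 0:

	rank 0: n = 4          /* 4 * 64 MiB = 256 MiB */
	rank 1: n = 8 - 4 = 4  /* 256 MiB */
	rank 2: n = 8 - 8 = 0  /* empty rank */
	/* with 4 KiB pages, n <<= (26 - 12): 16384 pages per 64 MiB unit */

The stacked-mode branch above additionally subtracts channel 0's top boundary, since in stacked mode channel 1's DRB values continue above channel 0's memory.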
324 static int x38_probe1(struct pci_dev *pdev, int dev_idx) 324 static int x38_probe1(struct pci_dev *pdev, int dev_idx)
325 { 325 {
326 int rc; 326 int rc;
327 int i, j; 327 int i, j;
328 struct mem_ctl_info *mci = NULL; 328 struct mem_ctl_info *mci = NULL;
329 struct edac_mc_layer layers[2]; 329 struct edac_mc_layer layers[2];
330 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL]; 330 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
331 bool stacked; 331 bool stacked;
332 void __iomem *window; 332 void __iomem *window;
333 333
334 debugf0("MC: %s()\n", __func__); 334 debugf0("MC: %s()\n", __func__);
335 335
336 window = x38_map_mchbar(pdev); 336 window = x38_map_mchbar(pdev);
337 if (!window) 337 if (!window)
338 return -ENODEV; 338 return -ENODEV;
339 339
340 x38_get_drbs(window, drbs); 340 x38_get_drbs(window, drbs);
341 341
342 how_many_channel(pdev); 342 how_many_channel(pdev);
343 343
344 /* FIXME: unconventional pvt_info usage */ 344 /* FIXME: unconventional pvt_info usage */
345 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 345 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
346 layers[0].size = X38_RANKS; 346 layers[0].size = X38_RANKS;
347 layers[0].is_virt_csrow = true; 347 layers[0].is_virt_csrow = true;
348 layers[1].type = EDAC_MC_LAYER_CHANNEL; 348 layers[1].type = EDAC_MC_LAYER_CHANNEL;
349 layers[1].size = x38_channel_num; 349 layers[1].size = x38_channel_num;
350 layers[1].is_virt_csrow = false; 350 layers[1].is_virt_csrow = false;
351 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); 351 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
352 if (!mci) 352 if (!mci)
353 return -ENOMEM; 353 return -ENOMEM;
354 354
355 debugf3("MC: %s(): init mci\n", __func__); 355 debugf3("MC: %s(): init mci\n", __func__);
356 356
357 mci->pdev = &pdev->dev; 357 mci->pdev = &pdev->dev;
358 mci->mtype_cap = MEM_FLAG_DDR2; 358 mci->mtype_cap = MEM_FLAG_DDR2;
359 359
360 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 360 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
361 mci->edac_cap = EDAC_FLAG_SECDED; 361 mci->edac_cap = EDAC_FLAG_SECDED;
362 362
363 mci->mod_name = EDAC_MOD_STR; 363 mci->mod_name = EDAC_MOD_STR;
364 mci->mod_ver = X38_REVISION; 364 mci->mod_ver = X38_REVISION;
365 mci->ctl_name = x38_devs[dev_idx].ctl_name; 365 mci->ctl_name = x38_devs[dev_idx].ctl_name;
366 mci->dev_name = pci_name(pdev); 366 mci->dev_name = pci_name(pdev);
367 mci->edac_check = x38_check; 367 mci->edac_check = x38_check;
368 mci->ctl_page_to_phys = NULL; 368 mci->ctl_page_to_phys = NULL;
369 mci->pvt_info = window; 369 mci->pvt_info = window;
370 370
371 stacked = x38_is_stacked(pdev, drbs); 371 stacked = x38_is_stacked(pdev, drbs);
372 372
373 /* 373 /*
374 * The dram rank boundary (DRB) reg values are boundary addresses 374 * The dram rank boundary (DRB) reg values are boundary addresses
375 * for each DRAM rank with a granularity of 64MB. DRB regs are 375 * for each DRAM rank with a granularity of 64MB. DRB regs are
376 * cumulative; the last one will contain the total memory 376 * cumulative; the last one will contain the total memory
377 * contained in all ranks. 377 * contained in all ranks.
378 */ 378 */
379 for (i = 0; i < mci->nr_csrows; i++) { 379 for (i = 0; i < mci->nr_csrows; i++) {
380 unsigned long nr_pages; 380 unsigned long nr_pages;
381 struct csrow_info *csrow = &mci->csrows[i]; 381 struct csrow_info *csrow = mci->csrows[i];
382 382
383 nr_pages = drb_to_nr_pages(drbs, stacked, 383 nr_pages = drb_to_nr_pages(drbs, stacked,
384 i / X38_RANKS_PER_CHANNEL, 384 i / X38_RANKS_PER_CHANNEL,
385 i % X38_RANKS_PER_CHANNEL); 385 i % X38_RANKS_PER_CHANNEL);
386 386
387 if (nr_pages == 0) 387 if (nr_pages == 0)
388 continue; 388 continue;
389 389
390 for (j = 0; j < x38_channel_num; j++) { 390 for (j = 0; j < x38_channel_num; j++) {
391 struct dimm_info *dimm = csrow->channels[j].dimm; 391 struct dimm_info *dimm = csrow->channels[j]->dimm;
392 392
393 dimm->nr_pages = nr_pages / x38_channel_num; 393 dimm->nr_pages = nr_pages / x38_channel_num;
394 dimm->grain = nr_pages << PAGE_SHIFT; 394 dimm->grain = nr_pages << PAGE_SHIFT;
395 dimm->mtype = MEM_DDR2; 395 dimm->mtype = MEM_DDR2;
396 dimm->dtype = DEV_UNKNOWN; 396 dimm->dtype = DEV_UNKNOWN;
397 dimm->edac_mode = EDAC_UNKNOWN; 397 dimm->edac_mode = EDAC_UNKNOWN;
398 } 398 }
399 } 399 }
400 400
401 x38_clear_error_info(mci); 401 x38_clear_error_info(mci);
402 402
403 rc = -ENODEV; 403 rc = -ENODEV;
404 if (edac_mc_add_mc(mci)) { 404 if (edac_mc_add_mc(mci)) {
405 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); 405 debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
406 goto fail; 406 goto fail;
407 } 407 }
408 408
409 /* get this far and it's successful */ 409 /* get this far and it's successful */
410 debugf3("MC: %s(): success\n", __func__); 410 debugf3("MC: %s(): success\n", __func__);
411 return 0; 411 return 0;
412 412
413 fail: 413 fail:
414 iounmap(window); 414 iounmap(window);
415 if (mci) 415 if (mci)
416 edac_mc_free(mci); 416 edac_mc_free(mci);
417 417
418 return rc; 418 return rc;
419 } 419 }
420 420
421 static int __devinit x38_init_one(struct pci_dev *pdev, 421 static int __devinit x38_init_one(struct pci_dev *pdev,
422 const struct pci_device_id *ent) 422 const struct pci_device_id *ent)
423 { 423 {
424 int rc; 424 int rc;
425 425
426 debugf0("MC: %s()\n", __func__); 426 debugf0("MC: %s()\n", __func__);
427 427
428 if (pci_enable_device(pdev) < 0) 428 if (pci_enable_device(pdev) < 0)
429 return -EIO; 429 return -EIO;
430 430
431 rc = x38_probe1(pdev, ent->driver_data); 431 rc = x38_probe1(pdev, ent->driver_data);
432 if (!mci_pdev) 432 if (!mci_pdev)
433 mci_pdev = pci_dev_get(pdev); 433 mci_pdev = pci_dev_get(pdev);
434 434
435 return rc; 435 return rc;
436 } 436 }
437 437
438 static void __devexit x38_remove_one(struct pci_dev *pdev) 438 static void __devexit x38_remove_one(struct pci_dev *pdev)
439 { 439 {
440 struct mem_ctl_info *mci; 440 struct mem_ctl_info *mci;
441 441
442 debugf0("%s()\n", __func__); 442 debugf0("%s()\n", __func__);
443 443
444 mci = edac_mc_del_mc(&pdev->dev); 444 mci = edac_mc_del_mc(&pdev->dev);
445 if (!mci) 445 if (!mci)
446 return; 446 return;
447 447
448 iounmap(mci->pvt_info); 448 iounmap(mci->pvt_info);
449 449
450 edac_mc_free(mci); 450 edac_mc_free(mci);
451 } 451 }
452 452
453 static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = { 453 static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = {
454 { 454 {
455 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 455 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
456 X38}, 456 X38},
457 { 457 {
458 0, 458 0,
459 } /* 0 terminated list. */ 459 } /* 0 terminated list. */
460 }; 460 };
461 461
462 MODULE_DEVICE_TABLE(pci, x38_pci_tbl); 462 MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
463 463
464 static struct pci_driver x38_driver = { 464 static struct pci_driver x38_driver = {
465 .name = EDAC_MOD_STR, 465 .name = EDAC_MOD_STR,
466 .probe = x38_init_one, 466 .probe = x38_init_one,
467 .remove = __devexit_p(x38_remove_one), 467 .remove = __devexit_p(x38_remove_one),
468 .id_table = x38_pci_tbl, 468 .id_table = x38_pci_tbl,
469 }; 469 };
470 470
471 static int __init x38_init(void) 471 static int __init x38_init(void)
472 { 472 {
473 int pci_rc; 473 int pci_rc;
474 474
475 debugf3("MC: %s()\n", __func__); 475 debugf3("MC: %s()\n", __func__);
476 476
477 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 477 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
478 opstate_init(); 478 opstate_init();
479 479
480 pci_rc = pci_register_driver(&x38_driver); 480 pci_rc = pci_register_driver(&x38_driver);
481 if (pci_rc < 0) 481 if (pci_rc < 0)
482 goto fail0; 482 goto fail0;
483 483
484 if (!mci_pdev) { 484 if (!mci_pdev) {
485 x38_registered = 0; 485 x38_registered = 0;
486 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 486 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
487 PCI_DEVICE_ID_INTEL_X38_HB, NULL); 487 PCI_DEVICE_ID_INTEL_X38_HB, NULL);
488 if (!mci_pdev) { 488 if (!mci_pdev) {
489 debugf0("x38 pci_get_device fail\n"); 489 debugf0("x38 pci_get_device fail\n");
490 pci_rc = -ENODEV; 490 pci_rc = -ENODEV;
491 goto fail1; 491 goto fail1;
492 } 492 }
493 493
494 pci_rc = x38_init_one(mci_pdev, x38_pci_tbl); 494 pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
495 if (pci_rc < 0) { 495 if (pci_rc < 0) {
496 debugf0("x38 init fail\n"); 496 debugf0("x38 init fail\n");
497 pci_rc = -ENODEV; 497 pci_rc = -ENODEV;
498 goto fail1; 498 goto fail1;
499 } 499 }
500 } 500 }
501 501
502 return 0; 502 return 0;
503 503
504 fail1: 504 fail1:
505 pci_unregister_driver(&x38_driver); 505 pci_unregister_driver(&x38_driver);
506 506
507 fail0: 507 fail0:
508 if (mci_pdev) 508 if (mci_pdev)
509 pci_dev_put(mci_pdev); 509 pci_dev_put(mci_pdev);
510 510
511 return pci_rc; 511 return pci_rc;
512 } 512 }
513 513
514 static void __exit x38_exit(void) 514 static void __exit x38_exit(void)
515 { 515 {
516 debugf3("MC: %s()\n", __func__); 516 debugf3("MC: %s()\n", __func__);
517 517
518 pci_unregister_driver(&x38_driver); 518 pci_unregister_driver(&x38_driver);
519 if (!x38_registered) { 519 if (!x38_registered) {
520 x38_remove_one(mci_pdev); 520 x38_remove_one(mci_pdev);
521 pci_dev_put(mci_pdev); 521 pci_dev_put(mci_pdev);
522 } 522 }
523 } 523 }
524 524
525 module_init(x38_init); 525 module_init(x38_init);
526 module_exit(x38_exit); 526 module_exit(x38_exit);
527 527
528 MODULE_LICENSE("GPL"); 528 MODULE_LICENSE("GPL");
529 MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake"); 529 MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
530 MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers"); 530 MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
531 531
532 module_param(edac_op_state, int, 0444); 532 module_param(edac_op_state, int, 0444);
533 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 533 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
534 534
include/linux/edac.h
1 /* 1 /*
2 * Generic EDAC defs 2 * Generic EDAC defs
3 * 3 *
4 * Author: Dave Jiang <djiang@mvista.com> 4 * Author: Dave Jiang <djiang@mvista.com>
5 * 5 *
6 * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under 6 * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program 7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express 8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied. 9 * or implied.
10 * 10 *
11 */ 11 */
12 #ifndef _LINUX_EDAC_H_ 12 #ifndef _LINUX_EDAC_H_
13 #define _LINUX_EDAC_H_ 13 #define _LINUX_EDAC_H_
14 14
15 #include <linux/atomic.h> 15 #include <linux/atomic.h>
16 #include <linux/device.h> 16 #include <linux/device.h>
17 #include <linux/kobject.h> 17 #include <linux/kobject.h>
18 #include <linux/completion.h> 18 #include <linux/completion.h>
19 #include <linux/workqueue.h> 19 #include <linux/workqueue.h>
20 #include <linux/debugfs.h> 20 #include <linux/debugfs.h>
21 21
22 struct device; 22 struct device;
23 23
24 #define EDAC_OPSTATE_INVAL -1 24 #define EDAC_OPSTATE_INVAL -1
25 #define EDAC_OPSTATE_POLL 0 25 #define EDAC_OPSTATE_POLL 0
26 #define EDAC_OPSTATE_NMI 1 26 #define EDAC_OPSTATE_NMI 1
27 #define EDAC_OPSTATE_INT 2 27 #define EDAC_OPSTATE_INT 2
28 28
29 extern int edac_op_state; 29 extern int edac_op_state;
30 extern int edac_err_assert; 30 extern int edac_err_assert;
31 extern atomic_t edac_handlers; 31 extern atomic_t edac_handlers;
32 extern struct bus_type edac_subsys; 32 extern struct bus_type edac_subsys;
33 33
34 extern int edac_handler_set(void); 34 extern int edac_handler_set(void);
35 extern void edac_atomic_assert_error(void); 35 extern void edac_atomic_assert_error(void);
36 extern struct bus_type *edac_get_sysfs_subsys(void); 36 extern struct bus_type *edac_get_sysfs_subsys(void);
37 extern void edac_put_sysfs_subsys(void); 37 extern void edac_put_sysfs_subsys(void);
38 38
39 static inline void opstate_init(void) 39 static inline void opstate_init(void)
40 { 40 {
41 switch (edac_op_state) { 41 switch (edac_op_state) {
42 case EDAC_OPSTATE_POLL: 42 case EDAC_OPSTATE_POLL:
43 case EDAC_OPSTATE_NMI: 43 case EDAC_OPSTATE_NMI:
44 break; 44 break;
45 default: 45 default:
46 edac_op_state = EDAC_OPSTATE_POLL; 46 edac_op_state = EDAC_OPSTATE_POLL;
47 } 47 }
48 return; 48 return;
49 } 49 }
50 50
51 #define EDAC_MC_LABEL_LEN 31 51 #define EDAC_MC_LABEL_LEN 31
52 #define MC_PROC_NAME_MAX_LEN 7 52 #define MC_PROC_NAME_MAX_LEN 7
53 53
54 /** 54 /**
55 * enum dev_type - describe the type of memory DRAM chips used at the stick 55 * enum dev_type - describe the type of memory DRAM chips used at the stick
56 * @DEV_UNKNOWN: Can't be determined, or MC doesn't support detecting it 56 * @DEV_UNKNOWN: Can't be determined, or MC doesn't support detecting it
57 * @DEV_X1: 1 bit for data 57 * @DEV_X1: 1 bit for data
58 * @DEV_X2: 2 bits for data 58 * @DEV_X2: 2 bits for data
59 * @DEV_X4: 4 bits for data 59 * @DEV_X4: 4 bits for data
60 * @DEV_X8: 8 bits for data 60 * @DEV_X8: 8 bits for data
61 * @DEV_X16: 16 bits for data 61 * @DEV_X16: 16 bits for data
62 * @DEV_X32: 32 bits for data 62 * @DEV_X32: 32 bits for data
63 * @DEV_X64: 64 bits for data 63 * @DEV_X64: 64 bits for data
64 * 64 *
65 * Typical values are x4 and x8. 65 * Typical values are x4 and x8.
66 */ 66 */
67 enum dev_type { 67 enum dev_type {
68 DEV_UNKNOWN = 0, 68 DEV_UNKNOWN = 0,
69 DEV_X1, 69 DEV_X1,
70 DEV_X2, 70 DEV_X2,
71 DEV_X4, 71 DEV_X4,
72 DEV_X8, 72 DEV_X8,
73 DEV_X16, 73 DEV_X16,
74 DEV_X32, /* Do these parts exist? */ 74 DEV_X32, /* Do these parts exist? */
75 DEV_X64 /* Do these parts exist? */ 75 DEV_X64 /* Do these parts exist? */
76 }; 76 };
77 77
78 #define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN) 78 #define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
79 #define DEV_FLAG_X1 BIT(DEV_X1) 79 #define DEV_FLAG_X1 BIT(DEV_X1)
80 #define DEV_FLAG_X2 BIT(DEV_X2) 80 #define DEV_FLAG_X2 BIT(DEV_X2)
81 #define DEV_FLAG_X4 BIT(DEV_X4) 81 #define DEV_FLAG_X4 BIT(DEV_X4)
82 #define DEV_FLAG_X8 BIT(DEV_X8) 82 #define DEV_FLAG_X8 BIT(DEV_X8)
83 #define DEV_FLAG_X16 BIT(DEV_X16) 83 #define DEV_FLAG_X16 BIT(DEV_X16)
84 #define DEV_FLAG_X32 BIT(DEV_X32) 84 #define DEV_FLAG_X32 BIT(DEV_X32)
85 #define DEV_FLAG_X64 BIT(DEV_X64) 85 #define DEV_FLAG_X64 BIT(DEV_X64)
86 86
87 /** 87 /**
88 * enum hw_event_mc_err_type - type of the detected error 88 * enum hw_event_mc_err_type - type of the detected error
89 * 89 *
90 * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC 90 * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC
91 * corrected error was detected 91 * corrected error was detected
92 * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that 92 * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that
93 * can't be corrected by ECC, but it is not 93 * can't be corrected by ECC, but it is not
94 * fatal (maybe it is on an unused memory area, 94 * fatal (maybe it is on an unused memory area,
95 * or the memory controller could recover from 95 * or the memory controller could recover from
96 * it, for example by re-trying the operation). 96 * it, for example by re-trying the operation).
97 * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not 97 * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not
98 * be recovered. 98 * be recovered.
99 */ 99 */
100 enum hw_event_mc_err_type { 100 enum hw_event_mc_err_type {
101 HW_EVENT_ERR_CORRECTED, 101 HW_EVENT_ERR_CORRECTED,
102 HW_EVENT_ERR_UNCORRECTED, 102 HW_EVENT_ERR_UNCORRECTED,
103 HW_EVENT_ERR_FATAL, 103 HW_EVENT_ERR_FATAL,
104 }; 104 };
105 105
106 /** 106 /**
107 * enum mem_type - memory types. For a more detailed reference, please see 107 * enum mem_type - memory types. For a more detailed reference, please see
108 * http://en.wikipedia.org/wiki/DRAM 108 * http://en.wikipedia.org/wiki/DRAM
109 * 109 *
110 * @MEM_EMPTY: Empty csrow 110 * @MEM_EMPTY: Empty csrow
111 * @MEM_RESERVED: Reserved csrow type 111 * @MEM_RESERVED: Reserved csrow type
112 * @MEM_UNKNOWN: Unknown csrow type 112 * @MEM_UNKNOWN: Unknown csrow type
113 * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. 113 * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995.
114 * @MEM_EDO: EDO - Extended data out, used on systems up to 1998. 114 * @MEM_EDO: EDO - Extended data out, used on systems up to 1998.
115 * @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant. 115 * @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant.
116 * @MEM_SDR: SDR - Single data rate SDRAM 116 * @MEM_SDR: SDR - Single data rate SDRAM
117 * http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory 117 * http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory
118 * They use 3 pins for chip select: Pins 0 and 2 are 118 * They use 3 pins for chip select: Pins 0 and 2 are
119 * for rank 0; pins 1 and 3 are for rank 1, if the memory 119 * for rank 0; pins 1 and 3 are for rank 1, if the memory
120 * is dual-rank. 120 * is dual-rank.
121 * @MEM_RDR: Registered SDR SDRAM 121 * @MEM_RDR: Registered SDR SDRAM
122 * @MEM_DDR: Double data rate SDRAM 122 * @MEM_DDR: Double data rate SDRAM
123 * http://en.wikipedia.org/wiki/DDR_SDRAM 123 * http://en.wikipedia.org/wiki/DDR_SDRAM
124 * @MEM_RDDR: Registered Double data rate SDRAM 124 * @MEM_RDDR: Registered Double data rate SDRAM
125 * This is a variant of the DDR memories. 125 * This is a variant of the DDR memories.
126 * A registered memory has a buffer inside it, hiding 126 * A registered memory has a buffer inside it, hiding
127 * part of the memory details from the memory controller. 127 * part of the memory details from the memory controller.
128 * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. 128 * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers.
129 * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. 129 * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F.
130 * Those memories are labeled as "PC2-" instead of "PC" to 130 * Those memories are labeled as "PC2-" instead of "PC" to
131 * differentiate them from DDR. 131 * differentiate them from DDR.
132 * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 132 * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205
133 * and JESD206. 133 * and JESD206.
134 * Those memories are accessed per DIMM slot, and not by 134 * Those memories are accessed per DIMM slot, and not by
135 * a chip select signal. 135 * a chip select signal.
136 * @MEM_RDDR2: Registered DDR2 RAM 136 * @MEM_RDDR2: Registered DDR2 RAM
137 * This is a variant of the DDR2 memories. 137 * This is a variant of the DDR2 memories.
138 * @MEM_XDR: Rambus XDR 138 * @MEM_XDR: Rambus XDR
139 * It is an evolution of the original RAMBUS memories, 139 * It is an evolution of the original RAMBUS memories,
140 * created to compete with DDR2. It was not used on any 140 * created to compete with DDR2. It was not used on any
141 * x86 arch, but the cell_edac PPC memory controller uses it. 141 * x86 arch, but the cell_edac PPC memory controller uses it.
142 * @MEM_DDR3: DDR3 RAM 142 * @MEM_DDR3: DDR3 RAM
143 * @MEM_RDDR3: Registered DDR3 RAM 143 * @MEM_RDDR3: Registered DDR3 RAM
144 * This is a variant of the DDR3 memories. 144 * This is a variant of the DDR3 memories.
145 */ 145 */
146 enum mem_type { 146 enum mem_type {
147 MEM_EMPTY = 0, 147 MEM_EMPTY = 0,
148 MEM_RESERVED, 148 MEM_RESERVED,
149 MEM_UNKNOWN, 149 MEM_UNKNOWN,
150 MEM_FPM, 150 MEM_FPM,
151 MEM_EDO, 151 MEM_EDO,
152 MEM_BEDO, 152 MEM_BEDO,
153 MEM_SDR, 153 MEM_SDR,
154 MEM_RDR, 154 MEM_RDR,
155 MEM_DDR, 155 MEM_DDR,
156 MEM_RDDR, 156 MEM_RDDR,
157 MEM_RMBS, 157 MEM_RMBS,
158 MEM_DDR2, 158 MEM_DDR2,
159 MEM_FB_DDR2, 159 MEM_FB_DDR2,
160 MEM_RDDR2, 160 MEM_RDDR2,
161 MEM_XDR, 161 MEM_XDR,
162 MEM_DDR3, 162 MEM_DDR3,
163 MEM_RDDR3, 163 MEM_RDDR3,
164 }; 164 };
165 165
166 #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) 166 #define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
167 #define MEM_FLAG_RESERVED BIT(MEM_RESERVED) 167 #define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
168 #define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN) 168 #define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
169 #define MEM_FLAG_FPM BIT(MEM_FPM) 169 #define MEM_FLAG_FPM BIT(MEM_FPM)
170 #define MEM_FLAG_EDO BIT(MEM_EDO) 170 #define MEM_FLAG_EDO BIT(MEM_EDO)
171 #define MEM_FLAG_BEDO BIT(MEM_BEDO) 171 #define MEM_FLAG_BEDO BIT(MEM_BEDO)
172 #define MEM_FLAG_SDR BIT(MEM_SDR) 172 #define MEM_FLAG_SDR BIT(MEM_SDR)
173 #define MEM_FLAG_RDR BIT(MEM_RDR) 173 #define MEM_FLAG_RDR BIT(MEM_RDR)
174 #define MEM_FLAG_DDR BIT(MEM_DDR) 174 #define MEM_FLAG_DDR BIT(MEM_DDR)
175 #define MEM_FLAG_RDDR BIT(MEM_RDDR) 175 #define MEM_FLAG_RDDR BIT(MEM_RDDR)
176 #define MEM_FLAG_RMBS BIT(MEM_RMBS) 176 #define MEM_FLAG_RMBS BIT(MEM_RMBS)
177 #define MEM_FLAG_DDR2 BIT(MEM_DDR2) 177 #define MEM_FLAG_DDR2 BIT(MEM_DDR2)
178 #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) 178 #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
179 #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) 179 #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
180 #define MEM_FLAG_XDR BIT(MEM_XDR) 180 #define MEM_FLAG_XDR BIT(MEM_XDR)
181 #define MEM_FLAG_DDR3 BIT(MEM_DDR3) 181 #define MEM_FLAG_DDR3 BIT(MEM_DDR3)
182 #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) 182 #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
183 183
184 /** 184 /**
185 * enum edac-type - Error Detection and Correction capabilities and mode 185 * enum edac-type - Error Detection and Correction capabilities and mode
186 * @EDAC_UNKNOWN: Unknown if ECC is available 186 * @EDAC_UNKNOWN: Unknown if ECC is available
187 * @EDAC_NONE: Doesn't support ECC 187 * @EDAC_NONE: Doesn't support ECC
188 * @EDAC_RESERVED: Reserved ECC type 188 * @EDAC_RESERVED: Reserved ECC type
189 * @EDAC_PARITY: Detects parity errors 189 * @EDAC_PARITY: Detects parity errors
190 * @EDAC_EC: Error Checking - no correction 190 * @EDAC_EC: Error Checking - no correction
191 * @EDAC_SECDED: Single bit error correction, Double detection 191 * @EDAC_SECDED: Single bit error correction, Double detection
192 * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist? 192 * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist?
193 * @EDAC_S4ECD4ED: Chipkill x4 devices 193 * @EDAC_S4ECD4ED: Chipkill x4 devices
194 * @EDAC_S8ECD8ED: Chipkill x8 devices 194 * @EDAC_S8ECD8ED: Chipkill x8 devices
195 * @EDAC_S16ECD16ED: Chipkill x16 devices 195 * @EDAC_S16ECD16ED: Chipkill x16 devices
196 */ 196 */
197 enum edac_type { 197 enum edac_type {
198 EDAC_UNKNOWN = 0, 198 EDAC_UNKNOWN = 0,
199 EDAC_NONE, 199 EDAC_NONE,
200 EDAC_RESERVED, 200 EDAC_RESERVED,
201 EDAC_PARITY, 201 EDAC_PARITY,
202 EDAC_EC, 202 EDAC_EC,
203 EDAC_SECDED, 203 EDAC_SECDED,
204 EDAC_S2ECD2ED, 204 EDAC_S2ECD2ED,
205 EDAC_S4ECD4ED, 205 EDAC_S4ECD4ED,
206 EDAC_S8ECD8ED, 206 EDAC_S8ECD8ED,
207 EDAC_S16ECD16ED, 207 EDAC_S16ECD16ED,
208 }; 208 };
209 209
210 #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) 210 #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
211 #define EDAC_FLAG_NONE BIT(EDAC_NONE) 211 #define EDAC_FLAG_NONE BIT(EDAC_NONE)
212 #define EDAC_FLAG_PARITY BIT(EDAC_PARITY) 212 #define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
213 #define EDAC_FLAG_EC BIT(EDAC_EC) 213 #define EDAC_FLAG_EC BIT(EDAC_EC)
214 #define EDAC_FLAG_SECDED BIT(EDAC_SECDED) 214 #define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
215 #define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED) 215 #define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
216 #define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED) 216 #define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
217 #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) 217 #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
218 #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) 218 #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
219 219
220 /** 220 /**
221 * enum scrub_type - scrubbing capabilities 221 * enum scrub_type - scrubbing capabilities
222 * @SCRUB_UNKNOWN: Unknown if scrubber is available 222 * @SCRUB_UNKNOWN: Unknown if scrubber is available
223 * @SCRUB_NONE: No scrubber 223 * @SCRUB_NONE: No scrubber
224 * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing 224 * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing
225 * @SCRUB_SW_SRC: Software scrub only errors 225 * @SCRUB_SW_SRC: Software scrub only errors
226 * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error 226 * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error
227 * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable 227 * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable
228 * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing 228 * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing
229 * @SCRUB_HW_SRC: Hardware scrub only errors 229 * @SCRUB_HW_SRC: Hardware scrub only errors
230 * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error 230 * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error
231 * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable 231 * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable
232 */ 232 */
233 enum scrub_type { 233 enum scrub_type {
234 SCRUB_UNKNOWN = 0, 234 SCRUB_UNKNOWN = 0,
235 SCRUB_NONE, 235 SCRUB_NONE,
236 SCRUB_SW_PROG, 236 SCRUB_SW_PROG,
237 SCRUB_SW_SRC, 237 SCRUB_SW_SRC,
238 SCRUB_SW_PROG_SRC, 238 SCRUB_SW_PROG_SRC,
239 SCRUB_SW_TUNABLE, 239 SCRUB_SW_TUNABLE,
240 SCRUB_HW_PROG, 240 SCRUB_HW_PROG,
241 SCRUB_HW_SRC, 241 SCRUB_HW_SRC,
242 SCRUB_HW_PROG_SRC, 242 SCRUB_HW_PROG_SRC,
243 SCRUB_HW_TUNABLE 243 SCRUB_HW_TUNABLE
244 }; 244 };
245 245
246 #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) 246 #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
247 #define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) 247 #define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC)
248 #define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) 248 #define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC)
249 #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE) 249 #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE)
250 #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) 250 #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG)
251 #define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) 251 #define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC)
252 #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) 252 #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC)
253 #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) 253 #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
254 254
255 /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ 255 /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
256 256
257 /* EDAC internal operation states */ 257 /* EDAC internal operation states */
258 #define OP_ALLOC 0x100 258 #define OP_ALLOC 0x100
259 #define OP_RUNNING_POLL 0x201 259 #define OP_RUNNING_POLL 0x201
260 #define OP_RUNNING_INTERRUPT 0x202 260 #define OP_RUNNING_INTERRUPT 0x202
261 #define OP_RUNNING_POLL_INTR 0x203 261 #define OP_RUNNING_POLL_INTR 0x203
262 #define OP_OFFLINE 0x300 262 #define OP_OFFLINE 0x300
263 263
264 /* 264 /*
265 * Concepts used in the EDAC subsystem 265 * Concepts used in the EDAC subsystem
266 * 266 *
267 * There are several things to be aware of that aren't at all obvious: 267 * There are several things to be aware of that aren't at all obvious:
268 * 268 *
269 * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc.. 269 * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
270 * 270 *
271 * These are some of the many terms that are thrown about that don't always 271 * These are some of the many terms that are thrown about that don't always
272 * mean what people think they mean (Inconceivable!). In the interest of 272 * mean what people think they mean (Inconceivable!). In the interest of
273 * creating a common ground for discussion, terms and their definitions 273 * creating a common ground for discussion, terms and their definitions
274 * will be established. 274 * will be established.
275 * 275 *
276 * Memory devices: The individual DRAM chips on a memory stick. These 276 * Memory devices: The individual DRAM chips on a memory stick. These
277 * devices commonly output 4 and 8 bits each (x4, x8). 277 * devices commonly output 4 and 8 bits each (x4, x8).
278 * Grouping several of these in parallel provides the 278 * Grouping several of these in parallel provides the
279 * number of bits that the memory controller expects: 279 * number of bits that the memory controller expects:
280 * typically 72 bits, in order to provide 64 bits + 280 * typically 72 bits, in order to provide 64 bits +
281 * 8 bits of ECC data. 281 * 8 bits of ECC data.
282 * 282 *
283 * Memory Stick: A printed circuit board that aggregates multiple 283 * Memory Stick: A printed circuit board that aggregates multiple
284 * memory devices in parallel. In general, this is the 284 * memory devices in parallel. In general, this is the
285 * Field Replaceable Unit (FRU) which gets replaced, in 285 * Field Replaceable Unit (FRU) which gets replaced, in
286 * the case of excessive errors. Most often it is also 286 * the case of excessive errors. Most often it is also
287 * called DIMM (Dual Inline Memory Module). 287 * called DIMM (Dual Inline Memory Module).
288 * 288 *
289 * Memory Socket: A physical connector on the motherboard that accepts 289 * Memory Socket: A physical connector on the motherboard that accepts
290 * a single memory stick. Also called a "slot" in several 290 * a single memory stick. Also called a "slot" in several
291 * datasheets. 291 * datasheets.
292 * 292 *
293 * Channel: A memory controller channel, responsible for communicating 293 * Channel: A memory controller channel, responsible for communicating
294 * with a group of DIMMs. Each channel has its own 294 * with a group of DIMMs. Each channel has its own
295 * independent control (command) and data bus, and can 295 * independent control (command) and data bus, and can
296 * be used independently or grouped with other channels. 296 * be used independently or grouped with other channels.
297 * 297 *
298 * Branch: Typically the highest level of the hierarchy on a 298 * Branch: Typically the highest level of the hierarchy on a
299 * Fully-Buffered DIMM memory controller. 299 * Fully-Buffered DIMM memory controller.
300 * Typically, it contains two channels. 300 * Typically, it contains two channels.
301 * Two channels at the same branch can be used in single 301 * Two channels at the same branch can be used in single
302 * mode or in lockstep mode. 302 * mode or in lockstep mode.
303 * When lockstep is enabled, the cacheline is doubled, 303 * When lockstep is enabled, the cacheline is doubled,
304 * but it generally brings some performance penalty. 304 * but it generally brings some performance penalty.
305 * Also, it is generally not possible to point to just one 305 * Also, it is generally not possible to point to just one
306 * memory stick when an error occurs, as the error 306 * memory stick when an error occurs, as the error
307 * correction code is calculated using two DIMMs instead 307 * correction code is calculated using two DIMMs instead
308 * of one. Due to that, it is capable of correcting more 308 * of one. Due to that, it is capable of correcting more
309 * errors than in single mode. 309 * errors than in single mode.
310 * 310 *
311 * Single-channel: The data accessed by the memory controller is contained 311 * Single-channel: The data accessed by the memory controller is contained
312 * in one DIMM only. E.g. if the data is 64 bits wide, 312 * in one DIMM only. E.g. if the data is 64 bits wide,
313 * the data flows to the CPU using one 64-bit parallel 313 * the data flows to the CPU using one 64-bit parallel
314 * access. 314 * access.
315 * Typically used with SDR, DDR, DDR2 and DDR3 memories. 315 * Typically used with SDR, DDR, DDR2 and DDR3 memories.
316 * FB-DIMM and RAMBUS use a different concept for channel, 316 * FB-DIMM and RAMBUS use a different concept for channel,
317 * so this concept doesn't apply there. 317 * so this concept doesn't apply there.
318 * 318 *
319 * Double-channel: The data accessed by the memory controller is 319 * Double-channel: The data accessed by the memory controller is
320 * interleaved across two DIMMs, accessed at the same time. 320 * interleaved across two DIMMs, accessed at the same time.
321 * E.g. if each DIMM is 64 bits wide (72 bits with ECC), 321 * E.g. if each DIMM is 64 bits wide (72 bits with ECC),
322 * the data flows to the CPU using a 128-bit parallel 322 * the data flows to the CPU using a 128-bit parallel
323 * access. 323 * access.
324 * 324 *
325 * Chip-select row: This is the name of the DRAM signal used to select the 325 * Chip-select row: This is the name of the DRAM signal used to select the
326 * DRAM ranks to be accessed. Common chip-select rows for 326 * DRAM ranks to be accessed. Common chip-select rows for
327 * single channel are 64 bits, for dual channel 128 bits. 327 * single channel are 64 bits, for dual channel 128 bits.
328 * It may not be visible to the memory controller, as some 328 * It may not be visible to the memory controller, as some
329 * DIMM types have a memory buffer that can hide direct 329 * DIMM types have a memory buffer that can hide direct
330 * access to it from the Memory Controller. 330 * access to it from the Memory Controller.
331 * 331 *
332 * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. 332 * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
333 * Motherboards commonly drive two chip-select pins to 333 * Motherboards commonly drive two chip-select pins to
334 * a memory stick. A single-ranked stick will occupy 334 * a memory stick. A single-ranked stick will occupy
335 * only one of those rows. The other will be unused. 335 * only one of those rows. The other will be unused.
336 * 336 *
337 * Double-Ranked stick: A double-ranked stick has two chip-select rows which 337 * Double-Ranked stick: A double-ranked stick has two chip-select rows which
338 * access different sets of memory devices. The two 338 * access different sets of memory devices. The two
339 * rows cannot be accessed concurrently. 339 * rows cannot be accessed concurrently.
340 * 340 *
341 * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. 341 * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
342 * A double-sided stick has two chip-select rows which 342 * A double-sided stick has two chip-select rows which
343 * access different sets of memory devices. The two 343 * access different sets of memory devices. The two
344 * rows cannot be accessed concurrently. "Double-sided" 344 * rows cannot be accessed concurrently. "Double-sided"
345 * is irrespective of the memory devices being mounted 345 * is irrespective of the memory devices being mounted
346 * on both sides of the memory stick. 346 * on both sides of the memory stick.
347 * 347 *
348 * Socket set: All of the memory sticks that are required for 348 * Socket set: All of the memory sticks that are required for
349 * a single memory access or all of the memory sticks 349 * a single memory access or all of the memory sticks
350 * spanned by a chip-select row. A single socket set 350 * spanned by a chip-select row. A single socket set
351 * has two chip-select rows and if double-sided sticks 351 * has two chip-select rows and if double-sided sticks
352 * are used these will occupy those chip-select rows. 352 * are used these will occupy those chip-select rows.
353 * 353 *
354 * Bank: This term is avoided because it is ambiguous when 354 * Bank: This term is avoided because it is ambiguous when
355 * one needs to distinguish between chip-select rows and 355 * one needs to distinguish between chip-select rows and
356 * socket sets. 356 * socket sets.
357 * 357 *
358 * Controller pages: 358 * Controller pages:
359 * 359 *
360 * Physical pages: 360 * Physical pages:
361 * 361 *
362 * Virtual pages: 362 * Virtual pages:
363 * 363 *
364 * 364 *
365 * STRUCTURE ORGANIZATION AND CHOICES 365 * STRUCTURE ORGANIZATION AND CHOICES
366 * 366 *
367 * 367 *
368 * 368 *
369 * PS - I enjoyed writing all that about as much as you enjoyed reading it. 369 * PS - I enjoyed writing all that about as much as you enjoyed reading it.
370 */ 370 */
371 371
372 /** 372 /**
373 * enum edac_mc_layer - memory controller hierarchy layer 373 * enum edac_mc_layer - memory controller hierarchy layer
374 * 374 *
375 * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch" 375 * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
376 * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel" 376 * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
377 * @EDAC_MC_LAYER_SLOT: memory layer is named "slot" 377 * @EDAC_MC_LAYER_SLOT: memory layer is named "slot"
378 * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select" 378 * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select"
379 * 379 *
380 * This enum is used by the drivers to tell edac_mc_sysfs what name should 380 * This enum is used by the drivers to tell edac_mc_sysfs what name should
381 * be used when describing a memory stick location. 381 * be used when describing a memory stick location.
382 */ 382 */
383 enum edac_mc_layer_type { 383 enum edac_mc_layer_type {
384 EDAC_MC_LAYER_BRANCH, 384 EDAC_MC_LAYER_BRANCH,
385 EDAC_MC_LAYER_CHANNEL, 385 EDAC_MC_LAYER_CHANNEL,
386 EDAC_MC_LAYER_SLOT, 386 EDAC_MC_LAYER_SLOT,
387 EDAC_MC_LAYER_CHIP_SELECT, 387 EDAC_MC_LAYER_CHIP_SELECT,
388 }; 388 };
389 389
390 /** 390 /**
391 * struct edac_mc_layer - describes the memory controller hierarchy 391 * struct edac_mc_layer - describes the memory controller hierarchy
392 * @type: layer type 392 * @type: layer type
393 * @size: number of components per layer. For example, 393 * @size: number of components per layer. For example,
394 * if the channel layer has two channels, size = 2 394 * if the channel layer has two channels, size = 2
395 * @is_virt_csrow: This layer is part of the "csrow" when old API 395 * @is_virt_csrow: This layer is part of the "csrow" when old API
396 * compatibility mode is enabled. Otherwise, it is 396 * compatibility mode is enabled. Otherwise, it is
397 * a channel 397 * a channel
398 */ 398 */
399 struct edac_mc_layer { 399 struct edac_mc_layer {
400 enum edac_mc_layer_type type; 400 enum edac_mc_layer_type type;
401 unsigned size; 401 unsigned size;
402 bool is_virt_csrow; 402 bool is_virt_csrow;
403 }; 403 };
404 404
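As an illustration of how a driver fills this in, a sketch only: the two-layer hierarchy and its sizes are made up, struct pvt_data is hypothetical, and it assumes the edac_mc_alloc() signature introduced by this patch series.

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 2;			/* hypothetical: two channels */
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = 4;			/* hypothetical: four slots per channel */
	layers[1].is_virt_csrow = true;		/* slots back the legacy csrow API */

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct pvt_data));	/* pvt_data is hypothetical */
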
405 /* 405 /*
406 * Maximum number of layers used by the memory controller to uniquely 406 * Maximum number of layers used by the memory controller to uniquely
407 * identify a single memory stick. 407 * identify a single memory stick.
408 * NOTE: Changing this constant requires changing not only the constant 408 * NOTE: Changing this constant requires changing not only the constant
409 * below, but also the existing code at the core, as parts of it 409 * below, but also the existing code at the core, as parts of it
410 * are optimized for 3 layers. 410 * are optimized for 3 layers.
411 */ 411 */
412 #define EDAC_MAX_LAYERS 3 412 #define EDAC_MAX_LAYERS 3
413 413
414 /** 414 /**
415 * EDAC_DIMM_PTR - Macro responsible for finding a pointer inside a pointer array 415 * EDAC_DIMM_OFF - Macro responsible for computing the offset inside a pointer array
416 * for the element given by [layer0,layer1,layer2] position 416 * for the element given by [layer0,layer1,layer2] position
417 * 417 *
418 * @layers: a struct edac_mc_layer array, describing how many elements 418 * @layers: a struct edac_mc_layer array, describing how many elements
419 * were allocated for each layer 419 * were allocated for each layer
420 * @var: name of the var where we want to get the pointer
421 * (like mci->dimms)
422 * @nlayers: Number of layers in the @layers array 420 * @nlayers: Number of layers in the @layers array
423 * @layer0: layer0 position 421 * @layer0: layer0 position
424 * @layer1: layer1 position. Unused if n_layers < 2 422 * @layer1: layer1 position. Unused if n_layers < 2
425 * @layer2: layer2 position. Unused if n_layers < 3 423 * @layer2: layer2 position. Unused if n_layers < 3
426 * 424 *
427 * For 1 layer, this macro returns &var[layer0] 425 * For 1 layer, this macro returns &var[layer0] - &var
428 * For 2 layers, this macro is similar to allocate a bi-dimensional array 426 * For 2 layers, this macro is similar to allocate a bi-dimensional array
429 * and to return "&var[layer0][layer1]" 427 * and to return "&var[layer0][layer1] - &var"
430 * For 3 layers, this macro is similar to allocate a tri-dimensional array 428 * For 3 layers, this macro is similar to allocate a tri-dimensional array
431 * and to return "&var[layer0][layer1][layer2]" 429 * and to return "&var[layer0][layer1][layer2] - &var"
432 * 430 *
433 * A loop could be used here to make it more generic, but, as we only have 431 * A loop could be used here to make it more generic, but, as we only have
434 * 3 layers, this is a little faster. 432 * 3 layers, this is a little faster.
435 * By design, layers can never be 0 or more than 3. If that ever happens, 433 * By design, layers can never be 0 or more than 3. If that ever happens,
436 * a NULL is returned, causing an OOPS during the memory allocation routine, 434 * a NULL is returned, causing an OOPS during the memory allocation routine,
437 * which would point out to the developer that he's doing something wrong. 435 * which would point out to the developer that he's doing something wrong.
438 */ 436 */
439 #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ 437 #define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \
440 typeof(var) __p; \ 438 int __i; \
441 if ((nlayers) == 1) \ 439 if ((nlayers) == 1) \
442 __p = &var[layer0]; \ 440 __i = layer0; \
443 else if ((nlayers) == 2) \ 441 else if ((nlayers) == 2) \
444 __p = &var[(layer1) + ((layers[1]).size * (layer0))]; \ 442 __i = (layer1) + ((layers[1]).size * (layer0)); \
445 else if ((nlayers) == 3) \ 443 else if ((nlayers) == 3) \
446 __p = &var[(layer2) + ((layers[2]).size * ((layer1) + \ 444 __i = (layer2) + ((layers[2]).size * ((layer1) + \
447 ((layers[1]).size * (layer0))))]; \ 445 ((layers[1]).size * (layer0)))); \
448 else \ 446 else \
447 __i = -EINVAL; \
448 __i; \
449 })
450
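The computation is a plain row-major flattening. A worked example, with made-up layer sizes:

	/*
	 * Hypothetical 3-layer setup with sizes {2, 2, 4}:
	 *
	 *   off = layer2 + size2 * (layer1 + size1 * layer0)
	 *
	 * so position (1, 0, 3) yields 3 + 4 * (0 + 2 * 1) = 11.
	 */
	int off = EDAC_DIMM_OFF(layers, 3, 1, 0, 3);
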
451 /**
452 * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array
453 * for the element given by [layer0,layer1,layer2] position
454 *
455 * @layers: a struct edac_mc_layer array, describing how many elements
456 * were allocated for each layer
457 * @var: name of the var where we want to get the pointer
458 * (like mci->dimms)
459 * @nlayers: Number of layers in the @layers array
460 * @layer0: layer0 position
461 * @layer1: layer1 position. Unused if n_layers < 2
462 * @layer2: layer2 position. Unused if n_layers < 3
463 *
464 * For 1 layer, this macro returns &var[layer0]
465 * For 2 layers, this macro is similar to allocate a bi-dimensional array
466 * and to return "&var[layer0][layer1]"
467 * For 3 layers, this macro is similar to allocate a tri-dimensional array
468 * and to return "&var[layer0][layer1][layer2]"
469 */
470 #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
471 typeof(*var) __p; \
472 int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \
473 if (___i < 0) \
449 __p = NULL; \ 474 __p = NULL; \
475 else \
476 __p = (var)[___i]; \
450 __p; \ 477 __p; \
451 }) 478 })
452 479
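A usage sketch (chan, slot and nr_pages stand in for hypothetical driver variables): since mci->dimms is now an array of pointers, the macro indexes it and hands back the struct dimm_info pointer, or NULL on a bogus layer count.

	struct dimm_info *dimm;

	dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
			     chan, slot, 0);
	if (dimm)
		dimm->nr_pages = nr_pages;
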
453 struct dimm_info { 480 struct dimm_info {
454 struct device dev; 481 struct device dev;
455 482
456 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ 483 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
457 484
458 /* Memory location data */ 485 /* Memory location data */
459 unsigned location[EDAC_MAX_LAYERS]; 486 unsigned location[EDAC_MAX_LAYERS];
460 487
461 struct mem_ctl_info *mci; /* the parent */ 488 struct mem_ctl_info *mci; /* the parent */
462 489
463 u32 grain; /* granularity of reported error in bytes */ 490 u32 grain; /* granularity of reported error in bytes */
464 enum dev_type dtype; /* memory device type */ 491 enum dev_type dtype; /* memory device type */
465 enum mem_type mtype; /* memory dimm type */ 492 enum mem_type mtype; /* memory dimm type */
466 enum edac_type edac_mode; /* EDAC mode for this dimm */ 493 enum edac_type edac_mode; /* EDAC mode for this dimm */
467 494
468 u32 nr_pages; /* number of pages on this dimm */ 495 u32 nr_pages; /* number of pages on this dimm */
469 496
470 unsigned csrow, cschannel; /* Points to the old API data */ 497 unsigned csrow, cschannel; /* Points to the old API data */
471 }; 498 };
472 499
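The location[] array holds the DIMM's coordinate at each configured layer, indexed in the same order as mci->layers. A debugging sketch, assuming a fully populated mci:

	/* Sketch: dump a DIMM's per-layer position */
	int i;

	for (i = 0; i < dimm->mci->n_layers; i++)
		pr_debug("layer %d (type %d): pos %u\n",
			 i, dimm->mci->layers[i].type, dimm->location[i]);
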
473 /** 500 /**
474 * struct rank_info - contains the information for one DIMM rank 501 * struct rank_info - contains the information for one DIMM rank
475 * 502 *
476 * @chan_idx: channel number where the rank is (typically, 0 or 1) 503 * @chan_idx: channel number where the rank is (typically, 0 or 1)
477 * @ce_count: number of correctable errors for this rank 504 * @ce_count: number of correctable errors for this rank
478 * @csrow: A pointer to the chip select row structure (the parent 505 * @csrow: A pointer to the chip select row structure (the parent
479 * structure). The location of the rank is given by 506 * structure). The location of the rank is given by
480 * the (csrow->csrow_idx, chan_idx) vector. 507 * the (csrow->csrow_idx, chan_idx) vector.
481 * @dimm: A pointer to the DIMM structure, where the DIMM label 508 * @dimm: A pointer to the DIMM structure, where the DIMM label
482 * information is stored. 509 * information is stored.
483 * 510 *
484 * FIXME: Currently, the EDAC core model will assume one DIMM per rank. 511 * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
485 * This is a bad assumption, but it makes this patch easier. Later 512 * This is a bad assumption, but it makes this patch easier. Later
486 * patches in this series will fix this issue. 513 * patches in this series will fix this issue.
487 */ 514 */
488 struct rank_info { 515 struct rank_info {
489 struct device dev;
490
491 int chan_idx; 516 int chan_idx;
492 struct csrow_info *csrow; 517 struct csrow_info *csrow;
493 struct dimm_info *dimm; 518 struct dimm_info *dimm;
494 519
495 u32 ce_count; /* Correctable Errors for this csrow */ 520 u32 ce_count; /* Correctable Errors for this csrow */
496 }; 521 };
497 522
498 struct csrow_info { 523 struct csrow_info {
499 struct device dev; 524 struct device dev;
500 525
501 /* Used only by edac_mc_find_csrow_by_page() */ 526 /* Used only by edac_mc_find_csrow_by_page() */
502 unsigned long first_page; /* first page number in csrow */ 527 unsigned long first_page; /* first page number in csrow */
503 unsigned long last_page; /* last page number in csrow */ 528 unsigned long last_page; /* last page number in csrow */
504 unsigned long page_mask; /* used for interleaving - 529 unsigned long page_mask; /* used for interleaving -
505 * 0UL for non intlv */ 530 * 0UL for non intlv */
506 531
507 int csrow_idx; /* the chip-select row */ 532 int csrow_idx; /* the chip-select row */
508 533
509 u32 ue_count; /* Uncorrectable Errors for this csrow */ 534 u32 ue_count; /* Uncorrectable Errors for this csrow */
510 u32 ce_count; /* Correctable Errors for this csrow */ 535 u32 ce_count; /* Correctable Errors for this csrow */
511 536
512 struct mem_ctl_info *mci; /* the parent */ 537 struct mem_ctl_info *mci; /* the parent */
513 538
514 /* channel information for this csrow */ 539 /* channel information for this csrow */
515 u32 nr_channels; 540 u32 nr_channels;
516 struct rank_info *channels; 541 struct rank_info **channels;
517 }; 542 };
518 543
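With the per-kobject allocation scheme, csrows and channels become arrays of pointers, so walking the legacy csrow view takes a double indirection. A sketch:

	/* Sketch: sum correctable errors over the legacy csrow view */
	u32 ce = 0;
	int row, chan;

	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *csrow = mci->csrows[row];

		for (chan = 0; chan < csrow->nr_channels; chan++)
			ce += csrow->channels[chan]->ce_count;
	}
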
519 /* 544 /*
520 * struct errcount_attribute - used to store the several error counts 545 * struct errcount_attribute - used to store the several error counts
521 */ 546 */
522 struct errcount_attribute_data { 547 struct errcount_attribute_data {
523 int n_layers; 548 int n_layers;
524 int pos[EDAC_MAX_LAYERS]; 549 int pos[EDAC_MAX_LAYERS];
525 int layer0, layer1, layer2; 550 int layer0, layer1, layer2;
526 }; 551 };
527 552
528 /* MEMORY controller information structure 553 /* MEMORY controller information structure
529 */ 554 */
530 struct mem_ctl_info { 555 struct mem_ctl_info {
531 struct device dev; 556 struct device dev;
532 struct bus_type bus; 557 struct bus_type bus;
533 558
534 struct list_head link; /* for global list of mem_ctl_info structs */ 559 struct list_head link; /* for global list of mem_ctl_info structs */
535 560
536 struct module *owner; /* Module owner of this control struct */ 561 struct module *owner; /* Module owner of this control struct */
537 562
538 unsigned long mtype_cap; /* memory types supported by mc */ 563 unsigned long mtype_cap; /* memory types supported by mc */
539 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ 564 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
540 unsigned long edac_cap; /* configuration capabilities - this is 565 unsigned long edac_cap; /* configuration capabilities - this is
541 * closely related to edac_ctl_cap. The 566 * closely related to edac_ctl_cap. The
542 * difference is that the controller may be 567 * difference is that the controller may be
543 * capable of s4ecd4ed which would be listed 568 * capable of s4ecd4ed which would be listed
544 * in edac_ctl_cap, but if channels aren't 569 * in edac_ctl_cap, but if channels aren't
545 * capable of s4ecd4ed then the edac_cap would 570 * capable of s4ecd4ed then the edac_cap would
546 * not have that capability. 571 * not have that capability.
547 */ 572 */
548 unsigned long scrub_cap; /* chipset scrub capabilities */ 573 unsigned long scrub_cap; /* chipset scrub capabilities */
549 enum scrub_type scrub_mode; /* current scrub mode */ 574 enum scrub_type scrub_mode; /* current scrub mode */
550 575
551 /* Translates sdram memory scrub rate given in bytes/sec to the 576 /* Translates sdram memory scrub rate given in bytes/sec to the
552 internal representation and configures whatever else needs 577 internal representation and configures whatever else needs
553 to be configured. 578 to be configured.
554 */ 579 */
555 int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw); 580 int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);
556 581
557 /* Get the current sdram memory scrub rate from the internal 582 /* Get the current sdram memory scrub rate from the internal
558 representation and converts it to the closest matching 583 representation and converts it to the closest matching
559 bandwidth in bytes/sec. 584 bandwidth in bytes/sec.
560 */ 585 */
561 int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); 586 int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
562 587
563 588
564 /* pointer to edac checking routine */ 589 /* pointer to edac checking routine */
565 void (*edac_check) (struct mem_ctl_info * mci); 590 void (*edac_check) (struct mem_ctl_info * mci);
566 591
567 /* 592 /*
568 * Remaps memory pages: controller pages to physical pages. 593 * Remaps memory pages: controller pages to physical pages.
569 * For most MC's, this will be NULL. 594 * For most MC's, this will be NULL.
570 */ 595 */
571 /* FIXME - why not send the phys page to begin with? */ 596 /* FIXME - why not send the phys page to begin with? */
572 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, 597 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
573 unsigned long page); 598 unsigned long page);
574 int mc_idx; 599 int mc_idx;
575 struct csrow_info *csrows; 600 struct csrow_info **csrows;
576 unsigned nr_csrows, num_cschannel; 601 unsigned nr_csrows, num_cschannel;
577 602
578 /* 603 /*
579 * Memory Controller hierarchy 604 * Memory Controller hierarchy
580 * 605 *
581 * There are basically two types of memory controller: the ones that 606 * There are basically two types of memory controller: the ones that
582 * see memory sticks ("dimms"), and the ones that see memory ranks. 607 * see memory sticks ("dimms"), and the ones that see memory ranks.
583 * All old memory controllers enumerate memories per rank, but most 608 * All old memory controllers enumerate memories per rank, but most
584 * of the recent drivers enumerate memories per DIMM, instead. 609 * of the recent drivers enumerate memories per DIMM, instead.
585 * When the memory controller is per rank, mem_is_per_rank is true. 610 * When the memory controller is per rank, mem_is_per_rank is true.
586 */ 611 */
587 unsigned n_layers; 612 unsigned n_layers;
588 struct edac_mc_layer *layers; 613 struct edac_mc_layer *layers;
589 bool mem_is_per_rank; 614 bool mem_is_per_rank;
590 615
591 /* 616 /*
592 * DIMM info. The entire csrow_info scheme will eventually be removed 617 * DIMM info. The entire csrow_info scheme will eventually be removed
593 */ 618 */
594 unsigned tot_dimms; 619 unsigned tot_dimms;
595 struct dimm_info *dimms; 620 struct dimm_info **dimms;
596 621
597 /* 622 /*
598 * FIXME - what about controllers on other busses? - IDs must be 623 * FIXME - what about controllers on other busses? - IDs must be
599 * unique. dev pointer should be sufficiently unique, but 624 * unique. dev pointer should be sufficiently unique, but
600 * BUS:SLOT.FUNC numbers may not be unique. 625 * BUS:SLOT.FUNC numbers may not be unique.
601 */ 626 */
602 struct device *pdev; 627 struct device *pdev;
603 const char *mod_name; 628 const char *mod_name;
604 const char *mod_ver; 629 const char *mod_ver;
605 const char *ctl_name; 630 const char *ctl_name;
606 const char *dev_name; 631 const char *dev_name;
607 char proc_name[MC_PROC_NAME_MAX_LEN + 1]; 632 char proc_name[MC_PROC_NAME_MAX_LEN + 1];
608 void *pvt_info; 633 void *pvt_info;
609 unsigned long start_time; /* mci load start time (in jiffies) */ 634 unsigned long start_time; /* mci load start time (in jiffies) */
610 635
611 /* 636 /*
612 * drivers shouldn't access those fields directly, as the core 637 * drivers shouldn't access those fields directly, as the core
613 * already handles that. 638 * already handles that.
614 */ 639 */
615 u32 ce_noinfo_count, ue_noinfo_count; 640 u32 ce_noinfo_count, ue_noinfo_count;
616 u32 ue_mc, ce_mc; 641 u32 ue_mc, ce_mc;
617 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; 642 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
618 643
619 struct completion complete; 644 struct completion complete;
620 645
621 /* Additional top controller level attributes, but specified 646 /* Additional top controller level attributes, but specified
622 * by the low level driver. 647 * by the low level driver.
623 * 648 *
624 * Set by the low level driver to provide attributes at the 649 * Set by the low level driver to provide attributes at the
625 * controller level. 650 * controller level.
626 * An array of structures, NULL terminated 651 * An array of structures, NULL terminated
627 * 652 *
628 * If attributes are desired, then set to array of attributes 653 * If attributes are desired, then set to array of attributes
629 * If no attributes are desired, leave NULL 654 * If no attributes are desired, leave NULL
630 */ 655 */
631 const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; 656 const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
632 657
633 /* work struct for this MC */ 658 /* work struct for this MC */
634 struct delayed_work work; 659 struct delayed_work work;
635 660
636 /* the internal state of this controller instance */ 661 /* the internal state of this controller instance */
637 int op_state; 662 int op_state;
638 663
639 #ifdef CONFIG_EDAC_DEBUG 664 #ifdef CONFIG_EDAC_DEBUG
640 struct dentry *debugfs; 665 struct dentry *debugfs;
641 u8 fake_inject_layer[EDAC_MAX_LAYERS]; 666 u8 fake_inject_layer[EDAC_MAX_LAYERS];
642 u32 fake_inject_ue; 667 u32 fake_inject_ue;
643 #endif 668 #endif