Commit 390944439f746824faec51b576f50cb5ef18745b

Authored by Borislav Petkov
1 parent 360b7f3c60

EDAC: Fixup scrubrate manipulation

Make ->{get|set}_sdram_scrub_rate return the actual scrub rate
bandwidth it succeeded in setting, and remove the superfluous output
pointer argument previously used for that. A negative return value still
means that an error occurred while setting the scrub rate. Document this
for future reference.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
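
For reference, the reworked contract can be sketched as follows. This is an
illustrative snippet, not code from this commit: it assumes the callbacks
still hang off struct mem_ctl_info as before, and example_update_scrub_rate()
is a made-up caller.

	#include <linux/printk.h>
	#include "edac_core.h"		/* struct mem_ctl_info */

	/* hypothetical caller exercising the new return-value contract */
	static int example_update_scrub_rate(struct mem_ctl_info *mci, u32 want_bw)
	{
		int new_bw = mci->set_sdram_scrub_rate(mci, want_bw);

		if (new_bw < 0)		/* negative value: setting the rate failed */
			return new_bw;

		/* on success, new_bw is the bandwidth actually programmed */
		pr_info("scrub rate set to %d bytes/sec\n", new_bw);

		return mci->get_sdram_scrub_rate(mci);	/* likewise: bw or -errno */
	}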

Showing 7 changed files with 52 additions and 63 deletions

drivers/edac/amd64_edac.c
1 #include "amd64_edac.h" 1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h> 2 #include <asm/amd_nb.h>
3 3
4 static struct edac_pci_ctl_info *amd64_ctl_pci; 4 static struct edac_pci_ctl_info *amd64_ctl_pci;
5 5
6 static int report_gart_errors; 6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644); 7 module_param(report_gart_errors, int, 0644);
8 8
9 /* 9 /*
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is 10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver. 11 * cleared to prevent re-enabling the hardware by this driver.
12 */ 12 */
13 static int ecc_enable_override; 13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644); 14 module_param(ecc_enable_override, int, 0644);
15 15
16 static struct msr __percpu *msrs; 16 static struct msr __percpu *msrs;
17 17
18 /* 18 /*
19 * count successfully initialized driver instances for setup_pci_device() 19 * count successfully initialized driver instances for setup_pci_device()
20 */ 20 */
21 static atomic_t drv_instances = ATOMIC_INIT(0); 21 static atomic_t drv_instances = ATOMIC_INIT(0);
22 22
23 /* Per-node driver instances */ 23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis; 24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs; 25 static struct ecc_settings **ecc_stngs;
26 26
27 /* 27 /*
28 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and 28 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
29 * later. 29 * later.
30 */ 30 */
31 static int ddr2_dbam_revCG[] = { 31 static int ddr2_dbam_revCG[] = {
32 [0] = 32, 32 [0] = 32,
33 [1] = 64, 33 [1] = 64,
34 [2] = 128, 34 [2] = 128,
35 [3] = 256, 35 [3] = 256,
36 [4] = 512, 36 [4] = 512,
37 [5] = 1024, 37 [5] = 1024,
38 [6] = 2048, 38 [6] = 2048,
39 }; 39 };
40 40
41 static int ddr2_dbam_revD[] = { 41 static int ddr2_dbam_revD[] = {
42 [0] = 32, 42 [0] = 32,
43 [1] = 64, 43 [1] = 64,
44 [2 ... 3] = 128, 44 [2 ... 3] = 128,
45 [4] = 256, 45 [4] = 256,
46 [5] = 512, 46 [5] = 512,
47 [6] = 256, 47 [6] = 256,
48 [7] = 512, 48 [7] = 512,
49 [8 ... 9] = 1024, 49 [8 ... 9] = 1024,
50 [10] = 2048, 50 [10] = 2048,
51 }; 51 };
52 52
53 static int ddr2_dbam[] = { [0] = 128, 53 static int ddr2_dbam[] = { [0] = 128,
54 [1] = 256, 54 [1] = 256,
55 [2 ... 4] = 512, 55 [2 ... 4] = 512,
56 [5 ... 6] = 1024, 56 [5 ... 6] = 1024,
57 [7 ... 8] = 2048, 57 [7 ... 8] = 2048,
58 [9 ... 10] = 4096, 58 [9 ... 10] = 4096,
59 [11] = 8192, 59 [11] = 8192,
60 }; 60 };
61 61
62 static int ddr3_dbam[] = { [0] = -1, 62 static int ddr3_dbam[] = { [0] = -1,
63 [1] = 256, 63 [1] = 256,
64 [2] = 512, 64 [2] = 512,
65 [3 ... 4] = -1, 65 [3 ... 4] = -1,
66 [5 ... 6] = 1024, 66 [5 ... 6] = 1024,
67 [7 ... 8] = 2048, 67 [7 ... 8] = 2048,
68 [9 ... 10] = 4096, 68 [9 ... 10] = 4096,
69 [11] = 8192, 69 [11] = 8192,
70 }; 70 };
71 71
72 /* 72 /*
73 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing 73 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
74 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching- 74 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
75 * or higher value'. 75 * or higher value'.
76 * 76 *
77 *FIXME: Produce a better mapping/linearisation. 77 *FIXME: Produce a better mapping/linearisation.
78 */ 78 */
79 79
80 struct scrubrate scrubrates[] = { 80
81 struct scrubrate {
82 u32 scrubval; /* bit pattern for scrub rate */
83 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
84 } scrubrates[] = {
81 { 0x01, 1600000000UL}, 85 { 0x01, 1600000000UL},
82 { 0x02, 800000000UL}, 86 { 0x02, 800000000UL},
83 { 0x03, 400000000UL}, 87 { 0x03, 400000000UL},
84 { 0x04, 200000000UL}, 88 { 0x04, 200000000UL},
85 { 0x05, 100000000UL}, 89 { 0x05, 100000000UL},
86 { 0x06, 50000000UL}, 90 { 0x06, 50000000UL},
87 { 0x07, 25000000UL}, 91 { 0x07, 25000000UL},
88 { 0x08, 12284069UL}, 92 { 0x08, 12284069UL},
89 { 0x09, 6274509UL}, 93 { 0x09, 6274509UL},
90 { 0x0A, 3121951UL}, 94 { 0x0A, 3121951UL},
91 { 0x0B, 1560975UL}, 95 { 0x0B, 1560975UL},
92 { 0x0C, 781440UL}, 96 { 0x0C, 781440UL},
93 { 0x0D, 390720UL}, 97 { 0x0D, 390720UL},
94 { 0x0E, 195300UL}, 98 { 0x0E, 195300UL},
95 { 0x0F, 97650UL}, 99 { 0x0F, 97650UL},
96 { 0x10, 48854UL}, 100 { 0x10, 48854UL},
97 { 0x11, 24427UL}, 101 { 0x11, 24427UL},
98 { 0x12, 12213UL}, 102 { 0x12, 12213UL},
99 { 0x13, 6101UL}, 103 { 0x13, 6101UL},
100 { 0x14, 3051UL}, 104 { 0x14, 3051UL},
101 { 0x15, 1523UL}, 105 { 0x15, 1523UL},
102 { 0x16, 761UL}, 106 { 0x16, 761UL},
103 { 0x00, 0UL}, /* scrubbing off */ 107 { 0x00, 0UL}, /* scrubbing off */
104 }; 108 };
105 109
106 /* 110 /*
107 * Memory scrubber control interface. For K8, memory scrubbing is handled by 111 * Memory scrubber control interface. For K8, memory scrubbing is handled by
108 * hardware and can involve L2 cache, dcache as well as the main memory. With 112 * hardware and can involve L2 cache, dcache as well as the main memory. With
109 * F10, this is extended to L3 cache scrubbing on CPU models sporting that 113 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
110 * functionality. 114 * functionality.
111 * 115 *
112 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks 116 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
113 * (dram) over to cache lines. This is nasty, so we will use bandwidth in 117 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
114 * bytes/sec for the setting. 118 * bytes/sec for the setting.
115 * 119 *
116 * Currently, we only do dram scrubbing. If the scrubbing is done in software on 120 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
117 * other archs, we might not have access to the caches directly. 121 * other archs, we might not have access to the caches directly.
118 */ 122 */
119 123
120 /* 124 /*
121 * scan the scrub rate mapping table for a close or matching bandwidth value to 125 * scan the scrub rate mapping table for a close or matching bandwidth value to
122 * issue. If requested is too big, then use last maximum value found. 126 * issue. If requested is too big, then use last maximum value found.
123 */ 127 */
124 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) 128 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
125 { 129 {
126 u32 scrubval; 130 u32 scrubval;
127 int i; 131 int i;
128 132
129 /* 133 /*
130 * map the configured rate (new_bw) to a value specific to the AMD64 134 * map the configured rate (new_bw) to a value specific to the AMD64
131 * memory controller and apply to register. Search for the first 135 * memory controller and apply to register. Search for the first
132 * bandwidth entry that is greater or equal than the setting requested 136 * bandwidth entry that is greater or equal than the setting requested
133 * and program that. If at last entry, turn off DRAM scrubbing. 137 * and program that. If at last entry, turn off DRAM scrubbing.
134 */ 138 */
135 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 139 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
136 /* 140 /*
137 * skip scrub rates which aren't recommended 141 * skip scrub rates which aren't recommended
138 * (see F10 BKDG, F3x58) 142 * (see F10 BKDG, F3x58)
139 */ 143 */
140 if (scrubrates[i].scrubval < min_rate) 144 if (scrubrates[i].scrubval < min_rate)
141 continue; 145 continue;
142 146
143 if (scrubrates[i].bandwidth <= new_bw) 147 if (scrubrates[i].bandwidth <= new_bw)
144 break; 148 break;
145 149
146 /* 150 /*
147 * if no suitable bandwidth found, turn off DRAM scrubbing 151 * if no suitable bandwidth found, turn off DRAM scrubbing
148 * entirely by falling back to the last element in the 152 * entirely by falling back to the last element in the
149 * scrubrates array. 153 * scrubrates array.
150 */ 154 */
151 } 155 }
152 156
153 scrubval = scrubrates[i].scrubval; 157 scrubval = scrubrates[i].scrubval;
154 if (scrubval)
155 amd64_info("Setting scrub rate bandwidth: %u\n",
156 scrubrates[i].bandwidth);
157 else
158 amd64_info("Turning scrubbing off.\n");
159 158
160 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); 159 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
161 160
161 if (scrubval)
162 return scrubrates[i].bandwidth;
163
162 return 0; 164 return 0;
163 } 165 }
164 166
165 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) 167 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
166 { 168 {
167 struct amd64_pvt *pvt = mci->pvt_info; 169 struct amd64_pvt *pvt = mci->pvt_info;
168 170
169 return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate); 171 return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
170 } 172 }
171 173
172 static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) 174 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
173 { 175 {
174 struct amd64_pvt *pvt = mci->pvt_info; 176 struct amd64_pvt *pvt = mci->pvt_info;
175 u32 scrubval = 0; 177 u32 scrubval = 0;
176 int status = -1, i; 178 int i, retval = -EINVAL;
177 179
178 amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval); 180 amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
179 181
180 scrubval = scrubval & 0x001F; 182 scrubval = scrubval & 0x001F;
181 183
182 amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval); 184 amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);
183 185
184 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 186 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
185 if (scrubrates[i].scrubval == scrubval) { 187 if (scrubrates[i].scrubval == scrubval) {
186 *bw = scrubrates[i].bandwidth; 188 retval = scrubrates[i].bandwidth;
187 status = 0;
188 break; 189 break;
189 } 190 }
190 } 191 }
191 192 return retval;
192 return status;
193 } 193 }
194 194
195 /* Map from a CSROW entry to the mask entry that operates on it */ 195 /* Map from a CSROW entry to the mask entry that operates on it */
196 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) 196 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
197 { 197 {
198 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) 198 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
199 return csrow; 199 return csrow;
200 else 200 else
201 return csrow >> 1; 201 return csrow >> 1;
202 } 202 }
203 203
204 /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ 204 /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
205 static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow) 205 static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
206 { 206 {
207 if (dct == 0) 207 if (dct == 0)
208 return pvt->dcsb0[csrow]; 208 return pvt->dcsb0[csrow];
209 else 209 else
210 return pvt->dcsb1[csrow]; 210 return pvt->dcsb1[csrow];
211 } 211 }
212 212
213 /* 213 /*
214 * Return the 'mask' address the i'th CS entry. This function is needed because 214 * Return the 'mask' address the i'th CS entry. This function is needed because
215 * there number of DCSM registers on Rev E and prior vs Rev F and later is 215 * there number of DCSM registers on Rev E and prior vs Rev F and later is
216 * different. 216 * different.
217 */ 217 */
218 static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow) 218 static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
219 { 219 {
220 if (dct == 0) 220 if (dct == 0)
221 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)]; 221 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
222 else 222 else
223 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)]; 223 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
224 } 224 }
225 225
226 226
227 /* 227 /*
228 * In *base and *limit, pass back the full 40-bit base and limit physical 228 * In *base and *limit, pass back the full 40-bit base and limit physical
229 * addresses for the node given by node_id. This information is obtained from 229 * addresses for the node given by node_id. This information is obtained from
230 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The 230 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
231 * base and limit addresses are of type SysAddr, as defined at the start of 231 * base and limit addresses are of type SysAddr, as defined at the start of
232 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses 232 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
233 * in the address range they represent. 233 * in the address range they represent.
234 */ 234 */
235 static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, 235 static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
236 u64 *base, u64 *limit) 236 u64 *base, u64 *limit)
237 { 237 {
238 *base = pvt->dram_base[node_id]; 238 *base = pvt->dram_base[node_id];
239 *limit = pvt->dram_limit[node_id]; 239 *limit = pvt->dram_limit[node_id];
240 } 240 }
241 241
242 /* 242 /*
243 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated 243 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
244 * with node_id 244 * with node_id
245 */ 245 */
246 static int amd64_base_limit_match(struct amd64_pvt *pvt, 246 static int amd64_base_limit_match(struct amd64_pvt *pvt,
247 u64 sys_addr, int node_id) 247 u64 sys_addr, int node_id)
248 { 248 {
249 u64 base, limit, addr; 249 u64 base, limit, addr;
250 250
251 amd64_get_base_and_limit(pvt, node_id, &base, &limit); 251 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
252 252
253 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be 253 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
254 * all ones if the most significant implemented address bit is 1. 254 * all ones if the most significant implemented address bit is 1.
255 * Here we discard bits 63-40. See section 3.4.2 of AMD publication 255 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
256 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 256 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
257 * Application Programming. 257 * Application Programming.
258 */ 258 */
259 addr = sys_addr & 0x000000ffffffffffull; 259 addr = sys_addr & 0x000000ffffffffffull;
260 260
261 return (addr >= base) && (addr <= limit); 261 return (addr >= base) && (addr <= limit);
262 } 262 }
263 263
264 /* 264 /*
265 * Attempt to map a SysAddr to a node. On success, return a pointer to the 265 * Attempt to map a SysAddr to a node. On success, return a pointer to the
266 * mem_ctl_info structure for the node that the SysAddr maps to. 266 * mem_ctl_info structure for the node that the SysAddr maps to.
267 * 267 *
268 * On failure, return NULL. 268 * On failure, return NULL.
269 */ 269 */
270 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, 270 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
271 u64 sys_addr) 271 u64 sys_addr)
272 { 272 {
273 struct amd64_pvt *pvt; 273 struct amd64_pvt *pvt;
274 int node_id; 274 int node_id;
275 u32 intlv_en, bits; 275 u32 intlv_en, bits;
276 276
277 /* 277 /*
278 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section 278 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
279 * 3.4.4.2) registers to map the SysAddr to a node ID. 279 * 3.4.4.2) registers to map the SysAddr to a node ID.
280 */ 280 */
281 pvt = mci->pvt_info; 281 pvt = mci->pvt_info;
282 282
283 /* 283 /*
284 * The value of this field should be the same for all DRAM Base 284 * The value of this field should be the same for all DRAM Base
285 * registers. Therefore we arbitrarily choose to read it from the 285 * registers. Therefore we arbitrarily choose to read it from the
286 * register for node 0. 286 * register for node 0.
287 */ 287 */
288 intlv_en = pvt->dram_IntlvEn[0]; 288 intlv_en = pvt->dram_IntlvEn[0];
289 289
290 if (intlv_en == 0) { 290 if (intlv_en == 0) {
291 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { 291 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
292 if (amd64_base_limit_match(pvt, sys_addr, node_id)) 292 if (amd64_base_limit_match(pvt, sys_addr, node_id))
293 goto found; 293 goto found;
294 } 294 }
295 goto err_no_match; 295 goto err_no_match;
296 } 296 }
297 297
298 if (unlikely((intlv_en != 0x01) && 298 if (unlikely((intlv_en != 0x01) &&
299 (intlv_en != 0x03) && 299 (intlv_en != 0x03) &&
300 (intlv_en != 0x07))) { 300 (intlv_en != 0x07))) {
301 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en); 301 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
302 return NULL; 302 return NULL;
303 } 303 }
304 304
305 bits = (((u32) sys_addr) >> 12) & intlv_en; 305 bits = (((u32) sys_addr) >> 12) & intlv_en;
306 306
307 for (node_id = 0; ; ) { 307 for (node_id = 0; ; ) {
308 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) 308 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
309 break; /* intlv_sel field matches */ 309 break; /* intlv_sel field matches */
310 310
311 if (++node_id >= DRAM_REG_COUNT) 311 if (++node_id >= DRAM_REG_COUNT)
312 goto err_no_match; 312 goto err_no_match;
313 } 313 }
314 314
315 /* sanity test for sys_addr */ 315 /* sanity test for sys_addr */
316 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { 316 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
317 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address" 317 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
318 "range for node %d with node interleaving enabled.\n", 318 "range for node %d with node interleaving enabled.\n",
319 __func__, sys_addr, node_id); 319 __func__, sys_addr, node_id);
320 return NULL; 320 return NULL;
321 } 321 }
322 322
323 found: 323 found:
324 return edac_mc_find(node_id); 324 return edac_mc_find(node_id);
325 325
326 err_no_match: 326 err_no_match:
327 debugf2("sys_addr 0x%lx doesn't match any node\n", 327 debugf2("sys_addr 0x%lx doesn't match any node\n",
328 (unsigned long)sys_addr); 328 (unsigned long)sys_addr);
329 329
330 return NULL; 330 return NULL;
331 } 331 }
332 332
333 /* 333 /*
334 * Extract the DRAM CS base address from selected csrow register. 334 * Extract the DRAM CS base address from selected csrow register.
335 */ 335 */
336 static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) 336 static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
337 { 337 {
338 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << 338 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
339 pvt->dcs_shift; 339 pvt->dcs_shift;
340 } 340 }
341 341
342 /* 342 /*
343 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way. 343 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
344 */ 344 */
345 static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) 345 static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
346 { 346 {
347 u64 dcsm_bits, other_bits; 347 u64 dcsm_bits, other_bits;
348 u64 mask; 348 u64 mask;
349 349
350 /* Extract bits from DRAM CS Mask. */ 350 /* Extract bits from DRAM CS Mask. */
351 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; 351 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
352 352
353 other_bits = pvt->dcsm_mask; 353 other_bits = pvt->dcsm_mask;
354 other_bits = ~(other_bits << pvt->dcs_shift); 354 other_bits = ~(other_bits << pvt->dcs_shift);
355 355
356 /* 356 /*
357 * The extracted bits from DCSM belong in the spaces represented by 357 * The extracted bits from DCSM belong in the spaces represented by
358 * the cleared bits in other_bits. 358 * the cleared bits in other_bits.
359 */ 359 */
360 mask = (dcsm_bits << pvt->dcs_shift) | other_bits; 360 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
361 361
362 return mask; 362 return mask;
363 } 363 }
364 364
365 /* 365 /*
366 * @input_addr is an InputAddr associated with the node given by mci. Return the 366 * @input_addr is an InputAddr associated with the node given by mci. Return the
367 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). 367 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
368 */ 368 */
369 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) 369 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
370 { 370 {
371 struct amd64_pvt *pvt; 371 struct amd64_pvt *pvt;
372 int csrow; 372 int csrow;
373 u64 base, mask; 373 u64 base, mask;
374 374
375 pvt = mci->pvt_info; 375 pvt = mci->pvt_info;
376 376
377 /* 377 /*
378 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS 378 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
379 * base/mask register pair, test the condition shown near the start of 379 * base/mask register pair, test the condition shown near the start of
380 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). 380 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
381 */ 381 */
382 for (csrow = 0; csrow < pvt->cs_count; csrow++) { 382 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
383 383
384 /* This DRAM chip select is disabled on this node */ 384 /* This DRAM chip select is disabled on this node */
385 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) 385 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
386 continue; 386 continue;
387 387
388 base = base_from_dct_base(pvt, csrow); 388 base = base_from_dct_base(pvt, csrow);
389 mask = ~mask_from_dct_mask(pvt, csrow); 389 mask = ~mask_from_dct_mask(pvt, csrow);
390 390
391 if ((input_addr & mask) == (base & mask)) { 391 if ((input_addr & mask) == (base & mask)) {
392 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", 392 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
393 (unsigned long)input_addr, csrow, 393 (unsigned long)input_addr, csrow,
394 pvt->mc_node_id); 394 pvt->mc_node_id);
395 395
396 return csrow; 396 return csrow;
397 } 397 }
398 } 398 }
399 399
400 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", 400 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
401 (unsigned long)input_addr, pvt->mc_node_id); 401 (unsigned long)input_addr, pvt->mc_node_id);
402 402
403 return -1; 403 return -1;
404 } 404 }
405 405
406 /* 406 /*
407 * Return the base value defined by the DRAM Base register for the node 407 * Return the base value defined by the DRAM Base register for the node
408 * represented by mci. This function returns the full 40-bit value despite the 408 * represented by mci. This function returns the full 40-bit value despite the
409 * fact that the register only stores bits 39-24 of the value. See section 409 * fact that the register only stores bits 39-24 of the value. See section
410 * 3.4.4.1 (BKDG #26094, K8, revA-E) 410 * 3.4.4.1 (BKDG #26094, K8, revA-E)
411 */ 411 */
412 static inline u64 get_dram_base(struct mem_ctl_info *mci) 412 static inline u64 get_dram_base(struct mem_ctl_info *mci)
413 { 413 {
414 struct amd64_pvt *pvt = mci->pvt_info; 414 struct amd64_pvt *pvt = mci->pvt_info;
415 415
416 return pvt->dram_base[pvt->mc_node_id]; 416 return pvt->dram_base[pvt->mc_node_id];
417 } 417 }
418 418
419 /* 419 /*
420 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) 420 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
421 * for the node represented by mci. Info is passed back in *hole_base, 421 * for the node represented by mci. Info is passed back in *hole_base,
422 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if 422 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
423 * info is invalid. Info may be invalid for either of the following reasons: 423 * info is invalid. Info may be invalid for either of the following reasons:
424 * 424 *
425 * - The revision of the node is not E or greater. In this case, the DRAM Hole 425 * - The revision of the node is not E or greater. In this case, the DRAM Hole
426 * Address Register does not exist. 426 * Address Register does not exist.
427 * 427 *
428 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register, 428 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
429 * indicating that its contents are not valid. 429 * indicating that its contents are not valid.
430 * 430 *
431 * The values passed back in *hole_base, *hole_offset, and *hole_size are 431 * The values passed back in *hole_base, *hole_offset, and *hole_size are
432 * complete 32-bit values despite the fact that the bitfields in the DHAR 432 * complete 32-bit values despite the fact that the bitfields in the DHAR
433 * only represent bits 31-24 of the base and offset values. 433 * only represent bits 31-24 of the base and offset values.
434 */ 434 */
435 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, 435 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
436 u64 *hole_offset, u64 *hole_size) 436 u64 *hole_offset, u64 *hole_size)
437 { 437 {
438 struct amd64_pvt *pvt = mci->pvt_info; 438 struct amd64_pvt *pvt = mci->pvt_info;
439 u64 base; 439 u64 base;
440 440
441 /* only revE and later have the DRAM Hole Address Register */ 441 /* only revE and later have the DRAM Hole Address Register */
442 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { 442 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
443 debugf1(" revision %d for node %d does not support DHAR\n", 443 debugf1(" revision %d for node %d does not support DHAR\n",
444 pvt->ext_model, pvt->mc_node_id); 444 pvt->ext_model, pvt->mc_node_id);
445 return 1; 445 return 1;
446 } 446 }
447 447
448 /* only valid for Fam10h */ 448 /* only valid for Fam10h */
449 if (boot_cpu_data.x86 == 0x10 && 449 if (boot_cpu_data.x86 == 0x10 &&
450 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) { 450 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
451 debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); 451 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
452 return 1; 452 return 1;
453 } 453 }
454 454
455 if ((pvt->dhar & DHAR_VALID) == 0) { 455 if ((pvt->dhar & DHAR_VALID) == 0) {
456 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", 456 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
457 pvt->mc_node_id); 457 pvt->mc_node_id);
458 return 1; 458 return 1;
459 } 459 }
460 460
461 /* This node has Memory Hoisting */ 461 /* This node has Memory Hoisting */
462 462
463 /* +------------------+--------------------+--------------------+----- 463 /* +------------------+--------------------+--------------------+-----
464 * | memory | DRAM hole | relocated | 464 * | memory | DRAM hole | relocated |
465 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | 465 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
466 * | | | DRAM hole | 466 * | | | DRAM hole |
467 * | | | [0x100000000, | 467 * | | | [0x100000000, |
468 * | | | (0x100000000+ | 468 * | | | (0x100000000+ |
469 * | | | (0xffffffff-x))] | 469 * | | | (0xffffffff-x))] |
470 * +------------------+--------------------+--------------------+----- 470 * +------------------+--------------------+--------------------+-----
471 * 471 *
472 * Above is a diagram of physical memory showing the DRAM hole and the 472 * Above is a diagram of physical memory showing the DRAM hole and the
473 * relocated addresses from the DRAM hole. As shown, the DRAM hole 473 * relocated addresses from the DRAM hole. As shown, the DRAM hole
474 * starts at address x (the base address) and extends through address 474 * starts at address x (the base address) and extends through address
475 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the 475 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
476 * addresses in the hole so that they start at 0x100000000. 476 * addresses in the hole so that they start at 0x100000000.
477 */ 477 */
478 478
479 base = dhar_base(pvt->dhar); 479 base = dhar_base(pvt->dhar);
480 480
481 *hole_base = base; 481 *hole_base = base;
482 *hole_size = (0x1ull << 32) - base; 482 *hole_size = (0x1ull << 32) - base;
483 483
484 if (boot_cpu_data.x86 > 0xf) 484 if (boot_cpu_data.x86 > 0xf)
485 *hole_offset = f10_dhar_offset(pvt->dhar); 485 *hole_offset = f10_dhar_offset(pvt->dhar);
486 else 486 else
487 *hole_offset = k8_dhar_offset(pvt->dhar); 487 *hole_offset = k8_dhar_offset(pvt->dhar);
488 488
489 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", 489 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
490 pvt->mc_node_id, (unsigned long)*hole_base, 490 pvt->mc_node_id, (unsigned long)*hole_base,
491 (unsigned long)*hole_offset, (unsigned long)*hole_size); 491 (unsigned long)*hole_offset, (unsigned long)*hole_size);
492 492
493 return 0; 493 return 0;
494 } 494 }
495 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); 495 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
496 496
497 /* 497 /*
498 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is 498 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
499 * assumed that sys_addr maps to the node given by mci. 499 * assumed that sys_addr maps to the node given by mci.
500 * 500 *
501 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section 501 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
502 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a 502 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
503 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled, 503 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
504 * then it is also involved in translating a SysAddr to a DramAddr. Sections 504 * then it is also involved in translating a SysAddr to a DramAddr. Sections
505 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting. 505 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
506 * These parts of the documentation are unclear. I interpret them as follows: 506 * These parts of the documentation are unclear. I interpret them as follows:
507 * 507 *
508 * When node n receives a SysAddr, it processes the SysAddr as follows: 508 * When node n receives a SysAddr, it processes the SysAddr as follows:
509 * 509 *
510 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM 510 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
511 * Limit registers for node n. If the SysAddr is not within the range 511 * Limit registers for node n. If the SysAddr is not within the range
512 * specified by the base and limit values, then node n ignores the Sysaddr 512 * specified by the base and limit values, then node n ignores the Sysaddr
513 * (since it does not map to node n). Otherwise continue to step 2 below. 513 * (since it does not map to node n). Otherwise continue to step 2 below.
514 * 514 *
515 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is 515 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
516 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within 516 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
517 * the range of relocated addresses (starting at 0x100000000) from the DRAM 517 * the range of relocated addresses (starting at 0x100000000) from the DRAM
518 * hole. If not, skip to step 3 below. Else get the value of the 518 * hole. If not, skip to step 3 below. Else get the value of the
519 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the 519 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
520 * offset defined by this value from the SysAddr. 520 * offset defined by this value from the SysAddr.
521 * 521 *
522 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM 522 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
523 * Base register for node n. To obtain the DramAddr, subtract the base 523 * Base register for node n. To obtain the DramAddr, subtract the base
524 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70). 524 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
525 */ 525 */
526 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) 526 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
527 { 527 {
528 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; 528 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
529 int ret = 0; 529 int ret = 0;
530 530
531 dram_base = get_dram_base(mci); 531 dram_base = get_dram_base(mci);
532 532
533 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, 533 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
534 &hole_size); 534 &hole_size);
535 if (!ret) { 535 if (!ret) {
536 if ((sys_addr >= (1ull << 32)) && 536 if ((sys_addr >= (1ull << 32)) &&
537 (sys_addr < ((1ull << 32) + hole_size))) { 537 (sys_addr < ((1ull << 32) + hole_size))) {
538 /* use DHAR to translate SysAddr to DramAddr */ 538 /* use DHAR to translate SysAddr to DramAddr */
539 dram_addr = sys_addr - hole_offset; 539 dram_addr = sys_addr - hole_offset;
540 540
541 debugf2("using DHAR to translate SysAddr 0x%lx to " 541 debugf2("using DHAR to translate SysAddr 0x%lx to "
542 "DramAddr 0x%lx\n", 542 "DramAddr 0x%lx\n",
543 (unsigned long)sys_addr, 543 (unsigned long)sys_addr,
544 (unsigned long)dram_addr); 544 (unsigned long)dram_addr);
545 545
546 return dram_addr; 546 return dram_addr;
547 } 547 }
548 } 548 }
549 549
550 /* 550 /*
551 * Translate the SysAddr to a DramAddr as shown near the start of 551 * Translate the SysAddr to a DramAddr as shown near the start of
552 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 552 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
553 * only deals with 40-bit values. Therefore we discard bits 63-40 of 553 * only deals with 40-bit values. Therefore we discard bits 63-40 of
554 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we 554 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
555 * discard are all 1s. Otherwise the bits we discard are all 0s. See 555 * discard are all 1s. Otherwise the bits we discard are all 0s. See
556 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture 556 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
557 * Programmer's Manual Volume 1 Application Programming. 557 * Programmer's Manual Volume 1 Application Programming.
558 */ 558 */
559 dram_addr = (sys_addr & 0xffffffffffull) - dram_base; 559 dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
560 560
561 debugf2("using DRAM Base register to translate SysAddr 0x%lx to " 561 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
562 "DramAddr 0x%lx\n", (unsigned long)sys_addr, 562 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
563 (unsigned long)dram_addr); 563 (unsigned long)dram_addr);
564 return dram_addr; 564 return dram_addr;
565 } 565 }
566 566
567 /* 567 /*
568 * @intlv_en is the value of the IntlvEn field from a DRAM Base register 568 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
569 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used 569 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
570 * for node interleaving. 570 * for node interleaving.
571 */ 571 */
572 static int num_node_interleave_bits(unsigned intlv_en) 572 static int num_node_interleave_bits(unsigned intlv_en)
573 { 573 {
574 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 }; 574 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
575 int n; 575 int n;
576 576
577 BUG_ON(intlv_en > 7); 577 BUG_ON(intlv_en > 7);
578 n = intlv_shift_table[intlv_en]; 578 n = intlv_shift_table[intlv_en];
579 return n; 579 return n;
580 } 580 }
581 581
582 /* Translate the DramAddr given by @dram_addr to an InputAddr. */ 582 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
583 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) 583 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
584 { 584 {
585 struct amd64_pvt *pvt; 585 struct amd64_pvt *pvt;
586 int intlv_shift; 586 int intlv_shift;
587 u64 input_addr; 587 u64 input_addr;
588 588
589 pvt = mci->pvt_info; 589 pvt = mci->pvt_info;
590 590
591 /* 591 /*
592 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) 592 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
593 * concerning translating a DramAddr to an InputAddr. 593 * concerning translating a DramAddr to an InputAddr.
594 */ 594 */
595 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); 595 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
596 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + 596 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
597 (dram_addr & 0xfff); 597 (dram_addr & 0xfff);
598 598
599 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", 599 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
600 intlv_shift, (unsigned long)dram_addr, 600 intlv_shift, (unsigned long)dram_addr,
601 (unsigned long)input_addr); 601 (unsigned long)input_addr);
602 602
603 return input_addr; 603 return input_addr;
604 } 604 }
605 605
606 /* 606 /*
607 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is 607 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
608 * assumed that @sys_addr maps to the node given by mci. 608 * assumed that @sys_addr maps to the node given by mci.
609 */ 609 */
610 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) 610 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
611 { 611 {
612 u64 input_addr; 612 u64 input_addr;
613 613
614 input_addr = 614 input_addr =
615 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); 615 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
616 616
617 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", 617 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
618 (unsigned long)sys_addr, (unsigned long)input_addr); 618 (unsigned long)sys_addr, (unsigned long)input_addr);
619 619
620 return input_addr; 620 return input_addr;
621 } 621 }
622 622
623 623
624 /* 624 /*
625 * @input_addr is an InputAddr associated with the node represented by mci. 625 * @input_addr is an InputAddr associated with the node represented by mci.
626 * Translate @input_addr to a DramAddr and return the result. 626 * Translate @input_addr to a DramAddr and return the result.
627 */ 627 */
628 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) 628 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
629 { 629 {
630 struct amd64_pvt *pvt; 630 struct amd64_pvt *pvt;
631 int node_id, intlv_shift; 631 int node_id, intlv_shift;
632 u64 bits, dram_addr; 632 u64 bits, dram_addr;
633 u32 intlv_sel; 633 u32 intlv_sel;
634 634
635 /* 635 /*
636 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) 636 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
637 * shows how to translate a DramAddr to an InputAddr. Here we reverse 637 * shows how to translate a DramAddr to an InputAddr. Here we reverse
638 * this procedure. When translating from a DramAddr to an InputAddr, the 638 * this procedure. When translating from a DramAddr to an InputAddr, the
639 * bits used for node interleaving are discarded. Here we recover these 639 * bits used for node interleaving are discarded. Here we recover these
640 * bits from the IntlvSel field of the DRAM Limit register (section 640 * bits from the IntlvSel field of the DRAM Limit register (section
641 * 3.4.4.2) for the node that input_addr is associated with. 641 * 3.4.4.2) for the node that input_addr is associated with.
642 */ 642 */
643 pvt = mci->pvt_info; 643 pvt = mci->pvt_info;
644 node_id = pvt->mc_node_id; 644 node_id = pvt->mc_node_id;
645 BUG_ON((node_id < 0) || (node_id > 7)); 645 BUG_ON((node_id < 0) || (node_id > 7));
646 646
647 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); 647 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
648 648
649 if (intlv_shift == 0) { 649 if (intlv_shift == 0) {
650 debugf1(" InputAddr 0x%lx translates to DramAddr of " 650 debugf1(" InputAddr 0x%lx translates to DramAddr of "
651 "same value\n", (unsigned long)input_addr); 651 "same value\n", (unsigned long)input_addr);
652 652
653 return input_addr; 653 return input_addr;
654 } 654 }
655 655
656 bits = ((input_addr & 0xffffff000ull) << intlv_shift) + 656 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
657 (input_addr & 0xfff); 657 (input_addr & 0xfff);
658 658
659 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); 659 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
660 dram_addr = bits + (intlv_sel << 12); 660 dram_addr = bits + (intlv_sel << 12);
661 661
662 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " 662 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
663 "(%d node interleave bits)\n", (unsigned long)input_addr, 663 "(%d node interleave bits)\n", (unsigned long)input_addr,
664 (unsigned long)dram_addr, intlv_shift); 664 (unsigned long)dram_addr, intlv_shift);
665 665
666 return dram_addr; 666 return dram_addr;
667 } 667 }
668 668
669 /* 669 /*
670 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert 670 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
671 * @dram_addr to a SysAddr. 671 * @dram_addr to a SysAddr.
672 */ 672 */
673 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) 673 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
674 { 674 {
675 struct amd64_pvt *pvt = mci->pvt_info; 675 struct amd64_pvt *pvt = mci->pvt_info;
676 u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; 676 u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
677 int ret = 0; 677 int ret = 0;
678 678
679 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, 679 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
680 &hole_size); 680 &hole_size);
681 if (!ret) { 681 if (!ret) {
682 if ((dram_addr >= hole_base) && 682 if ((dram_addr >= hole_base) &&
683 (dram_addr < (hole_base + hole_size))) { 683 (dram_addr < (hole_base + hole_size))) {
684 sys_addr = dram_addr + hole_offset; 684 sys_addr = dram_addr + hole_offset;
685 685
686 debugf1("using DHAR to translate DramAddr 0x%lx to " 686 debugf1("using DHAR to translate DramAddr 0x%lx to "
687 "SysAddr 0x%lx\n", (unsigned long)dram_addr, 687 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
688 (unsigned long)sys_addr); 688 (unsigned long)sys_addr);
689 689
690 return sys_addr; 690 return sys_addr;
691 } 691 }
692 } 692 }
693 693
694 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); 694 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
695 sys_addr = dram_addr + base; 695 sys_addr = dram_addr + base;
696 696
697 /* 697 /*
698 * The sys_addr we have computed up to this point is a 40-bit value 698 * The sys_addr we have computed up to this point is a 40-bit value
699 * because the k8 deals with 40-bit values. However, the value we are 699 * because the k8 deals with 40-bit values. However, the value we are
700 * supposed to return is a full 64-bit physical address. The AMD 700 * supposed to return is a full 64-bit physical address. The AMD
701 * x86-64 architecture specifies that the most significant implemented 701 * x86-64 architecture specifies that the most significant implemented
702 * address bit through bit 63 of a physical address must be either all 702 * address bit through bit 63 of a physical address must be either all
703 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a 703 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
704 * 64-bit value below. See section 3.4.2 of AMD publication 24592: 704 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
705 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application 705 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
706 * Programming. 706 * Programming.
707 */ 707 */
708 sys_addr |= ~((sys_addr & (1ull << 39)) - 1); 708 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
709 709
710 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", 710 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
711 pvt->mc_node_id, (unsigned long)dram_addr, 711 pvt->mc_node_id, (unsigned long)dram_addr,
712 (unsigned long)sys_addr); 712 (unsigned long)sys_addr);
713 713
714 return sys_addr; 714 return sys_addr;
715 } 715 }
716 716
717 /* 717 /*
718 * @input_addr is an InputAddr associated with the node given by mci. Translate 718 * @input_addr is an InputAddr associated with the node given by mci. Translate
719 * @input_addr to a SysAddr. 719 * @input_addr to a SysAddr.
720 */ 720 */
721 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci, 721 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
722 u64 input_addr) 722 u64 input_addr)
723 { 723 {
724 return dram_addr_to_sys_addr(mci, 724 return dram_addr_to_sys_addr(mci,
725 input_addr_to_dram_addr(mci, input_addr)); 725 input_addr_to_dram_addr(mci, input_addr));
726 } 726 }
727 727
728 /* 728 /*
729 * Find the minimum and maximum InputAddr values that map to the given @csrow. 729 * Find the minimum and maximum InputAddr values that map to the given @csrow.
730 * Pass back these values in *input_addr_min and *input_addr_max. 730 * Pass back these values in *input_addr_min and *input_addr_max.
731 */ 731 */
732 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, 732 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
733 u64 *input_addr_min, u64 *input_addr_max) 733 u64 *input_addr_min, u64 *input_addr_max)
734 { 734 {
735 struct amd64_pvt *pvt; 735 struct amd64_pvt *pvt;
736 u64 base, mask; 736 u64 base, mask;
737 737
738 pvt = mci->pvt_info; 738 pvt = mci->pvt_info;
739 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); 739 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
740 740
741 base = base_from_dct_base(pvt, csrow); 741 base = base_from_dct_base(pvt, csrow);
742 mask = mask_from_dct_mask(pvt, csrow); 742 mask = mask_from_dct_mask(pvt, csrow);
743 743
744 *input_addr_min = base & ~mask; 744 *input_addr_min = base & ~mask;
745 *input_addr_max = base | mask | pvt->dcs_mask_notused; 745 *input_addr_max = base | mask | pvt->dcs_mask_notused;
746 } 746 }
747 747
748 /* Map the Error address to a PAGE and PAGE OFFSET. */ 748 /* Map the Error address to a PAGE and PAGE OFFSET. */
749 static inline void error_address_to_page_and_offset(u64 error_address, 749 static inline void error_address_to_page_and_offset(u64 error_address,
750 u32 *page, u32 *offset) 750 u32 *page, u32 *offset)
751 { 751 {
752 *page = (u32) (error_address >> PAGE_SHIFT); 752 *page = (u32) (error_address >> PAGE_SHIFT);
753 *offset = ((u32) error_address) & ~PAGE_MASK; 753 *offset = ((u32) error_address) & ~PAGE_MASK;
754 } 754 }
755 755
756 /* 756 /*
757 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address 757 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
758 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers 758 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
759 * of a node that detected an ECC memory error. mci represents the node that 759 * of a node that detected an ECC memory error. mci represents the node that
760 * the error address maps to (possibly different from the node that detected 760 * the error address maps to (possibly different from the node that detected
761 * the error). Return the number of the csrow that sys_addr maps to, or -1 on 761 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
762 * error. 762 * error.
763 */ 763 */
764 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) 764 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
765 { 765 {
766 int csrow; 766 int csrow;
767 767
768 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); 768 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
769 769
770 if (csrow == -1) 770 if (csrow == -1)
771 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for " 771 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
772 "address 0x%lx\n", (unsigned long)sys_addr); 772 "address 0x%lx\n", (unsigned long)sys_addr);
773 return csrow; 773 return csrow;
774 } 774 }
775 775
776 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); 776 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
777 777
778 static u16 extract_syndrome(struct err_regs *err) 778 static u16 extract_syndrome(struct err_regs *err)
779 { 779 {
780 return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00); 780 return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
781 } 781 }
782 782
783 /* 783 /*
784 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs 784 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
785 * are ECC capable. 785 * are ECC capable.
786 */ 786 */
787 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) 787 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
788 { 788 {
789 int bit; 789 int bit;
790 enum dev_type edac_cap = EDAC_FLAG_NONE; 790 enum dev_type edac_cap = EDAC_FLAG_NONE;
791 791
792 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) 792 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
793 ? 19 793 ? 19
794 : 17; 794 : 17;
795 795
796 if (pvt->dclr0 & BIT(bit)) 796 if (pvt->dclr0 & BIT(bit))
797 edac_cap = EDAC_FLAG_SECDED; 797 edac_cap = EDAC_FLAG_SECDED;
798 798
799 return edac_cap; 799 return edac_cap;
800 } 800 }
801 801
802 802
803 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt); 803 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
804 804
805 static void amd64_dump_dramcfg_low(u32 dclr, int chan) 805 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
806 { 806 {
807 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); 807 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
808 808
809 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", 809 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
810 (dclr & BIT(16)) ? "un" : "", 810 (dclr & BIT(16)) ? "un" : "",
811 (dclr & BIT(19)) ? "yes" : "no"); 811 (dclr & BIT(19)) ? "yes" : "no");
812 812
813 debugf1(" PAR/ERR parity: %s\n", 813 debugf1(" PAR/ERR parity: %s\n",
814 (dclr & BIT(8)) ? "enabled" : "disabled"); 814 (dclr & BIT(8)) ? "enabled" : "disabled");
815 815
816 debugf1(" DCT 128bit mode width: %s\n", 816 debugf1(" DCT 128bit mode width: %s\n",
817 (dclr & BIT(11)) ? "128b" : "64b"); 817 (dclr & BIT(11)) ? "128b" : "64b");
818 818
819 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", 819 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
820 (dclr & BIT(12)) ? "yes" : "no", 820 (dclr & BIT(12)) ? "yes" : "no",
821 (dclr & BIT(13)) ? "yes" : "no", 821 (dclr & BIT(13)) ? "yes" : "no",
822 (dclr & BIT(14)) ? "yes" : "no", 822 (dclr & BIT(14)) ? "yes" : "no",
823 (dclr & BIT(15)) ? "yes" : "no"); 823 (dclr & BIT(15)) ? "yes" : "no");
824 } 824 }
825 825
826 /* Display and decode various NB registers for debug purposes. */ 826 /* Display and decode various NB registers for debug purposes. */
827 static void amd64_dump_misc_regs(struct amd64_pvt *pvt) 827 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
828 { 828 {
829 int ganged; 829 int ganged;
830 830
831 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 831 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
832 832
833 debugf1(" NB two channel DRAM capable: %s\n", 833 debugf1(" NB two channel DRAM capable: %s\n",
834 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); 834 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
835 835
836 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", 836 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
837 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", 837 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
838 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); 838 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
839 839
840 amd64_dump_dramcfg_low(pvt->dclr0, 0); 840 amd64_dump_dramcfg_low(pvt->dclr0, 0);
841 841
842 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); 842 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
843 843
844 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " 844 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
845 "offset: 0x%08x\n", 845 "offset: 0x%08x\n",
846 pvt->dhar, 846 pvt->dhar,
847 dhar_base(pvt->dhar), 847 dhar_base(pvt->dhar),
848 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) 848 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
849 : f10_dhar_offset(pvt->dhar)); 849 : f10_dhar_offset(pvt->dhar));
850 850
851 debugf1(" DramHoleValid: %s\n", 851 debugf1(" DramHoleValid: %s\n",
852 (pvt->dhar & DHAR_VALID) ? "yes" : "no"); 852 (pvt->dhar & DHAR_VALID) ? "yes" : "no");
853 853
854 /* everything below this point is Fam10h and above */ 854 /* everything below this point is Fam10h and above */
855 if (boot_cpu_data.x86 == 0xf) { 855 if (boot_cpu_data.x86 == 0xf) {
856 amd64_debug_display_dimm_sizes(0, pvt); 856 amd64_debug_display_dimm_sizes(0, pvt);
857 return; 857 return;
858 } 858 }
859 859
860 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); 860 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
861 861
862 /* Only if NOT ganged does dclr1 have valid info */ 862 /* Only if NOT ganged does dclr1 have valid info */
863 if (!dct_ganging_enabled(pvt)) 863 if (!dct_ganging_enabled(pvt))
864 amd64_dump_dramcfg_low(pvt->dclr1, 1); 864 amd64_dump_dramcfg_low(pvt->dclr1, 1);
865 865
866 /* 866 /*
867 * Determine if ganged and then dump memory sizes for first controller, 867 * Determine if ganged and then dump memory sizes for first controller,
868 * and if NOT ganged dump info for 2nd controller. 868 * and if NOT ganged dump info for 2nd controller.
869 */ 869 */
870 ganged = dct_ganging_enabled(pvt); 870 ganged = dct_ganging_enabled(pvt);
871 871
872 amd64_debug_display_dimm_sizes(0, pvt); 872 amd64_debug_display_dimm_sizes(0, pvt);
873 873
874 if (!ganged) 874 if (!ganged)
875 amd64_debug_display_dimm_sizes(1, pvt); 875 amd64_debug_display_dimm_sizes(1, pvt);
876 } 876 }
877 877
878 /* Read in both of DBAM registers */ 878 /* Read in both of DBAM registers */
879 static void amd64_read_dbam_reg(struct amd64_pvt *pvt) 879 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
880 { 880 {
881 amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0); 881 amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
882 882
883 if (boot_cpu_data.x86 >= 0x10) 883 if (boot_cpu_data.x86 >= 0x10)
884 amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1); 884 amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
885 } 885 }
886 886
887 /* 887 /*
888 * NOTE: CPU Revision Dependent code: Rev E and Rev F 888 * NOTE: CPU Revision Dependent code: Rev E and Rev F
889 * 889 *
890 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also 890 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
891 * set the shift factor for the DCSB and DCSM values. 891 * set the shift factor for the DCSB and DCSM values.
892 * 892 *
893 * ->dcs_mask_notused, RevE: 893 * ->dcs_mask_notused, RevE:
894 * 894 *
895 * To find the max InputAddr for the csrow, start with the base address and set 895 * To find the max InputAddr for the csrow, start with the base address and set
896 * all bits that are "don't care" bits in the test at the start of section 896 * all bits that are "don't care" bits in the test at the start of section
897 * 3.5.4 (p. 84). 897 * 3.5.4 (p. 84).
898 * 898 *
899 * The "don't care" bits are all set bits in the mask and all bits in the gaps 899 * The "don't care" bits are all set bits in the mask and all bits in the gaps
900 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS 900 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
901 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned 901 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
902 * gaps. 902 * gaps.
903 * 903 *
904 * ->dcs_mask_notused, RevF and later: 904 * ->dcs_mask_notused, RevF and later:
905 * 905 *
906 * To find the max InputAddr for the csrow, start with the base address and set 906 * To find the max InputAddr for the csrow, start with the base address and set
907 * all bits that are "don't care" bits in the test at the start of NPT section 907 * all bits that are "don't care" bits in the test at the start of NPT section
908 * 4.5.4 (p. 87). 908 * 4.5.4 (p. 87).
909 * 909 *
910 * The "don't care" bits are all set bits in the mask and all bits in the gaps 910 * The "don't care" bits are all set bits in the mask and all bits in the gaps
911 * between bit ranges [36:27] and [21:13]. 911 * between bit ranges [36:27] and [21:13].
912 * 912 *
913 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0], 913 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
914 * which are all bits in the above-mentioned gaps. 914 * which are all bits in the above-mentioned gaps.
915 */ 915 */
916 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) 916 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
917 { 917 {
918 918
919 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { 919 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
920 pvt->dcsb_base = REV_E_DCSB_BASE_BITS; 920 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
921 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; 921 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
922 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; 922 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
923 pvt->dcs_shift = REV_E_DCS_SHIFT; 923 pvt->dcs_shift = REV_E_DCS_SHIFT;
924 pvt->cs_count = 8; 924 pvt->cs_count = 8;
925 pvt->num_dcsm = 8; 925 pvt->num_dcsm = 8;
926 } else { 926 } else {
927 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; 927 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
928 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; 928 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
929 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; 929 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
930 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; 930 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
931 pvt->cs_count = 8; 931 pvt->cs_count = 8;
932 pvt->num_dcsm = 4; 932 pvt->num_dcsm = 4;
933 } 933 }
934 } 934 }
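
The long comment before amd64_set_dct_base_and_mask() reduces to a simple rule: the highest InputAddr a csrow can decode is its base address with every "don't care" bit forced on. A minimal standalone sketch of that computation, with illustrative register values (none taken from real hardware or from amd64_edac.h):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Illustrative values only. */
            uint64_t cs_base          = 0x0008000000ULL; /* csrow base InputAddr    */
            uint64_t cs_mask          = 0x0001FFE000ULL; /* set bits = "don't care" */
            uint64_t dcs_mask_notused = 0x0007C01FFFULL; /* bits in the mask gaps   */

            /* Max InputAddr: start from the base and set every don't-care bit. */
            uint64_t max_input_addr = cs_base | cs_mask | dcs_mask_notused;

            printf("max InputAddr for this csrow: 0x%llx\n",
                   (unsigned long long)max_input_addr);
            return 0;
    }
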
935 935
936 /* 936 /*
937 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers 937 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
938 */ 938 */
939 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) 939 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
940 { 940 {
941 int cs, reg; 941 int cs, reg;
942 942
943 amd64_set_dct_base_and_mask(pvt); 943 amd64_set_dct_base_and_mask(pvt);
944 944
945 for (cs = 0; cs < pvt->cs_count; cs++) { 945 for (cs = 0; cs < pvt->cs_count; cs++) {
946 reg = K8_DCSB0 + (cs * 4); 946 reg = K8_DCSB0 + (cs * 4);
947 if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs])) 947 if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
948 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", 948 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
949 cs, pvt->dcsb0[cs], reg); 949 cs, pvt->dcsb0[cs], reg);
950 950
951 /* If DCTs are NOT ganged, then read in DCT1's base */ 951 /* If DCTs are NOT ganged, then read in DCT1's base */
952 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { 952 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
953 reg = F10_DCSB1 + (cs * 4); 953 reg = F10_DCSB1 + (cs * 4);
954 if (!amd64_read_pci_cfg(pvt->F2, reg, 954 if (!amd64_read_pci_cfg(pvt->F2, reg,
955 &pvt->dcsb1[cs])) 955 &pvt->dcsb1[cs]))
956 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", 956 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
957 cs, pvt->dcsb1[cs], reg); 957 cs, pvt->dcsb1[cs], reg);
958 } else { 958 } else {
959 pvt->dcsb1[cs] = 0; 959 pvt->dcsb1[cs] = 0;
960 } 960 }
961 } 961 }
962 962
963 for (cs = 0; cs < pvt->num_dcsm; cs++) { 963 for (cs = 0; cs < pvt->num_dcsm; cs++) {
964 reg = K8_DCSM0 + (cs * 4); 964 reg = K8_DCSM0 + (cs * 4);
965 if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs])) 965 if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
966 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", 966 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
967 cs, pvt->dcsm0[cs], reg); 967 cs, pvt->dcsm0[cs], reg);
968 968
969 /* If DCTs are NOT ganged, then read in DCT1's mask */ 969 /* If DCTs are NOT ganged, then read in DCT1's mask */
970 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { 970 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
971 reg = F10_DCSM1 + (cs * 4); 971 reg = F10_DCSM1 + (cs * 4);
972 if (!amd64_read_pci_cfg(pvt->F2, reg, 972 if (!amd64_read_pci_cfg(pvt->F2, reg,
973 &pvt->dcsm1[cs])) 973 &pvt->dcsm1[cs]))
974 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", 974 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
975 cs, pvt->dcsm1[cs], reg); 975 cs, pvt->dcsm1[cs], reg);
976 } else { 976 } else {
977 pvt->dcsm1[cs] = 0; 977 pvt->dcsm1[cs] = 0;
978 } 978 }
979 } 979 }
980 } 980 }
981 981
982 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) 982 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
983 { 983 {
984 enum mem_type type; 984 enum mem_type type;
985 985
986 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { 986 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
987 if (pvt->dchr0 & DDR3_MODE) 987 if (pvt->dchr0 & DDR3_MODE)
988 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; 988 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
989 else 989 else
990 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; 990 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
991 } else { 991 } else {
992 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; 992 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
993 } 993 }
994 994
995 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]); 995 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
996 996
997 return type; 997 return type;
998 } 998 }
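
The decision tree in amd64_determine_memory_type() is compact enough to restate standalone. In this hedged sketch the DDR3_MODE bit position and the K8_REV_F cutoff are assumptions made for the demo (the driver takes both from amd64_edac.h); a set BIT(16)/BIT(18) means unbuffered DIMMs, clear means registered:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)         (1u << (n))
    #define DDR3_MODE_BIT  BIT(8)  /* bit position assumed for this demo */
    #define K8_REV_F_DEMO  4       /* assumed ext_model cutoff */

    static const char *mem_type(int family, int ext_model, uint32_t dchr0,
                                uint32_t dclr0)
    {
            if (family >= 0x10 || ext_model >= K8_REV_F_DEMO) {
                    if (dchr0 & DDR3_MODE_BIT)
                            return (dclr0 & BIT(16)) ? "unbuffered DDR3"
                                                     : "registered DDR3";
                    return (dclr0 & BIT(16)) ? "unbuffered DDR2"
                                             : "registered DDR2";
            }
            return (dclr0 & BIT(18)) ? "unbuffered DDR" : "registered DDR";
    }

    int main(void)
    {
            /* Fam10h part, DDR3 mode, unbuffered DIMMs. */
            printf("%s\n", mem_type(0x10, 0, DDR3_MODE_BIT, BIT(16)));
            return 0;
    }
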
999 999
1000 /* 1000 /*
1001 * Read the DRAM Configuration Low register. It differs between CG, D & E revs 1001 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
1002 * and the later RevF memory controllers (DDR vs DDR2) 1002 * and the later RevF memory controllers (DDR vs DDR2)
1003 * 1003 *
1004 * Return: 1004 * Return:
1005 * number of memory channels in operation 1005 * number of memory channels in operation
1006 * Pass back: 1006 * Pass back:
1007 * contents of the DCL0_LOW register 1007 * contents of the DCL0_LOW register
1008 */ 1008 */
1009 static int k8_early_channel_count(struct amd64_pvt *pvt) 1009 static int k8_early_channel_count(struct amd64_pvt *pvt)
1010 { 1010 {
1011 int flag, err = 0; 1011 int flag, err = 0;
1012 1012
1013 err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); 1013 err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
1014 if (err) 1014 if (err)
1015 return err; 1015 return err;
1016 1016
1017 if (pvt->ext_model >= K8_REV_F) 1017 if (pvt->ext_model >= K8_REV_F)
1018 /* RevF (NPT) and later */ 1018 /* RevF (NPT) and later */
1019 flag = pvt->dclr0 & F10_WIDTH_128; 1019 flag = pvt->dclr0 & F10_WIDTH_128;
1020 else 1020 else
1021 /* RevE and earlier */ 1021 /* RevE and earlier */
1022 flag = pvt->dclr0 & REVE_WIDTH_128; 1022 flag = pvt->dclr0 & REVE_WIDTH_128;
1023 1023
1024 /* not used */ 1024 /* not used */
1025 pvt->dclr1 = 0; 1025 pvt->dclr1 = 0;
1026 1026
1027 return (flag) ? 2 : 1; 1027 return (flag) ? 2 : 1;
1028 } 1028 }
1029 1029
1030 /* extract the ERROR ADDRESS for the K8 CPUs */ 1030 /* extract the ERROR ADDRESS for the K8 CPUs */
1031 static u64 k8_get_error_address(struct mem_ctl_info *mci, 1031 static u64 k8_get_error_address(struct mem_ctl_info *mci,
1032 struct err_regs *info) 1032 struct err_regs *info)
1033 { 1033 {
1034 return (((u64) (info->nbeah & 0xff)) << 32) + 1034 return (((u64) (info->nbeah & 0xff)) << 32) +
1035 (info->nbeal & ~0x03); 1035 (info->nbeal & ~0x03);
1036 } 1036 }
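
A worked example of the assembly in k8_get_error_address(): NBEAH contributes bits [39:32] of the error address, NBEAL bits [31:2], and the low two bits are discarded. The register contents here are hypothetical:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t nbeah = 0x000000ab;  /* bits [39:32] of the error address */
            uint32_t nbeal = 0xdeadbeef;  /* bits [31:2]; [1:0] are not valid  */

            uint64_t addr = (((uint64_t)(nbeah & 0xff)) << 32) +
                            (nbeal & ~0x03u);

            printf("K8 error address: 0x%010llx\n", (unsigned long long)addr);
            return 0;
    }
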
1037 1037
1038 /* 1038 /*
1039 * Read the Base and Limit registers for K8 based Memory controllers; extract 1039 * Read the Base and Limit registers for K8 based Memory controllers; extract
1040 * fields from the 'raw' reg into separate data fields 1040 * fields from the 'raw' reg into separate data fields
1041 * 1041 *
1042 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN 1042 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1043 */ 1043 */
1044 static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) 1044 static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1045 { 1045 {
1046 u32 low; 1046 u32 low;
1047 u32 off = dram << 3; /* 8 bytes between DRAM entries */ 1047 u32 off = dram << 3; /* 8 bytes between DRAM entries */
1048 1048
1049 amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low); 1049 amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
1050 1050
1051 /* Extract parts into separate data entries */ 1051 /* Extract parts into separate data entries */
1052 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; 1052 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1053 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; 1053 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1054 pvt->dram_rw_en[dram] = (low & 0x3); 1054 pvt->dram_rw_en[dram] = (low & 0x3);
1055 1055
1056 amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low); 1056 amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
1057 1057
1058 /* 1058 /*
1059 * Extract parts into separate data entries. Limit is the HIGHEST memory 1059 * Extract parts into separate data entries. Limit is the HIGHEST memory
1060 * location of the region, so lower 24 bits need to be all ones 1060 * location of the region, so lower 24 bits need to be all ones
1061 */ 1061 */
1062 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; 1062 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1063 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; 1063 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1064 pvt->dram_DstNode[dram] = (low & 0x7); 1064 pvt->dram_DstNode[dram] = (low & 0x7);
1065 } 1065 }
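
The field isolation above can be tried on its own. A small sketch with a made-up raw base register: 0x00400103 decodes to a 1 GB base, IntlvEn of 1 (2-node interleave) and both read and write enabled:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t low = 0x00400103;  /* hypothetical raw K8 DRAM base reg */

            /* Same isolation as above: base[39:24] sits in reg[31:16]. */
            uint64_t base     = ((uint64_t)low & 0xFFFF0000) << 8;
            uint32_t intlv_en = (low >> 8) & 0x7;
            uint32_t rw_en    = low & 0x3;

            printf("base=0x%llx intlv_en=%u rw_en=%u\n",
                   (unsigned long long)base, intlv_en, rw_en);
            return 0;
    }
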
1066 1066
1067 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, 1067 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1068 struct err_regs *err_info, u64 sys_addr) 1068 struct err_regs *err_info, u64 sys_addr)
1069 { 1069 {
1070 struct mem_ctl_info *src_mci; 1070 struct mem_ctl_info *src_mci;
1071 int channel, csrow; 1071 int channel, csrow;
1072 u32 page, offset; 1072 u32 page, offset;
1073 u16 syndrome; 1073 u16 syndrome;
1074 1074
1075 syndrome = extract_syndrome(err_info); 1075 syndrome = extract_syndrome(err_info);
1076 1076
1077 /* CHIPKILL enabled */ 1077 /* CHIPKILL enabled */
1078 if (err_info->nbcfg & K8_NBCFG_CHIPKILL) { 1078 if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
1079 channel = get_channel_from_ecc_syndrome(mci, syndrome); 1079 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1080 if (channel < 0) { 1080 if (channel < 0) {
1081 /* 1081 /*
1082 * Syndrome didn't map, so we don't know which of the 1082 * Syndrome didn't map, so we don't know which of the
1083 * 2 DIMMs is in error. So we need to ID 'both' of them 1083 * 2 DIMMs is in error. So we need to ID 'both' of them
1084 * as suspect. 1084 * as suspect.
1085 */ 1085 */
1086 amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible " 1086 amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
1087 "error reporting race\n", syndrome); 1087 "error reporting race\n", syndrome);
1088 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1088 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1089 return; 1089 return;
1090 } 1090 }
1091 } else { 1091 } else {
1092 /* 1092 /*
1093 * non-chipkill ecc mode 1093 * non-chipkill ecc mode
1094 * 1094 *
1095 * The k8 documentation is unclear about how to determine the 1095 * The k8 documentation is unclear about how to determine the
1096 * channel number when using non-chipkill memory. This method 1096 * channel number when using non-chipkill memory. This method
1097 * was obtained from email communication with someone at AMD. 1097 * was obtained from email communication with someone at AMD.
1098 * (Wish the email was placed in this comment - norsk) 1098 * (Wish the email was placed in this comment - norsk)
1099 */ 1099 */
1100 channel = ((sys_addr & BIT(3)) != 0); 1100 channel = ((sys_addr & BIT(3)) != 0);
1101 } 1101 }
1102 1102
1103 /* 1103 /*
1104 * Find out which node the error address belongs to. This may be 1104 * Find out which node the error address belongs to. This may be
1105 * different from the node that detected the error. 1105 * different from the node that detected the error.
1106 */ 1106 */
1107 src_mci = find_mc_by_sys_addr(mci, sys_addr); 1107 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1108 if (!src_mci) { 1108 if (!src_mci) {
1109 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", 1109 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1110 (unsigned long)sys_addr); 1110 (unsigned long)sys_addr);
1111 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1111 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1112 return; 1112 return;
1113 } 1113 }
1114 1114
1115 /* Now map the sys_addr to a CSROW */ 1115 /* Now map the sys_addr to a CSROW */
1116 csrow = sys_addr_to_csrow(src_mci, sys_addr); 1116 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1117 if (csrow < 0) { 1117 if (csrow < 0) {
1118 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR); 1118 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1119 } else { 1119 } else {
1120 error_address_to_page_and_offset(sys_addr, &page, &offset); 1120 error_address_to_page_and_offset(sys_addr, &page, &offset);
1121 1121
1122 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow, 1122 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1123 channel, EDAC_MOD_STR); 1123 channel, EDAC_MOD_STR);
1124 } 1124 }
1125 } 1125 }
1126 1126
1127 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) 1127 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1128 { 1128 {
1129 int *dbam_map; 1129 int *dbam_map;
1130 1130
1131 if (pvt->ext_model >= K8_REV_F) 1131 if (pvt->ext_model >= K8_REV_F)
1132 dbam_map = ddr2_dbam; 1132 dbam_map = ddr2_dbam;
1133 else if (pvt->ext_model >= K8_REV_D) 1133 else if (pvt->ext_model >= K8_REV_D)
1134 dbam_map = ddr2_dbam_revD; 1134 dbam_map = ddr2_dbam_revD;
1135 else 1135 else
1136 dbam_map = ddr2_dbam_revCG; 1136 dbam_map = ddr2_dbam_revCG;
1137 1137
1138 return dbam_map[cs_mode]; 1138 return dbam_map[cs_mode];
1139 } 1139 }
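
Usage is a straight table lookup: the 4-bit cs_mode pulled from DBAM indexes a revision-specific array of csrow sizes in megabytes. A sketch with an illustrative stand-in table (the real tables are the ddr2_dbam* arrays defined at the top of this file):

    #include <stdio.h>

    /* Stand-in for one revision's DBAM-to-size table; values invented. */
    static int demo_dbam_map[] = { 128, 256, 512, 512, 512, 1024 };

    static int dbam_to_chip_select(int cs_mode)
    {
            return demo_dbam_map[cs_mode];  /* csrow size in MB */
    }

    int main(void)
    {
            printf("cs_mode 2 -> %d MB\n", dbam_to_chip_select(2));
            return 0;
    }
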
1140 1140
1141 /* 1141 /*
1142 * Get the number of DCT channels in use. 1142 * Get the number of DCT channels in use.
1143 * 1143 *
1144 * Return: 1144 * Return:
1145 * number of Memory Channels in operation 1145 * number of Memory Channels in operation
1146 * Pass back: 1146 * Pass back:
1147 * contents of the DCL0_LOW register 1147 * contents of the DCL0_LOW register
1148 */ 1148 */
1149 static int f10_early_channel_count(struct amd64_pvt *pvt) 1149 static int f10_early_channel_count(struct amd64_pvt *pvt)
1150 { 1150 {
1151 int dbams[] = { DBAM0, DBAM1 }; 1151 int dbams[] = { DBAM0, DBAM1 };
1152 int i, j, channels = 0; 1152 int i, j, channels = 0;
1153 u32 dbam; 1153 u32 dbam;
1154 1154
1155 /* If we are in 128 bit mode, then we are using 2 channels */ 1155 /* If we are in 128 bit mode, then we are using 2 channels */
1156 if (pvt->dclr0 & F10_WIDTH_128) { 1156 if (pvt->dclr0 & F10_WIDTH_128) {
1157 channels = 2; 1157 channels = 2;
1158 return channels; 1158 return channels;
1159 } 1159 }
1160 1160
1161 /* 1161 /*
1162 * Need to check if in unganged mode: in that case, there are 2 channels, 1162 * Need to check if in unganged mode: in that case, there are 2 channels,
1163 * but they are not in 128 bit mode and thus the above 'dclr0' status 1163 * but they are not in 128 bit mode and thus the above 'dclr0' status
1164 * bit will be OFF. 1164 * bit will be OFF.
1165 * 1165 *
1166 * Need to check DCT0[0] and DCT1[0] to see if only one of them has 1166 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1167 * its CSEnable bit on. If so, then SINGLE DIMM case. 1167 * its CSEnable bit on. If so, then SINGLE DIMM case.
1168 */ 1168 */
1169 debugf0("Data width is not 128 bits - need more decoding\n"); 1169 debugf0("Data width is not 128 bits - need more decoding\n");
1170 1170
1171 /* 1171 /*
1172 * Check DRAM Bank Address Mapping values for each DIMM to see if there 1172 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1173 * is more than just one DIMM present in unganged mode. Need to check 1173 * is more than just one DIMM present in unganged mode. Need to check
1174 * both controllers since DIMMs can be placed in either one. 1174 * both controllers since DIMMs can be placed in either one.
1175 */ 1175 */
1176 for (i = 0; i < ARRAY_SIZE(dbams); i++) { 1176 for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1177 if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam)) 1177 if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
1178 goto err_reg; 1178 goto err_reg;
1179 1179
1180 for (j = 0; j < 4; j++) { 1180 for (j = 0; j < 4; j++) {
1181 if (DBAM_DIMM(j, dbam) > 0) { 1181 if (DBAM_DIMM(j, dbam) > 0) {
1182 channels++; 1182 channels++;
1183 break; 1183 break;
1184 } 1184 }
1185 } 1185 }
1186 } 1186 }
1187 1187
1188 if (channels > 2) 1188 if (channels > 2)
1189 channels = 2; 1189 channels = 2;
1190 1190
1191 amd64_info("MCT channel count: %d\n", channels); 1191 amd64_info("MCT channel count: %d\n", channels);
1192 1192
1193 return channels; 1193 return channels;
1194 1194
1195 err_reg: 1195 err_reg:
1196 return -1; 1196 return -1;
1197 1197
1198 } 1198 }
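
The per-DIMM probing above relies on DBAM_DIMM() extracting one 4-bit cs_mode nibble per DIMM slot from the register. A sketch under that assumption (the macro body is a guess at the amd64_edac.h definition, and the register value is made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed layout: one 4-bit cs_mode nibble per DIMM slot. */
    #define DBAM_DIMM(i, dbam) (((dbam) >> ((i) * 4)) & 0xF)

    int main(void)
    {
            uint32_t dbam = 0x00000503; /* hypothetical: slots 0 and 2 in use */
            int j, populated = 0;

            for (j = 0; j < 4; j++)
                    if (DBAM_DIMM(j, dbam) > 0)
                            populated++;

            printf("%d of 4 DIMM slots populated on this DCT\n", populated);
            return 0;
    }
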
1199 1199
1200 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) 1200 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1201 { 1201 {
1202 int *dbam_map; 1202 int *dbam_map;
1203 1203
1204 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) 1204 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1205 dbam_map = ddr3_dbam; 1205 dbam_map = ddr3_dbam;
1206 else 1206 else
1207 dbam_map = ddr2_dbam; 1207 dbam_map = ddr2_dbam;
1208 1208
1209 return dbam_map[cs_mode]; 1209 return dbam_map[cs_mode];
1210 } 1210 }
1211 1211
1212 static u64 f10_get_error_address(struct mem_ctl_info *mci, 1212 static u64 f10_get_error_address(struct mem_ctl_info *mci,
1213 struct err_regs *info) 1213 struct err_regs *info)
1214 { 1214 {
1215 return (((u64) (info->nbeah & 0xffff)) << 32) + 1215 return (((u64) (info->nbeah & 0xffff)) << 32) +
1216 (info->nbeal & ~0x01); 1216 (info->nbeal & ~0x01);
1217 } 1217 }
1218 1218
1219 /* 1219 /*
1220 * Read the Base and Limit registers for F10 based Memory controllers. Extract 1220 * Read the Base and Limit registers for F10 based Memory controllers. Extract
1221 * fields from the 'raw' reg into separate data fields. 1221 * fields from the 'raw' reg into separate data fields.
1222 * 1222 *
1223 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. 1223 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1224 */ 1224 */
1225 static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) 1225 static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1226 { 1226 {
1227 u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; 1227 u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
1228 1228
1229 low_offset = K8_DRAM_BASE_LOW + (dram << 3); 1229 low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1230 high_offset = F10_DRAM_BASE_HIGH + (dram << 3); 1230 high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1231 1231
1232 /* read the 'raw' DRAM BASE Address register */ 1232 /* read the 'raw' DRAM BASE Address register */
1233 amd64_read_pci_cfg(pvt->F1, low_offset, &low_base); 1233 amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
1234 amd64_read_pci_cfg(pvt->F1, high_offset, &high_base); 1234 amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
1235 1235
1236 /* Extract parts into separate data entries */ 1236 /* Extract parts into separate data entries */
1237 pvt->dram_rw_en[dram] = (low_base & 0x3); 1237 pvt->dram_rw_en[dram] = (low_base & 0x3);
1238 1238
1239 if (pvt->dram_rw_en[dram] == 0) 1239 if (pvt->dram_rw_en[dram] == 0)
1240 return; 1240 return;
1241 1241
1242 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; 1242 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1243 1243
1244 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | 1244 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1245 (((u64)low_base & 0xFFFF0000) << 8); 1245 (((u64)low_base & 0xFFFF0000) << 8);
1246 1246
1247 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); 1247 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1248 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); 1248 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1249 1249
1250 /* read the 'raw' LIMIT registers */ 1250 /* read the 'raw' LIMIT registers */
1251 amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit); 1251 amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
1252 amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit); 1252 amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
1253 1253
1254 pvt->dram_DstNode[dram] = (low_limit & 0x7); 1254 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1255 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; 1255 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1256 1256
1257 /* 1257 /*
1258 * Extract address values and form a LIMIT address. Limit is the HIGHEST 1258 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1259 * memory location of the region, so low 24 bits need to be all ones. 1259 * memory location of the region, so low 24 bits need to be all ones.
1260 */ 1260 */
1261 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | 1261 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1262 (((u64) low_limit & 0xFFFF0000) << 8) | 1262 (((u64) low_limit & 0xFFFF0000) << 8) |
1263 0x00FFFFFF; 1263 0x00FFFFFF;
1264 } 1264 }
1265 1265
1266 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) 1266 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1267 { 1267 {
1268 1268
1269 if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW, 1269 if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
1270 &pvt->dram_ctl_select_low)) { 1270 &pvt->dram_ctl_select_low)) {
1271 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " 1271 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
1272 "High range addresses at: 0x%x\n", 1272 "High range addresses at: 0x%x\n",
1273 pvt->dram_ctl_select_low, 1273 pvt->dram_ctl_select_low,
1274 dct_sel_baseaddr(pvt)); 1274 dct_sel_baseaddr(pvt));
1275 1275
1276 debugf0(" DCT mode: %s, All DCTs on: %s\n", 1276 debugf0(" DCT mode: %s, All DCTs on: %s\n",
1277 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), 1277 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1278 (dct_dram_enabled(pvt) ? "yes" : "no")); 1278 (dct_dram_enabled(pvt) ? "yes" : "no"));
1279 1279
1280 if (!dct_ganging_enabled(pvt)) 1280 if (!dct_ganging_enabled(pvt))
1281 debugf0(" Address range split per DCT: %s\n", 1281 debugf0(" Address range split per DCT: %s\n",
1282 (dct_high_range_enabled(pvt) ? "yes" : "no")); 1282 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1283 1283
1284 debugf0(" DCT data interleave for ECC: %s, " 1284 debugf0(" DCT data interleave for ECC: %s, "
1285 "DRAM cleared since last warm reset: %s\n", 1285 "DRAM cleared since last warm reset: %s\n",
1286 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), 1286 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1287 (dct_memory_cleared(pvt) ? "yes" : "no")); 1287 (dct_memory_cleared(pvt) ? "yes" : "no"));
1288 1288
1289 debugf0(" DCT channel interleave: %s, " 1289 debugf0(" DCT channel interleave: %s, "
1290 "DCT interleave bits selector: 0x%x\n", 1290 "DCT interleave bits selector: 0x%x\n",
1291 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), 1291 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1292 dct_sel_interleave_addr(pvt)); 1292 dct_sel_interleave_addr(pvt));
1293 } 1293 }
1294 1294
1295 amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH, 1295 amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
1296 &pvt->dram_ctl_select_high); 1296 &pvt->dram_ctl_select_high);
1297 } 1297 }
1298 1298
1299 /* 1299 /*
1300 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory 1300 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1301 * Interleaving Modes. 1301 * Interleaving Modes.
1302 */ 1302 */
1303 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, 1303 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1304 int hi_range_sel, u32 intlv_en) 1304 int hi_range_sel, u32 intlv_en)
1305 { 1305 {
1306 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; 1306 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
1307 1307
1308 if (dct_ganging_enabled(pvt)) 1308 if (dct_ganging_enabled(pvt))
1309 cs = 0; 1309 cs = 0;
1310 else if (hi_range_sel) 1310 else if (hi_range_sel)
1311 cs = dct_sel_high; 1311 cs = dct_sel_high;
1312 else if (dct_interleave_enabled(pvt)) { 1312 else if (dct_interleave_enabled(pvt)) {
1313 /* 1313 /*
1314 * see F2x110[DctSelIntLvAddr] - channel interleave mode 1314 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1315 */ 1315 */
1316 if (dct_sel_interleave_addr(pvt) == 0) 1316 if (dct_sel_interleave_addr(pvt) == 0)
1317 cs = sys_addr >> 6 & 1; 1317 cs = sys_addr >> 6 & 1;
1318 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { 1318 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1319 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; 1319 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1320 1320
1321 if (dct_sel_interleave_addr(pvt) & 1) 1321 if (dct_sel_interleave_addr(pvt) & 1)
1322 cs = (sys_addr >> 9 & 1) ^ temp; 1322 cs = (sys_addr >> 9 & 1) ^ temp;
1323 else 1323 else
1324 cs = (sys_addr >> 6 & 1) ^ temp; 1324 cs = (sys_addr >> 6 & 1) ^ temp;
1325 } else if (intlv_en & 4) 1325 } else if (intlv_en & 4)
1326 cs = sys_addr >> 15 & 1; 1326 cs = sys_addr >> 15 & 1;
1327 else if (intlv_en & 2) 1327 else if (intlv_en & 2)
1328 cs = sys_addr >> 14 & 1; 1328 cs = sys_addr >> 14 & 1;
1329 else if (intlv_en & 1) 1329 else if (intlv_en & 1)
1330 cs = sys_addr >> 13 & 1; 1330 cs = sys_addr >> 13 & 1;
1331 else 1331 else
1332 cs = sys_addr >> 12 & 1; 1332 cs = sys_addr >> 12 & 1;
1333 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) 1333 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1334 cs = ~dct_sel_high & 1; 1334 cs = ~dct_sel_high & 1;
1335 else 1335 else
1336 cs = 0; 1336 cs = 0;
1337 1337
1338 return cs; 1338 return cs;
1339 } 1339 }
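
The hash-interleave branches above boil down to: take the parity of sys_addr bits [20:16] and XOR it with a single address bit (bit 9 when DctSelIntLvAddr is 3, bit 6 when it is 2). A sketch of the DctSelIntLvAddr == 2 case, with __builtin_popcount() standing in for the kernel's hweight_long() and a made-up address:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int addr_hash(uint64_t sys_addr)
    {
            /* Popcount parity of address bits [20:16]. */
            return __builtin_popcount((uint32_t)((sys_addr >> 16) & 0x1F)) % 2;
    }

    int main(void)
    {
            uint64_t sys_addr = 0x123450000ULL;   /* hypothetical address */
            unsigned int temp = addr_hash(sys_addr);

            /* DctSelIntLvAddr == 2: hash with address bit 6 selects the DCT. */
            unsigned int cs = ((sys_addr >> 6) & 1) ^ temp;

            printf("hash=%u -> channel %u\n", temp, cs);
            return 0;
    }
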
1340 1340
1341 static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) 1341 static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1342 { 1342 {
1343 if (intlv_en == 1) 1343 if (intlv_en == 1)
1344 return 1; 1344 return 1;
1345 else if (intlv_en == 3) 1345 else if (intlv_en == 3)
1346 return 2; 1346 return 2;
1347 else if (intlv_en == 7) 1347 else if (intlv_en == 7)
1348 return 3; 1348 return 3;
1349 1349
1350 return 0; 1350 return 0;
1351 } 1351 }
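
Because intlv_en only takes the contiguous-mask values 0, 1, 3 and 7 here, the ladder is equivalent to a population count. A sketch of that equivalence using the GCC/Clang builtin:

    #include <stdio.h>

    static unsigned int map_intlv_en_to_shift(unsigned int intlv_en)
    {
            /* Valid for the contiguous low-order masks 0, 1, 3 and 7. */
            return (unsigned int)__builtin_popcount(intlv_en);
    }

    int main(void)
    {
            unsigned int vals[] = { 0, 1, 3, 7 };
            int i;

            for (i = 0; i < 4; i++)
                    printf("intlv_en=%u -> shift %u\n",
                           vals[i], map_intlv_en_to_shift(vals[i]));
            return 0;
    }
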
1352 1352
1353 /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ 1353 /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1354 static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, 1354 static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1355 u32 dct_sel_base_addr, 1355 u32 dct_sel_base_addr,
1356 u64 dct_sel_base_off, 1356 u64 dct_sel_base_off,
1357 u32 hole_valid, u32 hole_off, 1357 u32 hole_valid, u32 hole_off,
1358 u64 dram_base) 1358 u64 dram_base)
1359 { 1359 {
1360 u64 chan_off; 1360 u64 chan_off;
1361 1361
1362 if (hi_range_sel) { 1362 if (hi_range_sel) {
1363 if (!(dct_sel_base_addr & 0xFFFF0000) && 1363 if (!(dct_sel_base_addr & 0xFFFF0000) &&
1364 hole_valid && (sys_addr >= 0x100000000ULL)) 1364 hole_valid && (sys_addr >= 0x100000000ULL))
1365 chan_off = hole_off << 16; 1365 chan_off = hole_off << 16;
1366 else 1366 else
1367 chan_off = dct_sel_base_off; 1367 chan_off = dct_sel_base_off;
1368 } else { 1368 } else {
1369 if (hole_valid && (sys_addr >= 0x100000000ULL)) 1369 if (hole_valid && (sys_addr >= 0x100000000ULL))
1370 chan_off = hole_off << 16; 1370 chan_off = hole_off << 16;
1371 else 1371 else
1372 chan_off = dram_base & 0xFFFFF8000000ULL; 1372 chan_off = dram_base & 0xFFFFF8000000ULL;
1373 } 1373 }
1374 1374
1375 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - 1375 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1376 (chan_off & 0x0000FFFFFF800000ULL); 1376 (chan_off & 0x0000FFFFFF800000ULL);
1377 } 1377 }
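
A worked example of the low-range path above, assuming no DRAM hole: the channel offset is the 128 MB-aligned DRAM base, and the normalized channel address is the 64-byte-aligned sys_addr minus that offset. All addresses are invented:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t sys_addr  = 0x0000001234567890ULL;
            uint64_t dram_base = 0x0000001000000000ULL;

            /* Low range, hole not valid: offset is the aligned DRAM base. */
            uint64_t chan_off = dram_base & 0xFFFFF8000000ULL;

            uint64_t chan_addr = (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
                                 (chan_off & 0x0000FFFFFF800000ULL);

            printf("chan_addr = 0x%llx\n", (unsigned long long)chan_addr);
            return 0;
    }
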
1378 1378
1379 /* Hack for the time being - Can we get this from BIOS?? */ 1379 /* Hack for the time being - Can we get this from BIOS?? */
1380 #define CH0SPARE_RANK 0 1380 #define CH0SPARE_RANK 0
1381 #define CH1SPARE_RANK 1 1381 #define CH1SPARE_RANK 1
1382 1382
1383 /* 1383 /*
1384 * Checks if the csrow passed in is marked as SPARED; if so, returns the 1384 * Checks if the csrow passed in is marked as SPARED; if so, returns the
1385 * new spare row. 1385 * new spare row.
1386 */ 1386 */
1387 static inline int f10_process_possible_spare(int csrow, 1387 static inline int f10_process_possible_spare(int csrow,
1388 u32 cs, struct amd64_pvt *pvt) 1388 u32 cs, struct amd64_pvt *pvt)
1389 { 1389 {
1390 u32 swap_done; 1390 u32 swap_done;
1391 u32 bad_dram_cs; 1391 u32 bad_dram_cs;
1392 1392
1393 /* Depending on channel, isolate respective SPARING info */ 1393 /* Depending on channel, isolate respective SPARING info */
1394 if (cs) { 1394 if (cs) {
1395 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); 1395 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1396 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); 1396 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1397 if (swap_done && (csrow == bad_dram_cs)) 1397 if (swap_done && (csrow == bad_dram_cs))
1398 csrow = CH1SPARE_RANK; 1398 csrow = CH1SPARE_RANK;
1399 } else { 1399 } else {
1400 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); 1400 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1401 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); 1401 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1402 if (swap_done && (csrow == bad_dram_cs)) 1402 if (swap_done && (csrow == bad_dram_cs))
1403 csrow = CH0SPARE_RANK; 1403 csrow = CH0SPARE_RANK;
1404 } 1404 }
1405 return csrow; 1405 return csrow;
1406 } 1406 }
1407 1407
1408 /* 1408 /*
1409 * Iterate over the DRAM DCT "base" and "mask" registers looking for a 1409 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1410 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID' 1410 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1411 * 1411 *
1412 * Return: 1412 * Return:
1413 * -EINVAL: NOT FOUND 1413 * -EINVAL: NOT FOUND
1414 * 0..csrow = Chip-Select Row 1414 * 0..csrow = Chip-Select Row
1415 */ 1415 */
1416 static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) 1416 static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1417 { 1417 {
1418 struct mem_ctl_info *mci; 1418 struct mem_ctl_info *mci;
1419 struct amd64_pvt *pvt; 1419 struct amd64_pvt *pvt;
1420 u32 cs_base, cs_mask; 1420 u32 cs_base, cs_mask;
1421 int cs_found = -EINVAL; 1421 int cs_found = -EINVAL;
1422 int csrow; 1422 int csrow;
1423 1423
1424 mci = mcis[nid]; 1424 mci = mcis[nid];
1425 if (!mci) 1425 if (!mci)
1426 return cs_found; 1426 return cs_found;
1427 1427
1428 pvt = mci->pvt_info; 1428 pvt = mci->pvt_info;
1429 1429
1430 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); 1430 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1431 1431
1432 for (csrow = 0; csrow < pvt->cs_count; csrow++) { 1432 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1433 1433
1434 cs_base = amd64_get_dct_base(pvt, cs, csrow); 1434 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1435 if (!(cs_base & K8_DCSB_CS_ENABLE)) 1435 if (!(cs_base & K8_DCSB_CS_ENABLE))
1436 continue; 1436 continue;
1437 1437
1438 /* 1438 /*
1439 * We have an ENABLED CSROW; isolate just the MASK bits of the 1439 * We have an ENABLED CSROW; isolate just the MASK bits of the
1440 * target: [28:19] and [13:5], which map to [36:27] and [21:13] 1440 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1441 * of the actual address. 1441 * of the actual address.
1442 */ 1442 */
1443 cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; 1443 cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1444 1444
1445 /* 1445 /*
1446 * Get the DCT Mask, and ENABLE the reserved bits: [18:14] and 1446 * Get the DCT Mask, and ENABLE the reserved bits: [18:14] and
1447 * [4:0] to become ON. Then keep only bits [28:0] ([36:8]). 1447 * [4:0] to become ON. Then keep only bits [28:0] ([36:8]).
1448 */ 1448 */
1449 cs_mask = amd64_get_dct_mask(pvt, cs, csrow); 1449 cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1450 1450
1451 debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", 1451 debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
1452 csrow, cs_base, cs_mask); 1452 csrow, cs_base, cs_mask);
1453 1453
1454 cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; 1454 cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
1455 1455
1456 debugf1(" Final CSMask=0x%x\n", cs_mask); 1456 debugf1(" Final CSMask=0x%x\n", cs_mask);
1457 debugf1(" (InputAddr & ~CSMask)=0x%x " 1457 debugf1(" (InputAddr & ~CSMask)=0x%x "
1458 "(CSBase & ~CSMask)=0x%x\n", 1458 "(CSBase & ~CSMask)=0x%x\n",
1459 (in_addr & ~cs_mask), (cs_base & ~cs_mask)); 1459 (in_addr & ~cs_mask), (cs_base & ~cs_mask));
1460 1460
1461 if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { 1461 if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
1462 cs_found = f10_process_possible_spare(csrow, cs, pvt); 1462 cs_found = f10_process_possible_spare(csrow, cs, pvt);
1463 1463
1464 debugf1(" MATCH csrow=%d\n", cs_found); 1464 debugf1(" MATCH csrow=%d\n", cs_found);
1465 break; 1465 break;
1466 } 1466 }
1467 } 1467 }
1468 return cs_found; 1468 return cs_found;
1469 } 1469 }
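
The match test at the heart of the loop above: widen the mask with the reserved gap bits, then compare address and base only on the bits the mask does NOT cover. A worked example with an invented csrow:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t in_addr = 0x00812345;  /* InputAddr under test    */
            uint32_t cs_base = 0x00800000;  /* hypothetical csrow base */
            uint32_t cs_mask = 0x0007FFE0;  /* hypothetical csrow mask */

            /* Enable the reserved gap bits, as done above. */
            cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;

            if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask))
                    printf("InputAddr 0x%x matches this csrow\n", in_addr);
            return 0;
    }
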
1470 1470
1471 /* For a given @dram_range, check if @sys_addr falls within it. */ 1471 /* For a given @dram_range, check if @sys_addr falls within it. */
1472 static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, 1472 static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1473 u64 sys_addr, int *nid, int *chan_sel) 1473 u64 sys_addr, int *nid, int *chan_sel)
1474 { 1474 {
1475 int node_id, cs_found = -EINVAL, high_range = 0; 1475 int node_id, cs_found = -EINVAL, high_range = 0;
1476 u32 intlv_en, intlv_sel, intlv_shift, hole_off; 1476 u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1477 u32 hole_valid, tmp, dct_sel_base, channel; 1477 u32 hole_valid, tmp, dct_sel_base, channel;
1478 u64 dram_base, chan_addr, dct_sel_base_off; 1478 u64 dram_base, chan_addr, dct_sel_base_off;
1479 1479
1480 dram_base = pvt->dram_base[dram_range]; 1480 dram_base = pvt->dram_base[dram_range];
1481 intlv_en = pvt->dram_IntlvEn[dram_range]; 1481 intlv_en = pvt->dram_IntlvEn[dram_range];
1482 1482
1483 node_id = pvt->dram_DstNode[dram_range]; 1483 node_id = pvt->dram_DstNode[dram_range];
1484 intlv_sel = pvt->dram_IntlvSel[dram_range]; 1484 intlv_sel = pvt->dram_IntlvSel[dram_range];
1485 1485
1486 debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", 1486 debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1487 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); 1487 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
1488 1488
1489 /* 1489 /*
1490 * This assumes that one node's DHAR is the same as all the other 1490 * This assumes that one node's DHAR is the same as all the other
1491 * nodes' DHAR. 1491 * nodes' DHAR.
1492 */ 1492 */
1493 hole_off = (pvt->dhar & 0x0000FF80); 1493 hole_off = (pvt->dhar & 0x0000FF80);
1494 hole_valid = (pvt->dhar & 0x1); 1494 hole_valid = (pvt->dhar & 0x1);
1495 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; 1495 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1496 1496
1497 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", 1497 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
1498 hole_off, hole_valid, intlv_sel); 1498 hole_off, hole_valid, intlv_sel);
1499 1499
1500 if (intlv_en && 1500 if (intlv_en &&
1501 (intlv_sel != ((sys_addr >> 12) & intlv_en))) 1501 (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1502 return -EINVAL; 1502 return -EINVAL;
1503 1503
1504 dct_sel_base = dct_sel_baseaddr(pvt); 1504 dct_sel_base = dct_sel_baseaddr(pvt);
1505 1505
1506 /* 1506 /*
1507 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to 1507 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1508 * select between DCT0 and DCT1. 1508 * select between DCT0 and DCT1.
1509 */ 1509 */
1510 if (dct_high_range_enabled(pvt) && 1510 if (dct_high_range_enabled(pvt) &&
1511 !dct_ganging_enabled(pvt) && 1511 !dct_ganging_enabled(pvt) &&
1512 ((sys_addr >> 27) >= (dct_sel_base >> 11))) 1512 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1513 high_range = 1; 1513 high_range = 1;
1514 1514
1515 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); 1515 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1516 1516
1517 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, 1517 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1518 dct_sel_base_off, hole_valid, 1518 dct_sel_base_off, hole_valid,
1519 hole_off, dram_base); 1519 hole_off, dram_base);
1520 1520
1521 intlv_shift = f10_map_intlv_en_to_shift(intlv_en); 1521 intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1522 1522
1523 /* remove Node ID (in case of memory interleaving) */ 1523 /* remove Node ID (in case of memory interleaving) */
1524 tmp = chan_addr & 0xFC0; 1524 tmp = chan_addr & 0xFC0;
1525 1525
1526 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; 1526 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1527 1527
1528 /* remove channel interleave and hash */ 1528 /* remove channel interleave and hash */
1529 if (dct_interleave_enabled(pvt) && 1529 if (dct_interleave_enabled(pvt) &&
1530 !dct_high_range_enabled(pvt) && 1530 !dct_high_range_enabled(pvt) &&
1531 !dct_ganging_enabled(pvt)) { 1531 !dct_ganging_enabled(pvt)) {
1532 if (dct_sel_interleave_addr(pvt) != 1) 1532 if (dct_sel_interleave_addr(pvt) != 1)
1533 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; 1533 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1534 else { 1534 else {
1535 tmp = chan_addr & 0xFC0; 1535 tmp = chan_addr & 0xFC0;
1536 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) 1536 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1537 | tmp; 1537 | tmp;
1538 } 1538 }
1539 } 1539 }
1540 1540
1541 debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", 1541 debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
1542 chan_addr, (u32)(chan_addr >> 8)); 1542 chan_addr, (u32)(chan_addr >> 8));
1543 1543
1544 cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); 1544 cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
1545 1545
1546 if (cs_found >= 0) { 1546 if (cs_found >= 0) {
1547 *nid = node_id; 1547 *nid = node_id;
1548 *chan_sel = channel; 1548 *chan_sel = channel;
1549 } 1549 }
1550 return cs_found; 1550 return cs_found;
1551 } 1551 }
1552 1552
1553 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, 1553 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1554 int *node, int *chan_sel) 1554 int *node, int *chan_sel)
1555 { 1555 {
1556 int dram_range, cs_found = -EINVAL; 1556 int dram_range, cs_found = -EINVAL;
1557 u64 dram_base, dram_limit; 1557 u64 dram_base, dram_limit;
1558 1558
1559 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { 1559 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1560 1560
1561 if (!pvt->dram_rw_en[dram_range]) 1561 if (!pvt->dram_rw_en[dram_range])
1562 continue; 1562 continue;
1563 1563
1564 dram_base = pvt->dram_base[dram_range]; 1564 dram_base = pvt->dram_base[dram_range];
1565 dram_limit = pvt->dram_limit[dram_range]; 1565 dram_limit = pvt->dram_limit[dram_range];
1566 1566
1567 if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { 1567 if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
1568 1568
1569 cs_found = f10_match_to_this_node(pvt, dram_range, 1569 cs_found = f10_match_to_this_node(pvt, dram_range,
1570 sys_addr, node, 1570 sys_addr, node,
1571 chan_sel); 1571 chan_sel);
1572 if (cs_found >= 0) 1572 if (cs_found >= 0)
1573 break; 1573 break;
1574 } 1574 }
1575 } 1575 }
1576 return cs_found; 1576 return cs_found;
1577 } 1577 }
1578 1578
1579 /* 1579 /*
1580 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps 1580 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1581 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW). 1581 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1582 * 1582 *
1583 * The @sys_addr is usually an error address received from the hardware 1583 * The @sys_addr is usually an error address received from the hardware
1584 * (MCX_ADDR). 1584 * (MCX_ADDR).
1585 */ 1585 */
1586 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, 1586 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1587 struct err_regs *err_info, 1587 struct err_regs *err_info,
1588 u64 sys_addr) 1588 u64 sys_addr)
1589 { 1589 {
1590 struct amd64_pvt *pvt = mci->pvt_info; 1590 struct amd64_pvt *pvt = mci->pvt_info;
1591 u32 page, offset; 1591 u32 page, offset;
1592 int nid, csrow, chan = 0; 1592 int nid, csrow, chan = 0;
1593 u16 syndrome; 1593 u16 syndrome;
1594 1594
1595 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); 1595 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1596 1596
1597 if (csrow < 0) { 1597 if (csrow < 0) {
1598 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1598 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1599 return; 1599 return;
1600 } 1600 }
1601 1601
1602 error_address_to_page_and_offset(sys_addr, &page, &offset); 1602 error_address_to_page_and_offset(sys_addr, &page, &offset);
1603 1603
1604 syndrome = extract_syndrome(err_info); 1604 syndrome = extract_syndrome(err_info);
1605 1605
1606 /* 1606 /*
1607 * We need the syndromes for channel detection only when we're 1607 * We need the syndromes for channel detection only when we're
1608 * ganged. Otherwise @chan should already contain the channel at 1608 * ganged. Otherwise @chan should already contain the channel at
1609 * this point. 1609 * this point.
1610 */ 1610 */
1611 if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) 1611 if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
1612 chan = get_channel_from_ecc_syndrome(mci, syndrome); 1612 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1613 1613
1614 if (chan >= 0) 1614 if (chan >= 0)
1615 edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, 1615 edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
1616 EDAC_MOD_STR); 1616 EDAC_MOD_STR);
1617 else 1617 else
1618 /* 1618 /*
1619 * Channel unknown, report all channels on this CSROW as failed. 1619 * Channel unknown, report all channels on this CSROW as failed.
1620 */ 1620 */
1621 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++) 1621 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1622 edac_mc_handle_ce(mci, page, offset, syndrome, 1622 edac_mc_handle_ce(mci, page, offset, syndrome,
1623 csrow, chan, EDAC_MOD_STR); 1623 csrow, chan, EDAC_MOD_STR);
1624 } 1624 }
1625 1625
1626 /* 1626 /*
1627 * debug routine to display the memory sizes of all logical DIMMs and 1627 * debug routine to display the memory sizes of all logical DIMMs and
1628 * their CSROWs as well 1628 * their CSROWs as well
1629 */ 1629 */
1630 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) 1630 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1631 { 1631 {
1632 int dimm, size0, size1, factor = 0; 1632 int dimm, size0, size1, factor = 0;
1633 u32 dbam; 1633 u32 dbam;
1634 u32 *dcsb; 1634 u32 *dcsb;
1635 1635
1636 if (boot_cpu_data.x86 == 0xf) { 1636 if (boot_cpu_data.x86 == 0xf) {
1637 if (pvt->dclr0 & F10_WIDTH_128) 1637 if (pvt->dclr0 & F10_WIDTH_128)
1638 factor = 1; 1638 factor = 1;
1639 1639
1640 /* K8 families < revF not supported yet */ 1640 /* K8 families < revF not supported yet */
1641 if (pvt->ext_model < K8_REV_F) 1641 if (pvt->ext_model < K8_REV_F)
1642 return; 1642 return;
1643 else 1643 else
1644 WARN_ON(ctrl != 0); 1644 WARN_ON(ctrl != 0);
1645 } 1645 }
1646 1646
1647 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", 1647 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1648 ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); 1648 ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
1649 1649
1650 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; 1650 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1651 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; 1651 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1652 1652
1653 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); 1653 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1654 1654
1655 /* Dump memory sizes for DIMM and its CSROWs */ 1655 /* Dump memory sizes for DIMM and its CSROWs */
1656 for (dimm = 0; dimm < 4; dimm++) { 1656 for (dimm = 0; dimm < 4; dimm++) {
1657 1657
1658 size0 = 0; 1658 size0 = 0;
1659 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) 1659 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1660 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); 1660 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1661 1661
1662 size1 = 0; 1662 size1 = 0;
1663 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) 1663 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1664 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); 1664 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1665 1665
1666 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 1666 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1667 dimm * 2, size0 << factor, 1667 dimm * 2, size0 << factor,
1668 dimm * 2 + 1, size1 << factor); 1668 dimm * 2 + 1, size1 << factor);
1669 } 1669 }
1670 } 1670 }
1671 1671
1672 static struct amd64_family_type amd64_family_types[] = { 1672 static struct amd64_family_type amd64_family_types[] = {
1673 [K8_CPUS] = { 1673 [K8_CPUS] = {
1674 .ctl_name = "K8", 1674 .ctl_name = "K8",
1675 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, 1675 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1676 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, 1676 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1677 .ops = { 1677 .ops = {
1678 .early_channel_count = k8_early_channel_count, 1678 .early_channel_count = k8_early_channel_count,
1679 .get_error_address = k8_get_error_address, 1679 .get_error_address = k8_get_error_address,
1680 .read_dram_base_limit = k8_read_dram_base_limit, 1680 .read_dram_base_limit = k8_read_dram_base_limit,
1681 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, 1681 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1682 .dbam_to_cs = k8_dbam_to_chip_select, 1682 .dbam_to_cs = k8_dbam_to_chip_select,
1683 } 1683 }
1684 }, 1684 },
1685 [F10_CPUS] = { 1685 [F10_CPUS] = {
1686 .ctl_name = "F10h", 1686 .ctl_name = "F10h",
1687 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, 1687 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1688 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, 1688 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1689 .ops = { 1689 .ops = {
1690 .early_channel_count = f10_early_channel_count, 1690 .early_channel_count = f10_early_channel_count,
1691 .get_error_address = f10_get_error_address, 1691 .get_error_address = f10_get_error_address,
1692 .read_dram_base_limit = f10_read_dram_base_limit, 1692 .read_dram_base_limit = f10_read_dram_base_limit,
1693 .read_dram_ctl_register = f10_read_dram_ctl_register, 1693 .read_dram_ctl_register = f10_read_dram_ctl_register,
1694 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, 1694 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1695 .dbam_to_cs = f10_dbam_to_chip_select, 1695 .dbam_to_cs = f10_dbam_to_chip_select,
1696 } 1696 }
1697 }, 1697 },
1698 }; 1698 };
1699 1699
1700 static struct pci_dev *pci_get_related_function(unsigned int vendor, 1700 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1701 unsigned int device, 1701 unsigned int device,
1702 struct pci_dev *related) 1702 struct pci_dev *related)
1703 { 1703 {
1704 struct pci_dev *dev = NULL; 1704 struct pci_dev *dev = NULL;
1705 1705
1706 dev = pci_get_device(vendor, device, dev); 1706 dev = pci_get_device(vendor, device, dev);
1707 while (dev) { 1707 while (dev) {
1708 if ((dev->bus->number == related->bus->number) && 1708 if ((dev->bus->number == related->bus->number) &&
1709 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) 1709 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1710 break; 1710 break;
1711 dev = pci_get_device(vendor, device, dev); 1711 dev = pci_get_device(vendor, device, dev);
1712 } 1712 }
1713 1713
1714 return dev; 1714 return dev;
1715 } 1715 }
1716 1716
1717 /* 1717 /*
1718 * These are tables of eigenvectors (one per line) which can be used for the 1718 * These are tables of eigenvectors (one per line) which can be used for the
1719 * construction of the syndrome tables. The modified syndrome search algorithm 1719 * construction of the syndrome tables. The modified syndrome search algorithm
1720 * uses those to find the symbol in error and thus the DIMM. 1720 * uses those to find the symbol in error and thus the DIMM.
1721 * 1721 *
1722 * Algorithm courtesy of Ross LaFetra from AMD. 1722 * Algorithm courtesy of Ross LaFetra from AMD.
1723 */ 1723 */
1724 static u16 x4_vectors[] = { 1724 static u16 x4_vectors[] = {
1725 0x2f57, 0x1afe, 0x66cc, 0xdd88, 1725 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1726 0x11eb, 0x3396, 0x7f4c, 0xeac8, 1726 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1727 0x0001, 0x0002, 0x0004, 0x0008, 1727 0x0001, 0x0002, 0x0004, 0x0008,
1728 0x1013, 0x3032, 0x4044, 0x8088, 1728 0x1013, 0x3032, 0x4044, 0x8088,
1729 0x106b, 0x30d6, 0x70fc, 0xe0a8, 1729 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1730 0x4857, 0xc4fe, 0x13cc, 0x3288, 1730 0x4857, 0xc4fe, 0x13cc, 0x3288,
1731 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, 1731 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1732 0x1f39, 0x251e, 0xbd6c, 0x6bd8, 1732 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1733 0x15c1, 0x2a42, 0x89ac, 0x4758, 1733 0x15c1, 0x2a42, 0x89ac, 0x4758,
1734 0x2b03, 0x1602, 0x4f0c, 0xca08, 1734 0x2b03, 0x1602, 0x4f0c, 0xca08,
1735 0x1f07, 0x3a0e, 0x6b04, 0xbd08, 1735 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1736 0x8ba7, 0x465e, 0x244c, 0x1cc8, 1736 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1737 0x2b87, 0x164e, 0x642c, 0xdc18, 1737 0x2b87, 0x164e, 0x642c, 0xdc18,
1738 0x40b9, 0x80de, 0x1094, 0x20e8, 1738 0x40b9, 0x80de, 0x1094, 0x20e8,
1739 0x27db, 0x1eb6, 0x9dac, 0x7b58, 1739 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1740 0x11c1, 0x2242, 0x84ac, 0x4c58, 1740 0x11c1, 0x2242, 0x84ac, 0x4c58,
1741 0x1be5, 0x2d7a, 0x5e34, 0xa718, 1741 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1742 0x4b39, 0x8d1e, 0x14b4, 0x28d8, 1742 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1743 0x4c97, 0xc87e, 0x11fc, 0x33a8, 1743 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1744 0x8e97, 0x497e, 0x2ffc, 0x1aa8, 1744 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1745 0x16b3, 0x3d62, 0x4f34, 0x8518, 1745 0x16b3, 0x3d62, 0x4f34, 0x8518,
1746 0x1e2f, 0x391a, 0x5cac, 0xf858, 1746 0x1e2f, 0x391a, 0x5cac, 0xf858,
1747 0x1d9f, 0x3b7a, 0x572c, 0xfe18, 1747 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1748 0x15f5, 0x2a5a, 0x5264, 0xa3b8, 1748 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1749 0x1dbb, 0x3b66, 0x715c, 0xe3f8, 1749 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1750 0x4397, 0xc27e, 0x17fc, 0x3ea8, 1750 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1751 0x1617, 0x3d3e, 0x6464, 0xb8b8, 1751 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1752 0x23ff, 0x12aa, 0xab6c, 0x56d8, 1752 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1753 0x2dfb, 0x1ba6, 0x913c, 0x7328, 1753 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1754 0x185d, 0x2ca6, 0x7914, 0x9e28, 1754 0x185d, 0x2ca6, 0x7914, 0x9e28,
1755 0x171b, 0x3e36, 0x7d7c, 0xebe8, 1755 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1756 0x4199, 0x82ee, 0x19f4, 0x2e58, 1756 0x4199, 0x82ee, 0x19f4, 0x2e58,
1757 0x4807, 0xc40e, 0x130c, 0x3208, 1757 0x4807, 0xc40e, 0x130c, 0x3208,
1758 0x1905, 0x2e0a, 0x5804, 0xac08, 1758 0x1905, 0x2e0a, 0x5804, 0xac08,
1759 0x213f, 0x132a, 0xadfc, 0x5ba8, 1759 0x213f, 0x132a, 0xadfc, 0x5ba8,
1760 0x19a9, 0x2efe, 0xb5cc, 0x6f88, 1760 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1761 }; 1761 };
1762 1762
1763 static u16 x8_vectors[] = { 1763 static u16 x8_vectors[] = {
1764 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, 1764 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1765 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, 1765 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1766 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, 1766 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1767 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80, 1767 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1768 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, 1768 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1769 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, 1769 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1770 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, 1770 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1771 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, 1771 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1772 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, 1772 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1773 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, 1773 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1774 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, 1774 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1775 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, 1775 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1776 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, 1776 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1777 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, 1777 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1778 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, 1778 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1779 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, 1779 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1780 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, 1780 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1781 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 1781 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1782 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, 1782 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1783 }; 1783 };
1784 1784
1785 static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, 1785 static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
1786 int v_dim) 1786 int v_dim)
1787 { 1787 {
1788 unsigned int i, err_sym; 1788 unsigned int i, err_sym;
1789 1789
1790 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { 1790 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1791 u16 s = syndrome; 1791 u16 s = syndrome;
1792 int v_idx = err_sym * v_dim; 1792 int v_idx = err_sym * v_dim;
1793 int v_end = (err_sym + 1) * v_dim; 1793 int v_end = (err_sym + 1) * v_dim;
1794 1794
1795 /* walk over all 16 bits of the syndrome */ 1795 /* walk over all 16 bits of the syndrome */
1796 for (i = 1; i < (1U << 16); i <<= 1) { 1796 for (i = 1; i < (1U << 16); i <<= 1) {
1797 1797
1798 /* if bit is set in that eigenvector... */ 1798 /* if bit is set in that eigenvector... */
1799 if (v_idx < v_end && vectors[v_idx] & i) { 1799 if (v_idx < v_end && vectors[v_idx] & i) {
1800 u16 ev_comp = vectors[v_idx++]; 1800 u16 ev_comp = vectors[v_idx++];
1801 1801
1802 /* ... and bit set in the modified syndrome, */ 1802 /* ... and bit set in the modified syndrome, */
1803 if (s & i) { 1803 if (s & i) {
1804 /* remove it. */ 1804 /* remove it. */
1805 s ^= ev_comp; 1805 s ^= ev_comp;
1806 1806
1807 if (!s) 1807 if (!s)
1808 return err_sym; 1808 return err_sym;
1809 } 1809 }
1810 1810
1811 } else if (s & i) 1811 } else if (s & i)
1812 /* can't get to zero, move to next symbol */ 1812 /* can't get to zero, move to next symbol */
1813 break; 1813 break;
1814 } 1814 }
1815 } 1815 }
1816 1816
1817 debugf0("syndrome(%x) not found\n", syndrome); 1817 debugf0("syndrome(%x) not found\n", syndrome);
1818 return -1; 1818 return -1;
1819 } 1819 }
1820 1820
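The loop above is a row-echelon style reduction: for each candidate error symbol it tries to express the syndrome as an XOR of that symbol's v_dim vectors, walking bit positions from LSB to MSB and cancelling each vector whose pivot bit is still set in the running syndrome. If the syndrome reduces to zero the symbol is found; if a set bit has no matching pivot, this basis cannot produce it and the next symbol is tried. A minimal standalone sketch of the same reduction, using a made-up two-vector basis rather than the driver's real x4/x8 tables:

#include <stdio.h>
#include <stdint.h>

/* Same reduction as decode_syndrome(), for a single symbol's basis. */
static int reduces_to_zero(uint16_t s, const uint16_t *vecs, int v_dim)
{
	int v_idx = 0;
	uint32_t i;

	for (i = 1; i < (1U << 16); i <<= 1) {
		if (v_idx < v_dim && (vecs[v_idx] & i)) {
			uint16_t ev = vecs[v_idx++];	/* pivot at bit i */

			if (s & i) {
				s ^= ev;		/* cancel the pivot */
				if (!s)
					return 1;
			}
		} else if (s & i)
			return 0;	/* set bit with no pivot: not this symbol */
	}
	return 0;
}

int main(void)
{
	const uint16_t basis[] = { 0x0005, 0x000a };	/* hypothetical basis */

	/* 0x000f == 0x0005 ^ 0x000a, so it reduces to zero... */
	printf("%d\n", reduces_to_zero(0x000f, basis, 2));	/* prints 1 */
	/* ...while 0x0001 is not an XOR of these basis vectors */
	printf("%d\n", reduces_to_zero(0x0001, basis, 2));	/* prints 0 */
	return 0;
}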
1821 static int map_err_sym_to_channel(int err_sym, int sym_size) 1821 static int map_err_sym_to_channel(int err_sym, int sym_size)
1822 { 1822 {
1823 if (sym_size == 4) 1823 if (sym_size == 4)
1824 switch (err_sym) { 1824 switch (err_sym) {
1825 case 0x20: 1825 case 0x20:
1826 case 0x21: 1826 case 0x21:
1827 return 0; 1827 return 0;
1828 break; 1828 break;
1829 case 0x22: 1829 case 0x22:
1830 case 0x23: 1830 case 0x23:
1831 return 1; 1831 return 1;
1832 break; 1832 break;
1833 default: 1833 default:
1834 return err_sym >> 4; 1834 return err_sym >> 4;
1835 break; 1835 break;
1836 } 1836 }
1837 /* x8 symbols */ 1837 /* x8 symbols */
1838 else 1838 else
1839 switch (err_sym) { 1839 switch (err_sym) {
1840 /* imaginary bits not in a DIMM */ 1840 /* imaginary bits not in a DIMM */
1841 case 0x10: 1841 case 0x10:
1842 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", 1842 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1843 err_sym); 1843 err_sym);
1844 return -1; 1844 return -1;
1845 break; 1845 break;
1846 1846
1847 case 0x11: 1847 case 0x11:
1848 return 0; 1848 return 0;
1849 break; 1849 break;
1850 case 0x12: 1850 case 0x12:
1851 return 1; 1851 return 1;
1852 break; 1852 break;
1853 default: 1853 default:
1854 return err_sym >> 3; 1854 return err_sym >> 3;
1855 break; 1855 break;
1856 } 1856 }
1857 return -1; 1857 return -1;
1858 } 1858 }
1859 1859
1860 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) 1860 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1861 { 1861 {
1862 struct amd64_pvt *pvt = mci->pvt_info; 1862 struct amd64_pvt *pvt = mci->pvt_info;
1863 int err_sym = -1; 1863 int err_sym = -1;
1864 1864
1865 if (pvt->syn_type == 8) 1865 if (pvt->syn_type == 8)
1866 err_sym = decode_syndrome(syndrome, x8_vectors, 1866 err_sym = decode_syndrome(syndrome, x8_vectors,
1867 ARRAY_SIZE(x8_vectors), 1867 ARRAY_SIZE(x8_vectors),
1868 pvt->syn_type); 1868 pvt->syn_type);
1869 else if (pvt->syn_type == 4) 1869 else if (pvt->syn_type == 4)
1870 err_sym = decode_syndrome(syndrome, x4_vectors, 1870 err_sym = decode_syndrome(syndrome, x4_vectors,
1871 ARRAY_SIZE(x4_vectors), 1871 ARRAY_SIZE(x4_vectors),
1872 pvt->syn_type); 1872 pvt->syn_type);
1873 else { 1873 else {
1874 amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type); 1874 amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
1875 return err_sym; 1875 return err_sym;
1876 } 1876 }
1877 1877
1878 return map_err_sym_to_channel(err_sym, pvt->syn_type); 1878 return map_err_sym_to_channel(err_sym, pvt->syn_type);
1879 } 1879 }
1880 1880
1881 /* 1881 /*
1882 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR 1882 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1883 * ADDRESS and process. 1883 * ADDRESS and process.
1884 */ 1884 */
1885 static void amd64_handle_ce(struct mem_ctl_info *mci, 1885 static void amd64_handle_ce(struct mem_ctl_info *mci,
1886 struct err_regs *info) 1886 struct err_regs *info)
1887 { 1887 {
1888 struct amd64_pvt *pvt = mci->pvt_info; 1888 struct amd64_pvt *pvt = mci->pvt_info;
1889 u64 sys_addr; 1889 u64 sys_addr;
1890 1890
1891 /* Ensure that the Error Address is VALID */ 1891 /* Ensure that the Error Address is VALID */
1892 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { 1892 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
1893 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1893 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1894 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1894 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1895 return; 1895 return;
1896 } 1896 }
1897 1897
1898 sys_addr = pvt->ops->get_error_address(mci, info); 1898 sys_addr = pvt->ops->get_error_address(mci, info);
1899 1899
1900 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); 1900 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1901 1901
1902 pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); 1902 pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
1903 } 1903 }
1904 1904
1905 /* Handle any Un-correctable Errors (UEs) */ 1905 /* Handle any Un-correctable Errors (UEs) */
1906 static void amd64_handle_ue(struct mem_ctl_info *mci, 1906 static void amd64_handle_ue(struct mem_ctl_info *mci,
1907 struct err_regs *info) 1907 struct err_regs *info)
1908 { 1908 {
1909 struct amd64_pvt *pvt = mci->pvt_info; 1909 struct amd64_pvt *pvt = mci->pvt_info;
1910 struct mem_ctl_info *log_mci, *src_mci = NULL; 1910 struct mem_ctl_info *log_mci, *src_mci = NULL;
1911 int csrow; 1911 int csrow;
1912 u64 sys_addr; 1912 u64 sys_addr;
1913 u32 page, offset; 1913 u32 page, offset;
1914 1914
1915 log_mci = mci; 1915 log_mci = mci;
1916 1916
1917 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { 1917 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
1918 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1918 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1919 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 1919 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1920 return; 1920 return;
1921 } 1921 }
1922 1922
1923 sys_addr = pvt->ops->get_error_address(mci, info); 1923 sys_addr = pvt->ops->get_error_address(mci, info);
1924 1924
1925 /* 1925 /*
1926 * Find out which node the error address belongs to. This may be 1926 * Find out which node the error address belongs to. This may be
1927 * different from the node that detected the error. 1927 * different from the node that detected the error.
1928 */ 1928 */
1929 src_mci = find_mc_by_sys_addr(mci, sys_addr); 1929 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1930 if (!src_mci) { 1930 if (!src_mci) {
1931 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", 1931 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1932 (unsigned long)sys_addr); 1932 (unsigned long)sys_addr);
1933 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 1933 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1934 return; 1934 return;
1935 } 1935 }
1936 1936
1937 log_mci = src_mci; 1937 log_mci = src_mci;
1938 1938
1939 csrow = sys_addr_to_csrow(log_mci, sys_addr); 1939 csrow = sys_addr_to_csrow(log_mci, sys_addr);
1940 if (csrow < 0) { 1940 if (csrow < 0) {
1941 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", 1941 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1942 (unsigned long)sys_addr); 1942 (unsigned long)sys_addr);
1943 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 1943 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1944 } else { 1944 } else {
1945 error_address_to_page_and_offset(sys_addr, &page, &offset); 1945 error_address_to_page_and_offset(sys_addr, &page, &offset);
1946 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); 1946 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
1947 } 1947 }
1948 } 1948 }
1949 1949
1950 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, 1950 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1951 struct err_regs *info) 1951 struct err_regs *info)
1952 { 1952 {
1953 u32 ec = ERROR_CODE(info->nbsl); 1953 u32 ec = ERROR_CODE(info->nbsl);
1954 u32 xec = EXT_ERROR_CODE(info->nbsl); 1954 u32 xec = EXT_ERROR_CODE(info->nbsl);
1955 int ecc_type = (info->nbsh >> 13) & 0x3; 1955 int ecc_type = (info->nbsh >> 13) & 0x3;
1956 1956
1957 /* Bail out early if this was an 'observed' error */ 1957 /* Bail out early if this was an 'observed' error */
1958 if (PP(ec) == K8_NBSL_PP_OBS) 1958 if (PP(ec) == K8_NBSL_PP_OBS)
1959 return; 1959 return;
1960 1960
1961 /* Do only ECC errors */ 1961 /* Do only ECC errors */
1962 if (xec && xec != F10_NBSL_EXT_ERR_ECC) 1962 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
1963 return; 1963 return;
1964 1964
1965 if (ecc_type == 2) 1965 if (ecc_type == 2)
1966 amd64_handle_ce(mci, info); 1966 amd64_handle_ce(mci, info);
1967 else if (ecc_type == 1) 1967 else if (ecc_type == 1)
1968 amd64_handle_ue(mci, info); 1968 amd64_handle_ue(mci, info);
1969 } 1969 }
1970 1970
1971 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) 1971 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
1972 { 1972 {
1973 struct mem_ctl_info *mci = mcis[node_id]; 1973 struct mem_ctl_info *mci = mcis[node_id];
1974 struct err_regs regs; 1974 struct err_regs regs;
1975 1975
1976 regs.nbsl = (u32) m->status; 1976 regs.nbsl = (u32) m->status;
1977 regs.nbsh = (u32)(m->status >> 32); 1977 regs.nbsh = (u32)(m->status >> 32);
1978 regs.nbeal = (u32) m->addr; 1978 regs.nbeal = (u32) m->addr;
1979 regs.nbeah = (u32)(m->addr >> 32); 1979 regs.nbeah = (u32)(m->addr >> 32);
1980 regs.nbcfg = nbcfg; 1980 regs.nbcfg = nbcfg;
1981 1981
1982 __amd64_decode_bus_error(mci, &regs); 1982 __amd64_decode_bus_error(mci, &regs);
1983 1983
1984 /* 1984 /*
1985 * Check the UE bit of the NB status high register; if it is set, generate 1985 * Check the UE bit of the NB status high register; if it is set, generate
1986 * some logs. If it was NOT a GART error, process the event as a NO-INFO 1986 * some logs. If it was NOT a GART error, process the event as a NO-INFO
1987 * event. If it was a GART error, skip that processing. 1987 * event. If it was a GART error, skip that processing.
1988 * 1988 *
1989 * FIXME: this should go somewhere else, if at all. 1989 * FIXME: this should go somewhere else, if at all.
1990 */ 1990 */
1991 if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors) 1991 if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
1992 edac_mc_handle_ue_no_info(mci, "UE bit is set"); 1992 edac_mc_handle_ue_no_info(mci, "UE bit is set");
1993 1993
1994 } 1994 }
1995 1995
1996 /* 1996 /*
1997 * Use pvt->F2 which contains the F2 CPU PCI device to get the related 1997 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
1998 * F1 (AddrMap) and F3 (Misc) devices. Return a negative value on error. 1998 * F1 (AddrMap) and F3 (Misc) devices. Return a negative value on error.
1999 */ 1999 */
2000 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id) 2000 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2001 { 2001 {
2002 /* Reserve the ADDRESS MAP Device */ 2002 /* Reserve the ADDRESS MAP Device */
2003 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2); 2003 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2004 if (!pvt->F1) { 2004 if (!pvt->F1) {
2005 amd64_err("error address map device not found: " 2005 amd64_err("error address map device not found: "
2006 "vendor %x device 0x%x (broken BIOS?)\n", 2006 "vendor %x device 0x%x (broken BIOS?)\n",
2007 PCI_VENDOR_ID_AMD, f1_id); 2007 PCI_VENDOR_ID_AMD, f1_id);
2008 return -ENODEV; 2008 return -ENODEV;
2009 } 2009 }
2010 2010
2011 /* Reserve the MISC Device */ 2011 /* Reserve the MISC Device */
2012 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2); 2012 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2013 if (!pvt->F3) { 2013 if (!pvt->F3) {
2014 pci_dev_put(pvt->F1); 2014 pci_dev_put(pvt->F1);
2015 pvt->F1 = NULL; 2015 pvt->F1 = NULL;
2016 2016
2017 amd64_err("error F3 device not found: " 2017 amd64_err("error F3 device not found: "
2018 "vendor %x device 0x%x (broken BIOS?)\n", 2018 "vendor %x device 0x%x (broken BIOS?)\n",
2019 PCI_VENDOR_ID_AMD, f3_id); 2019 PCI_VENDOR_ID_AMD, f3_id);
2020 2020
2021 return -ENODEV; 2021 return -ENODEV;
2022 } 2022 }
2023 debugf1("F1: %s\n", pci_name(pvt->F1)); 2023 debugf1("F1: %s\n", pci_name(pvt->F1));
2024 debugf1("F2: %s\n", pci_name(pvt->F2)); 2024 debugf1("F2: %s\n", pci_name(pvt->F2));
2025 debugf1("F3: %s\n", pci_name(pvt->F3)); 2025 debugf1("F3: %s\n", pci_name(pvt->F3));
2026 2026
2027 return 0; 2027 return 0;
2028 } 2028 }
2029 2029
2030 static void free_mc_sibling_devs(struct amd64_pvt *pvt) 2030 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2031 { 2031 {
2032 pci_dev_put(pvt->F1); 2032 pci_dev_put(pvt->F1);
2033 pci_dev_put(pvt->F3); 2033 pci_dev_put(pvt->F3);
2034 } 2034 }
2035 2035
2036 /* 2036 /*
2037 * Retrieve the hardware registers of the memory controller (this includes the 2037 * Retrieve the hardware registers of the memory controller (this includes the
2038 * 'Address Map' and 'Misc' device regs) 2038 * 'Address Map' and 'Misc' device regs)
2039 */ 2039 */
2040 static void read_mc_regs(struct amd64_pvt *pvt) 2040 static void read_mc_regs(struct amd64_pvt *pvt)
2041 { 2041 {
2042 u64 msr_val; 2042 u64 msr_val;
2043 u32 tmp; 2043 u32 tmp;
2044 int dram; 2044 int dram;
2045 2045
2046 /* 2046 /*
2047 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since 2047 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2048 * those are Read-As-Zero 2048 * those are Read-As-Zero
2049 */ 2049 */
2050 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); 2050 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2051 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); 2051 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
2052 2052
2053 /* check first whether TOP_MEM2 is enabled */ 2053 /* check first whether TOP_MEM2 is enabled */
2054 rdmsrl(MSR_K8_SYSCFG, msr_val); 2054 rdmsrl(MSR_K8_SYSCFG, msr_val);
2055 if (msr_val & (1U << 21)) { 2055 if (msr_val & (1U << 21)) {
2056 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); 2056 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2057 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); 2057 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2058 } else 2058 } else
2059 debugf0(" TOP_MEM2 disabled.\n"); 2059 debugf0(" TOP_MEM2 disabled.\n");
2060 2060
2061 amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap); 2061 amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
2062 2062
2063 if (pvt->ops->read_dram_ctl_register) 2063 if (pvt->ops->read_dram_ctl_register)
2064 pvt->ops->read_dram_ctl_register(pvt); 2064 pvt->ops->read_dram_ctl_register(pvt);
2065 2065
2066 for (dram = 0; dram < DRAM_REG_COUNT; dram++) { 2066 for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
2067 /* 2067 /*
2068 * Call CPU specific READ function to get the DRAM Base and 2068 * Call CPU specific READ function to get the DRAM Base and
2069 * Limit values from the DCT. 2069 * Limit values from the DCT.
2070 */ 2070 */
2071 pvt->ops->read_dram_base_limit(pvt, dram); 2071 pvt->ops->read_dram_base_limit(pvt, dram);
2072 2072
2073 /* 2073 /*
2074 * Only print out debug info on rows with both R and W enabled. In 2074 * Only print out debug info on rows with both R and W enabled. In
2075 * normal processing, the compiler should optimize this whole 'if' 2075 * normal processing, the compiler should optimize this whole 'if'
2076 * debug output block away. 2076 * debug output block away.
2077 */ 2077 */
2078 if (pvt->dram_rw_en[dram] != 0) { 2078 if (pvt->dram_rw_en[dram] != 0) {
2079 debugf1(" DRAM-BASE[%d]: 0x%016llx " 2079 debugf1(" DRAM-BASE[%d]: 0x%016llx "
2080 "DRAM-LIMIT: 0x%016llx\n", 2080 "DRAM-LIMIT: 0x%016llx\n",
2081 dram, 2081 dram,
2082 pvt->dram_base[dram], 2082 pvt->dram_base[dram],
2083 pvt->dram_limit[dram]); 2083 pvt->dram_limit[dram]);
2084 2084
2085 debugf1(" IntlvEn=%s %s %s " 2085 debugf1(" IntlvEn=%s %s %s "
2086 "IntlvSel=%d DstNode=%d\n", 2086 "IntlvSel=%d DstNode=%d\n",
2087 pvt->dram_IntlvEn[dram] ? 2087 pvt->dram_IntlvEn[dram] ?
2088 "Enabled" : "Disabled", 2088 "Enabled" : "Disabled",
2089 (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", 2089 (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
2090 (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", 2090 (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
2091 pvt->dram_IntlvSel[dram], 2091 pvt->dram_IntlvSel[dram],
2092 pvt->dram_DstNode[dram]); 2092 pvt->dram_DstNode[dram]);
2093 } 2093 }
2094 } 2094 }
2095 2095
2096 amd64_read_dct_base_mask(pvt); 2096 amd64_read_dct_base_mask(pvt);
2097 2097
2098 amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar); 2098 amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
2099 amd64_read_dbam_reg(pvt); 2099 amd64_read_dbam_reg(pvt);
2100 2100
2101 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); 2101 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2102 2102
2103 amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); 2103 amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
2104 amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0); 2104 amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
2105 2105
2106 if (boot_cpu_data.x86 >= 0x10) { 2106 if (boot_cpu_data.x86 >= 0x10) {
2107 if (!dct_ganging_enabled(pvt)) { 2107 if (!dct_ganging_enabled(pvt)) {
2108 amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1); 2108 amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
2109 amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1); 2109 amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
2110 } 2110 }
2111 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); 2111 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2112 } 2112 }
2113 2113
2114 if (boot_cpu_data.x86 == 0x10 && 2114 if (boot_cpu_data.x86 == 0x10 &&
2115 boot_cpu_data.x86_model > 7 && 2115 boot_cpu_data.x86_model > 7 &&
2116 /* F3x180[EccSymbolSize]=1 => x8 symbols */ 2116 /* F3x180[EccSymbolSize]=1 => x8 symbols */
2117 tmp & BIT(25)) 2117 tmp & BIT(25))
2118 pvt->syn_type = 8; 2118 pvt->syn_type = 8;
2119 else 2119 else
2120 pvt->syn_type = 4; 2120 pvt->syn_type = 4;
2121 2121
2122 amd64_dump_misc_regs(pvt); 2122 amd64_dump_misc_regs(pvt);
2123 } 2123 }
2124 2124
2125 /* 2125 /*
2126 * NOTE: CPU Revision Dependent code 2126 * NOTE: CPU Revision Dependent code
2127 * 2127 *
2128 * Input: 2128 * Input:
2129 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) 2129 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
2130 * k8 private pointer to --> 2130 * k8 private pointer to -->
2131 * DRAM Bank Address mapping register 2131 * DRAM Bank Address mapping register
2132 * node_id 2132 * node_id
2133 * DCL register where dual_channel_active is 2133 * DCL register where dual_channel_active is
2134 * 2134 *
2135 * The DBAM register consists of four 4-bit fields, defined as follows: 2135 * The DBAM register consists of four 4-bit fields, defined as follows:
2136 * 2136 *
2137 * Bits: CSROWs 2137 * Bits: CSROWs
2138 * 0-3 CSROWs 0 and 1 2138 * 0-3 CSROWs 0 and 1
2139 * 4-7 CSROWs 2 and 3 2139 * 4-7 CSROWs 2 and 3
2140 * 8-11 CSROWs 4 and 5 2140 * 8-11 CSROWs 4 and 5
2141 * 12-15 CSROWs 6 and 7 2141 * 12-15 CSROWs 6 and 7
2142 * 2142 *
2143 * Values range from 0 to 15. 2143 * Values range from 0 to 15.
2144 * The meaning of the values depends on CPU revision and dual-channel state; 2144 * The meaning of the values depends on CPU revision and dual-channel state;
2145 * see the relevant BKDG for more info. 2145 * see the relevant BKDG for more info.
2146 * 2146 *
2147 * The memory controller provides for a total of only 8 CSROWs in its 2147 * The memory controller provides for a total of only 8 CSROWs in its
2148 * current architecture. Each "pair" of CSROWs normally represents just one 2148 * current architecture. Each "pair" of CSROWs normally represents just one
2149 * DIMM in single channel mode or two (2) DIMMs in dual channel mode. 2149 * DIMM in single channel mode or two (2) DIMMs in dual channel mode.
2150 * 2150 *
2151 * The following code logic collapses the various tables for CSROW based on CPU 2151 * The following code logic collapses the various tables for CSROW based on CPU
2152 * revision. 2152 * revision.
2153 * 2153 *
2154 * Returns: 2154 * Returns:
2155 * The number of PAGE_SIZE pages that the specified CSROW 2155 * The number of PAGE_SIZE pages that the specified CSROW
2156 * encompasses 2156 * encompasses
2157 * 2157 *
2158 */ 2158 */
2159 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) 2159 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2160 { 2160 {
2161 u32 cs_mode, nr_pages; 2161 u32 cs_mode, nr_pages;
2162 2162
2163 /* 2163 /*
2164 * The math on this doesn't look right on the surface because x/2*4 can 2164 * The math on this doesn't look right on the surface because x/2*4 can
2165 * be simplified to x*2 but this expression makes use of the fact that 2165 * be simplified to x*2 but this expression makes use of the fact that
2166 * it is integer math, where 1/2=0. This intermediate value becomes the 2166 * it is integer math, where 1/2=0. This intermediate value becomes the
2167 * number of bits to shift the DBAM register to extract the proper CSROW 2167 * number of bits to shift the DBAM register to extract the proper CSROW
2168 * field. 2168 * field.
2169 */ 2169 */
2170 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; 2170 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2171 2171
2172 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); 2172 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
2173 2173
2174 /* 2174 /*
2175 * If dual channel, then double the memory size of the single channel. 2175 * If dual channel, then double the memory size of the single channel.
2176 * Channel count is 1 or 2. 2176 * Channel count is 1 or 2.
2177 */ 2177 */
2178 nr_pages <<= (pvt->channel_count - 1); 2178 nr_pages <<= (pvt->channel_count - 1);
2179 2179
2180 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); 2180 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2181 debugf0(" nr_pages= %u channel-count = %d\n", 2181 debugf0(" nr_pages= %u channel-count = %d\n",
2182 nr_pages, pvt->channel_count); 2182 nr_pages, pvt->channel_count);
2183 2183
2184 return nr_pages; 2184 return nr_pages;
2185 } 2185 }
2186 2186
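To make the shift math above concrete, here is a small worked example. The DBAM value and the size lookup are hypothetical; fake_dbam_to_cs() merely stands in for pvt->ops->dbam_to_cs() and its per-family tables:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

/* Stand-in for pvt->ops->dbam_to_cs(): pretend cs_mode maps to MiB. */
static unsigned int fake_dbam_to_cs(unsigned int cs_mode)
{
	return 32U << cs_mode;	/* made-up monotonic mapping */
}

int main(void)
{
	unsigned int dbam0 = 0x00000300;	/* hypothetical DBAM contents */
	int csrow_nr = 5;
	int channel_count = 2;

	/* (5 / 2) * 4 == 8 in integer math: bits 8-11 cover CSROWs 4 and 5 */
	unsigned int cs_mode = (dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;

	/* MiB -> PAGE_SIZE pages: << (20 - 12) multiplies by 256 */
	unsigned int nr_pages = fake_dbam_to_cs(cs_mode) << (20 - PAGE_SHIFT);

	/* double it when both channels are populated */
	nr_pages <<= (channel_count - 1);

	printf("cs_mode=%u nr_pages=%u\n", cs_mode, nr_pages);	/* 3, 131072 */
	return 0;
}

With these made-up numbers the extracted field value is 3, giving 256 MiB per channel, i.e. 65536 pages, doubled to 131072 pages (512 MiB) in dual channel mode.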
2187 /* 2187 /*
2188 * Initialize the array of csrow attribute instances, based on the values 2188 * Initialize the array of csrow attribute instances, based on the values
2189 * from pci config hardware registers. 2189 * from pci config hardware registers.
2190 */ 2190 */
2191 static int init_csrows(struct mem_ctl_info *mci) 2191 static int init_csrows(struct mem_ctl_info *mci)
2192 { 2192 {
2193 struct csrow_info *csrow; 2193 struct csrow_info *csrow;
2194 struct amd64_pvt *pvt = mci->pvt_info; 2194 struct amd64_pvt *pvt = mci->pvt_info;
2195 u64 input_addr_min, input_addr_max, sys_addr; 2195 u64 input_addr_min, input_addr_max, sys_addr;
2196 u32 val; 2196 u32 val;
2197 int i, empty = 1; 2197 int i, empty = 1;
2198 2198
2199 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val); 2199 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
2200 2200
2201 pvt->nbcfg = val; 2201 pvt->nbcfg = val;
2202 pvt->ctl_error_info.nbcfg = val; 2202 pvt->ctl_error_info.nbcfg = val;
2203 2203
2204 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2204 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2205 pvt->mc_node_id, val, 2205 pvt->mc_node_id, val,
2206 !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE)); 2206 !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
2207 2207
2208 for (i = 0; i < pvt->cs_count; i++) { 2208 for (i = 0; i < pvt->cs_count; i++) {
2209 csrow = &mci->csrows[i]; 2209 csrow = &mci->csrows[i];
2210 2210
2211 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { 2211 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
2212 debugf1("----CSROW %d EMPTY for node %d\n", i, 2212 debugf1("----CSROW %d EMPTY for node %d\n", i,
2213 pvt->mc_node_id); 2213 pvt->mc_node_id);
2214 continue; 2214 continue;
2215 } 2215 }
2216 2216
2217 debugf1("----CSROW %d VALID for MC node %d\n", 2217 debugf1("----CSROW %d VALID for MC node %d\n",
2218 i, pvt->mc_node_id); 2218 i, pvt->mc_node_id);
2219 2219
2220 empty = 0; 2220 empty = 0;
2221 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); 2221 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2222 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); 2222 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2223 sys_addr = input_addr_to_sys_addr(mci, input_addr_min); 2223 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2224 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); 2224 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2225 sys_addr = input_addr_to_sys_addr(mci, input_addr_max); 2225 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2226 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); 2226 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2227 csrow->page_mask = ~mask_from_dct_mask(pvt, i); 2227 csrow->page_mask = ~mask_from_dct_mask(pvt, i);
2228 /* 8 bytes of resolution */ 2228 /* 8 bytes of resolution */
2229 2229
2230 csrow->mtype = amd64_determine_memory_type(pvt, i); 2230 csrow->mtype = amd64_determine_memory_type(pvt, i);
2231 2231
2232 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); 2232 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2233 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", 2233 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2234 (unsigned long)input_addr_min, 2234 (unsigned long)input_addr_min,
2235 (unsigned long)input_addr_max); 2235 (unsigned long)input_addr_max);
2236 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n", 2236 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2237 (unsigned long)sys_addr, csrow->page_mask); 2237 (unsigned long)sys_addr, csrow->page_mask);
2238 debugf1(" nr_pages: %u first_page: 0x%lx " 2238 debugf1(" nr_pages: %u first_page: 0x%lx "
2239 "last_page: 0x%lx\n", 2239 "last_page: 0x%lx\n",
2240 (unsigned)csrow->nr_pages, 2240 (unsigned)csrow->nr_pages,
2241 csrow->first_page, csrow->last_page); 2241 csrow->first_page, csrow->last_page);
2242 2242
2243 /* 2243 /*
2244 * determine whether CHIPKILL or JUST ECC or NO ECC is operating 2244 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2245 */ 2245 */
2246 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) 2246 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
2247 csrow->edac_mode = 2247 csrow->edac_mode =
2248 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? 2248 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
2249 EDAC_S4ECD4ED : EDAC_SECDED; 2249 EDAC_S4ECD4ED : EDAC_SECDED;
2250 else 2250 else
2251 csrow->edac_mode = EDAC_NONE; 2251 csrow->edac_mode = EDAC_NONE;
2252 } 2252 }
2253 2253
2254 return empty; 2254 return empty;
2255 } 2255 }
2256 2256
2257 /* get all cores on this DCT */ 2257 /* get all cores on this DCT */
2258 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) 2258 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
2259 { 2259 {
2260 int cpu; 2260 int cpu;
2261 2261
2262 for_each_online_cpu(cpu) 2262 for_each_online_cpu(cpu)
2263 if (amd_get_nb_id(cpu) == nid) 2263 if (amd_get_nb_id(cpu) == nid)
2264 cpumask_set_cpu(cpu, mask); 2264 cpumask_set_cpu(cpu, mask);
2265 } 2265 }
2266 2266
2267 /* check MCG_CTL on all the cpus on this node */ 2267 /* check MCG_CTL on all the cpus on this node */
2268 static bool amd64_nb_mce_bank_enabled_on_node(int nid) 2268 static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2269 { 2269 {
2270 cpumask_var_t mask; 2270 cpumask_var_t mask;
2271 int cpu, nbe; 2271 int cpu, nbe;
2272 bool ret = false; 2272 bool ret = false;
2273 2273
2274 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2274 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2275 amd64_warn("%s: Error allocating mask\n", __func__); 2275 amd64_warn("%s: Error allocating mask\n", __func__);
2276 return false; 2276 return false;
2277 } 2277 }
2278 2278
2279 get_cpus_on_this_dct_cpumask(mask, nid); 2279 get_cpus_on_this_dct_cpumask(mask, nid);
2280 2280
2281 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); 2281 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2282 2282
2283 for_each_cpu(cpu, mask) { 2283 for_each_cpu(cpu, mask) {
2284 struct msr *reg = per_cpu_ptr(msrs, cpu); 2284 struct msr *reg = per_cpu_ptr(msrs, cpu);
2285 nbe = reg->l & K8_MSR_MCGCTL_NBE; 2285 nbe = reg->l & K8_MSR_MCGCTL_NBE;
2286 2286
2287 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 2287 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2288 cpu, reg->q, 2288 cpu, reg->q,
2289 (nbe ? "enabled" : "disabled")); 2289 (nbe ? "enabled" : "disabled"));
2290 2290
2291 if (!nbe) 2291 if (!nbe)
2292 goto out; 2292 goto out;
2293 } 2293 }
2294 ret = true; 2294 ret = true;
2295 2295
2296 out: 2296 out:
2297 free_cpumask_var(mask); 2297 free_cpumask_var(mask);
2298 return ret; 2298 return ret;
2299 } 2299 }
2300 2300
2301 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on) 2301 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2302 { 2302 {
2303 cpumask_var_t cmask; 2303 cpumask_var_t cmask;
2304 int cpu; 2304 int cpu;
2305 2305
2306 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { 2306 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2307 amd64_warn("%s: error allocating mask\n", __func__); 2307 amd64_warn("%s: error allocating mask\n", __func__);
2308 return -ENOMEM; 2308 return -ENOMEM;
2309 } 2309 }
2310 2310
2311 get_cpus_on_this_dct_cpumask(cmask, nid); 2311 get_cpus_on_this_dct_cpumask(cmask, nid);
2312 2312
2313 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2313 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2314 2314
2315 for_each_cpu(cpu, cmask) { 2315 for_each_cpu(cpu, cmask) {
2316 2316
2317 struct msr *reg = per_cpu_ptr(msrs, cpu); 2317 struct msr *reg = per_cpu_ptr(msrs, cpu);
2318 2318
2319 if (on) { 2319 if (on) {
2320 if (reg->l & K8_MSR_MCGCTL_NBE) 2320 if (reg->l & K8_MSR_MCGCTL_NBE)
2321 s->flags.nb_mce_enable = 1; 2321 s->flags.nb_mce_enable = 1;
2322 2322
2323 reg->l |= K8_MSR_MCGCTL_NBE; 2323 reg->l |= K8_MSR_MCGCTL_NBE;
2324 } else { 2324 } else {
2325 /* 2325 /*
2326 * Turn off NB MCE reporting only when it was off before 2326 * Turn off NB MCE reporting only when it was off before
2327 */ 2327 */
2328 if (!s->flags.nb_mce_enable) 2328 if (!s->flags.nb_mce_enable)
2329 reg->l &= ~K8_MSR_MCGCTL_NBE; 2329 reg->l &= ~K8_MSR_MCGCTL_NBE;
2330 } 2330 }
2331 } 2331 }
2332 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2332 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2333 2333
2334 free_cpumask_var(cmask); 2334 free_cpumask_var(cmask);
2335 2335
2336 return 0; 2336 return 0;
2337 } 2337 }
2338 2338
2339 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, 2339 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2340 struct pci_dev *F3) 2340 struct pci_dev *F3)
2341 { 2341 {
2342 bool ret = true; 2342 bool ret = true;
2343 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; 2343 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2344 2344
2345 if (toggle_ecc_err_reporting(s, nid, ON)) { 2345 if (toggle_ecc_err_reporting(s, nid, ON)) {
2346 amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); 2346 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2347 return false; 2347 return false;
2348 } 2348 }
2349 2349
2350 amd64_read_pci_cfg(F3, K8_NBCTL, &value); 2350 amd64_read_pci_cfg(F3, K8_NBCTL, &value);
2351 2351
2352 /* turn on UECCEn and CECCEn bits */ 2352 /* turn on UECCEn and CECCEn bits */
2353 s->old_nbctl = value & mask; 2353 s->old_nbctl = value & mask;
2354 s->nbctl_valid = true; 2354 s->nbctl_valid = true;
2355 2355
2356 value |= mask; 2356 value |= mask;
2357 pci_write_config_dword(F3, K8_NBCTL, value); 2357 pci_write_config_dword(F3, K8_NBCTL, value);
2358 2358
2359 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2359 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2360 2360
2361 debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2361 debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2362 nid, value, 2362 nid, value,
2363 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE)); 2363 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
2364 2364
2365 if (!(value & K8_NBCFG_ECC_ENABLE)) { 2365 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2366 amd64_warn("DRAM ECC disabled on this node, enabling...\n"); 2366 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2367 2367
2368 s->flags.nb_ecc_prev = 0; 2368 s->flags.nb_ecc_prev = 0;
2369 2369
2370 /* Attempt to turn on DRAM ECC Enable */ 2370 /* Attempt to turn on DRAM ECC Enable */
2371 value |= K8_NBCFG_ECC_ENABLE; 2371 value |= K8_NBCFG_ECC_ENABLE;
2372 pci_write_config_dword(F3, K8_NBCFG, value); 2372 pci_write_config_dword(F3, K8_NBCFG, value);
2373 2373
2374 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2374 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2375 2375
2376 if (!(value & K8_NBCFG_ECC_ENABLE)) { 2376 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2377 amd64_warn("Hardware rejected DRAM ECC enable," 2377 amd64_warn("Hardware rejected DRAM ECC enable,"
2378 "check memory DIMM configuration.\n"); 2378 "check memory DIMM configuration.\n");
2379 ret = false; 2379 ret = false;
2380 } else { 2380 } else {
2381 amd64_info("Hardware accepted DRAM ECC Enable\n"); 2381 amd64_info("Hardware accepted DRAM ECC Enable\n");
2382 } 2382 }
2383 } else { 2383 } else {
2384 s->flags.nb_ecc_prev = 1; 2384 s->flags.nb_ecc_prev = 1;
2385 } 2385 }
2386 2386
2387 debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2387 debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2388 nid, value, 2388 nid, value,
2389 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE)); 2389 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
2390 2390
2391 return ret; 2391 return ret;
2392 } 2392 }
2393 2393
2394 static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid, 2394 static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2395 struct pci_dev *F3) 2395 struct pci_dev *F3)
2396 { 2396 {
2397 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; 2397 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2398 2398
2399 if (!s->nbctl_valid) 2399 if (!s->nbctl_valid)
2400 return; 2400 return;
2401 2401
2402 amd64_read_pci_cfg(F3, K8_NBCTL, &value); 2402 amd64_read_pci_cfg(F3, K8_NBCTL, &value);
2403 value &= ~mask; 2403 value &= ~mask;
2404 value |= s->old_nbctl; 2404 value |= s->old_nbctl;
2405 2405
2406 pci_write_config_dword(F3, K8_NBCTL, value); 2406 pci_write_config_dword(F3, K8_NBCTL, value);
2407 2407
2408 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ 2408 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2409 if (!s->flags.nb_ecc_prev) { 2409 if (!s->flags.nb_ecc_prev) {
2410 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2410 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2411 value &= ~K8_NBCFG_ECC_ENABLE; 2411 value &= ~K8_NBCFG_ECC_ENABLE;
2412 pci_write_config_dword(F3, K8_NBCFG, value); 2412 pci_write_config_dword(F3, K8_NBCFG, value);
2413 } 2413 }
2414 2414
2415 /* restore the NB Enable MCGCTL bit */ 2415 /* restore the NB Enable MCGCTL bit */
2416 if (toggle_ecc_err_reporting(s, nid, OFF)) 2416 if (toggle_ecc_err_reporting(s, nid, OFF))
2417 amd64_warn("Error restoring NB MCGCTL settings!\n"); 2417 amd64_warn("Error restoring NB MCGCTL settings!\n");
2418 } 2418 }
2419 2419
2420 /* 2420 /*
2421 * EDAC requires that the BIOS have ECC enabled before 2421 * EDAC requires that the BIOS have ECC enabled before
2422 * taking over the processing of ECC errors. A command line 2422 * taking over the processing of ECC errors. A command line
2423 * option allows one to force-enable hardware ECC later in 2423 * option allows one to force-enable hardware ECC later in
2424 * enable_ecc_error_reporting(). 2424 * enable_ecc_error_reporting().
2425 */ 2425 */
2426 static const char *ecc_msg = 2426 static const char *ecc_msg =
2427 "ECC disabled in the BIOS or no ECC capability, module will not load.\n" 2427 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2428 " Either enable ECC checking or force module loading by setting " 2428 " Either enable ECC checking or force module loading by setting "
2429 "'ecc_enable_override'.\n" 2429 "'ecc_enable_override'.\n"
2430 " (Note that use of the override may cause unknown side effects.)\n"; 2430 " (Note that use of the override may cause unknown side effects.)\n";
2431 2431
2432 static bool ecc_enabled(struct pci_dev *F3, u8 nid) 2432 static bool ecc_enabled(struct pci_dev *F3, u8 nid)
2433 { 2433 {
2434 u32 value; 2434 u32 value;
2435 u8 ecc_en = 0; 2435 u8 ecc_en = 0;
2436 bool nb_mce_en = false; 2436 bool nb_mce_en = false;
2437 2437
2438 amd64_read_pci_cfg(F3, K8_NBCFG, &value); 2438 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2439 2439
2440 ecc_en = !!(value & K8_NBCFG_ECC_ENABLE); 2440 ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
2441 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); 2441 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2442 2442
2443 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); 2443 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
2444 if (!nb_mce_en) 2444 if (!nb_mce_en)
2445 amd64_notice("NB MCE bank disabled, set MSR " 2445 amd64_notice("NB MCE bank disabled, set MSR "
2446 "0x%08x[4] on node %d to enable.\n", 2446 "0x%08x[4] on node %d to enable.\n",
2447 MSR_IA32_MCG_CTL, nid); 2447 MSR_IA32_MCG_CTL, nid);
2448 2448
2449 if (!ecc_en || !nb_mce_en) { 2449 if (!ecc_en || !nb_mce_en) {
2450 amd64_notice("%s", ecc_msg); 2450 amd64_notice("%s", ecc_msg);
2451 return false; 2451 return false;
2452 } 2452 }
2453 return true; 2453 return true;
2454 } 2454 }
2455 2455
2456 struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + 2456 struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
2457 ARRAY_SIZE(amd64_inj_attrs) + 2457 ARRAY_SIZE(amd64_inj_attrs) +
2458 1]; 2458 1];
2459 2459
2460 struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; 2460 struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2461 2461
2462 static void set_mc_sysfs_attrs(struct mem_ctl_info *mci) 2462 static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2463 { 2463 {
2464 unsigned int i = 0, j = 0; 2464 unsigned int i = 0, j = 0;
2465 2465
2466 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) 2466 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
2467 sysfs_attrs[i] = amd64_dbg_attrs[i]; 2467 sysfs_attrs[i] = amd64_dbg_attrs[i];
2468 2468
2469 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) 2469 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
2470 sysfs_attrs[i] = amd64_inj_attrs[j]; 2470 sysfs_attrs[i] = amd64_inj_attrs[j];
2471 2471
2472 sysfs_attrs[i] = terminator; 2472 sysfs_attrs[i] = terminator;
2473 2473
2474 mci->mc_driver_sysfs_attributes = sysfs_attrs; 2474 mci->mc_driver_sysfs_attributes = sysfs_attrs;
2475 } 2475 }
2476 2476
2477 static void setup_mci_misc_attrs(struct mem_ctl_info *mci) 2477 static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
2478 { 2478 {
2479 struct amd64_pvt *pvt = mci->pvt_info; 2479 struct amd64_pvt *pvt = mci->pvt_info;
2480 2480
2481 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; 2481 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2482 mci->edac_ctl_cap = EDAC_FLAG_NONE; 2482 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2483 2483
2484 if (pvt->nbcap & K8_NBCAP_SECDED) 2484 if (pvt->nbcap & K8_NBCAP_SECDED)
2485 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; 2485 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2486 2486
2487 if (pvt->nbcap & K8_NBCAP_CHIPKILL) 2487 if (pvt->nbcap & K8_NBCAP_CHIPKILL)
2488 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; 2488 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2489 2489
2490 mci->edac_cap = amd64_determine_edac_cap(pvt); 2490 mci->edac_cap = amd64_determine_edac_cap(pvt);
2491 mci->mod_name = EDAC_MOD_STR; 2491 mci->mod_name = EDAC_MOD_STR;
2492 mci->mod_ver = EDAC_AMD64_VERSION; 2492 mci->mod_ver = EDAC_AMD64_VERSION;
2493 mci->ctl_name = pvt->ctl_name; 2493 mci->ctl_name = pvt->ctl_name;
2494 mci->dev_name = pci_name(pvt->F2); 2494 mci->dev_name = pci_name(pvt->F2);
2495 mci->ctl_page_to_phys = NULL; 2495 mci->ctl_page_to_phys = NULL;
2496 2496
2497 /* memory scrubber interface */ 2497 /* memory scrubber interface */
2498 mci->set_sdram_scrub_rate = amd64_set_scrub_rate; 2498 mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2499 mci->get_sdram_scrub_rate = amd64_get_scrub_rate; 2499 mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
2500 } 2500 }
2501 2501
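The scrubber hooks wired up here are expected to return the scrub bandwidth actually in effect on success and a negative value on error. A kernel-style sketch of a conforming set_sdram_scrub_rate callback, with a made-up bandwidth table and the actual register write elided:

/* Hypothetical set of bandwidths (bytes/s) the hardware can scrub at. */
static const u32 example_scrub_bw[] = { 100, 200, 400, 800 };

static int example_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	int i, best = -1;

	/* pick the highest supported rate not exceeding the request */
	for (i = 0; i < ARRAY_SIZE(example_scrub_bw); i++)
		if (example_scrub_bw[i] <= bw)
			best = i;

	if (best < 0)
		return -EINVAL;	/* below the minimum supported rate */

	/* a real driver would program the scrub control register here */

	return example_scrub_bw[best];	/* actual rate set, not the request */
}

Returning the rate that was actually programmed, rather than echoing the request or a bare 0, is what lets callers of the EDAC core discover what the hardware ended up doing.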
2502 /* 2502 /*
2503 * returns a pointer to the family descriptor on success, NULL otherwise. 2503 * returns a pointer to the family descriptor on success, NULL otherwise.
2504 */ 2504 */
2505 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) 2505 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2506 { 2506 {
2507 u8 fam = boot_cpu_data.x86; 2507 u8 fam = boot_cpu_data.x86;
2508 struct amd64_family_type *fam_type = NULL; 2508 struct amd64_family_type *fam_type = NULL;
2509 2509
2510 switch (fam) { 2510 switch (fam) {
2511 case 0xf: 2511 case 0xf:
2512 fam_type = &amd64_family_types[K8_CPUS]; 2512 fam_type = &amd64_family_types[K8_CPUS];
2513 pvt->ops = &amd64_family_types[K8_CPUS].ops; 2513 pvt->ops = &amd64_family_types[K8_CPUS].ops;
2514 pvt->ctl_name = fam_type->ctl_name; 2514 pvt->ctl_name = fam_type->ctl_name;
2515 pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS; 2515 pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
2516 break; 2516 break;
2517 case 0x10: 2517 case 0x10:
2518 fam_type = &amd64_family_types[F10_CPUS]; 2518 fam_type = &amd64_family_types[F10_CPUS];
2519 pvt->ops = &amd64_family_types[F10_CPUS].ops; 2519 pvt->ops = &amd64_family_types[F10_CPUS].ops;
2520 pvt->ctl_name = fam_type->ctl_name; 2520 pvt->ctl_name = fam_type->ctl_name;
2521 pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS; 2521 pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
2522 break; 2522 break;
2523 2523
2524 default: 2524 default:
2525 amd64_err("Unsupported family!\n"); 2525 amd64_err("Unsupported family!\n");
2526 return NULL; 2526 return NULL;
2527 } 2527 }
2528 2528
2529 pvt->ext_model = boot_cpu_data.x86_model >> 4; 2529 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2530 2530
2531 amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name, 2531 amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
2532 (fam == 0xf ? 2532 (fam == 0xf ?
2533 (pvt->ext_model >= K8_REV_F ? "revF or later " 2533 (pvt->ext_model >= K8_REV_F ? "revF or later "
2534 : "revE or earlier ") 2534 : "revE or earlier ")
2535 : ""), pvt->mc_node_id); 2535 : ""), pvt->mc_node_id);
2536 return fam_type; 2536 return fam_type;
2537 } 2537 }
2538 2538
2539 static int amd64_init_one_instance(struct pci_dev *F2) 2539 static int amd64_init_one_instance(struct pci_dev *F2)
2540 { 2540 {
2541 struct amd64_pvt *pvt = NULL; 2541 struct amd64_pvt *pvt = NULL;
2542 struct amd64_family_type *fam_type = NULL; 2542 struct amd64_family_type *fam_type = NULL;
2543 struct mem_ctl_info *mci = NULL; 2543 struct mem_ctl_info *mci = NULL;
2544 int err = 0, ret; 2544 int err = 0, ret;
2545 u8 nid = get_node_id(F2); 2545 u8 nid = get_node_id(F2);
2546 2546
2547 ret = -ENOMEM; 2547 ret = -ENOMEM;
2548 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); 2548 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2549 if (!pvt) 2549 if (!pvt)
2550 goto err_ret; 2550 goto err_ret;
2551 2551
2552 pvt->mc_node_id = nid; 2552 pvt->mc_node_id = nid;
2553 pvt->F2 = F2; 2553 pvt->F2 = F2;
2554 2554
2555 ret = -EINVAL; 2555 ret = -EINVAL;
2556 fam_type = amd64_per_family_init(pvt); 2556 fam_type = amd64_per_family_init(pvt);
2557 if (!fam_type) 2557 if (!fam_type)
2558 goto err_free; 2558 goto err_free;
2559 2559
2560 ret = -ENODEV; 2560 ret = -ENODEV;
2561 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id); 2561 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2562 if (err) 2562 if (err)
2563 goto err_free; 2563 goto err_free;
2564 2564
2565 read_mc_regs(pvt); 2565 read_mc_regs(pvt);
2566 2566
2567 /* 2567 /*
2568 * We need to determine how many memory channels there are. Then use 2568 * We need to determine how many memory channels there are. Then use
2569 * that information to calculate the size of the dynamic instance 2569 * that information to calculate the size of the dynamic instance
2570 * tables in the 'mci' structure. 2570 * tables in the 'mci' structure.
2571 */ 2571 */
2572 ret = -EINVAL; 2572 ret = -EINVAL;
2573 pvt->channel_count = pvt->ops->early_channel_count(pvt); 2573 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2574 if (pvt->channel_count < 0) 2574 if (pvt->channel_count < 0)
2575 goto err_siblings; 2575 goto err_siblings;
2576 2576
2577 ret = -ENOMEM; 2577 ret = -ENOMEM;
2578 mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid); 2578 mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
2579 if (!mci) 2579 if (!mci)
2580 goto err_siblings; 2580 goto err_siblings;
2581 2581
2582 mci->pvt_info = pvt; 2582 mci->pvt_info = pvt;
2583 mci->dev = &pvt->F2->dev; 2583 mci->dev = &pvt->F2->dev;
2584 2584
2585 setup_mci_misc_attrs(mci); 2585 setup_mci_misc_attrs(mci);
2586 2586
2587 if (init_csrows(mci)) 2587 if (init_csrows(mci))
2588 mci->edac_cap = EDAC_FLAG_NONE; 2588 mci->edac_cap = EDAC_FLAG_NONE;
2589 2589
2590 set_mc_sysfs_attrs(mci); 2590 set_mc_sysfs_attrs(mci);
2591 2591
2592 ret = -ENODEV; 2592 ret = -ENODEV;
2593 if (edac_mc_add_mc(mci)) { 2593 if (edac_mc_add_mc(mci)) {
2594 debugf1("failed edac_mc_add_mc()\n"); 2594 debugf1("failed edac_mc_add_mc()\n");
2595 goto err_add_mc; 2595 goto err_add_mc;
2596 } 2596 }
2597 2597
2598 /* register stuff with EDAC MCE */ 2598 /* register stuff with EDAC MCE */
2599 if (report_gart_errors) 2599 if (report_gart_errors)
2600 amd_report_gart_errors(true); 2600 amd_report_gart_errors(true);
2601 2601
2602 amd_register_ecc_decoder(amd64_decode_bus_error); 2602 amd_register_ecc_decoder(amd64_decode_bus_error);
2603 2603
2604 mcis[nid] = mci; 2604 mcis[nid] = mci;
2605 2605
2606 atomic_inc(&drv_instances); 2606 atomic_inc(&drv_instances);
2607 2607
2608 return 0; 2608 return 0;
2609 2609
2610 err_add_mc: 2610 err_add_mc:
2611 edac_mc_free(mci); 2611 edac_mc_free(mci);
2612 2612
2613 err_siblings: 2613 err_siblings:
2614 free_mc_sibling_devs(pvt); 2614 free_mc_sibling_devs(pvt);
2615 2615
2616 err_free: 2616 err_free:
2617 kfree(pvt); 2617 kfree(pvt);
2618 2618
2619 err_ret: 2619 err_ret:
2620 return ret; 2620 return ret;
2621 } 2621 }
2622 2622
2623 static int __devinit amd64_probe_one_instance(struct pci_dev *pdev, 2623 static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
2624 const struct pci_device_id *mc_type) 2624 const struct pci_device_id *mc_type)
2625 { 2625 {
2626 u8 nid = get_node_id(pdev); 2626 u8 nid = get_node_id(pdev);
2627 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2627 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2628 struct ecc_settings *s; 2628 struct ecc_settings *s;
2629 int ret = 0; 2629 int ret = 0;
2630 2630
2631 ret = pci_enable_device(pdev); 2631 ret = pci_enable_device(pdev);
2632 if (ret < 0) { 2632 if (ret < 0) {
2633 debugf0("ret=%d\n", ret); 2633 debugf0("ret=%d\n", ret);
2634 return -EIO; 2634 return -EIO;
2635 } 2635 }
2636 2636
2637 ret = -ENOMEM; 2637 ret = -ENOMEM;
2638 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL); 2638 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2639 if (!s) 2639 if (!s)
2640 goto err_out; 2640 goto err_out;
2641 2641
2642 ecc_stngs[nid] = s; 2642 ecc_stngs[nid] = s;
2643 2643
2644 if (!ecc_enabled(F3, nid)) { 2644 if (!ecc_enabled(F3, nid)) {
2645 ret = -ENODEV; 2645 ret = -ENODEV;
2646 2646
2647 if (!ecc_enable_override) 2647 if (!ecc_enable_override)
2648 goto err_enable; 2648 goto err_enable;
2649 2649
2650 amd64_warn("Forcing ECC on!\n"); 2650 amd64_warn("Forcing ECC on!\n");
2651 2651
2652 if (!enable_ecc_error_reporting(s, nid, F3)) 2652 if (!enable_ecc_error_reporting(s, nid, F3))
2653 goto err_enable; 2653 goto err_enable;
2654 } 2654 }
2655 2655
2656 ret = amd64_init_one_instance(pdev); 2656 ret = amd64_init_one_instance(pdev);
2657 if (ret < 0) { 2657 if (ret < 0) {
2658 amd64_err("Error probing instance: %d\n", nid); 2658 amd64_err("Error probing instance: %d\n", nid);
2659 restore_ecc_error_reporting(s, nid, F3); 2659 restore_ecc_error_reporting(s, nid, F3);
2660 } 2660 }
2661 2661
2662 return ret; 2662 return ret;
2663 2663
2664 err_enable: 2664 err_enable:
2665 kfree(s); 2665 kfree(s);
2666 ecc_stngs[nid] = NULL; 2666 ecc_stngs[nid] = NULL;
2667 2667
2668 err_out: 2668 err_out:
2669 return ret; 2669 return ret;
2670 } 2670 }
2671 2671
2672 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) 2672 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
2673 { 2673 {
2674 struct mem_ctl_info *mci; 2674 struct mem_ctl_info *mci;
2675 struct amd64_pvt *pvt; 2675 struct amd64_pvt *pvt;
2676 u8 nid = get_node_id(pdev); 2676 u8 nid = get_node_id(pdev);
2677 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2677 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2678 struct ecc_settings *s = ecc_stngs[nid]; 2678 struct ecc_settings *s = ecc_stngs[nid];
2679 2679
2680 /* Remove from EDAC CORE tracking list */ 2680 /* Remove from EDAC CORE tracking list */
2681 mci = edac_mc_del_mc(&pdev->dev); 2681 mci = edac_mc_del_mc(&pdev->dev);
2682 if (!mci) 2682 if (!mci)
2683 return; 2683 return;
2684 2684
2685 pvt = mci->pvt_info; 2685 pvt = mci->pvt_info;
2686 2686
2687 restore_ecc_error_reporting(s, nid, F3); 2687 restore_ecc_error_reporting(s, nid, F3);
2688 2688
2689 free_mc_sibling_devs(pvt); 2689 free_mc_sibling_devs(pvt);
2690 2690
2691 /* unregister from EDAC MCE */ 2691 /* unregister from EDAC MCE */
2692 amd_report_gart_errors(false); 2692 amd_report_gart_errors(false);
2693 amd_unregister_ecc_decoder(amd64_decode_bus_error); 2693 amd_unregister_ecc_decoder(amd64_decode_bus_error);
2694 2694
2695 kfree(ecc_stngs[nid]); 2695 kfree(ecc_stngs[nid]);
2696 ecc_stngs[nid] = NULL; 2696 ecc_stngs[nid] = NULL;
2697 2697
2698 /* Free the EDAC CORE resources */ 2698 /* Free the EDAC CORE resources */
2699 mci->pvt_info = NULL; 2699 mci->pvt_info = NULL;
2700 mcis[nid] = NULL; 2700 mcis[nid] = NULL;
2701 2701
2702 kfree(pvt); 2702 kfree(pvt);
2703 edac_mc_free(mci); 2703 edac_mc_free(mci);
2704 } 2704 }
2705 2705
2706 /* 2706 /*
2707 * This table is part of the interface for loading drivers for PCI devices. The 2707 * This table is part of the interface for loading drivers for PCI devices. The
2708 * PCI core identifies what devices are on a system during boot, and then 2708 * PCI core identifies what devices are on a system during boot, and then
2709 * queries this table to see whether this driver handles a given device. 2709 * queries this table to see whether this driver handles a given device.
2710 */ 2710 */
2711 static const struct pci_device_id amd64_pci_table[] __devinitdata = { 2711 static const struct pci_device_id amd64_pci_table[] __devinitdata = {
2712 { 2712 {
2713 .vendor = PCI_VENDOR_ID_AMD, 2713 .vendor = PCI_VENDOR_ID_AMD,
2714 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, 2714 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2715 .subvendor = PCI_ANY_ID, 2715 .subvendor = PCI_ANY_ID,
2716 .subdevice = PCI_ANY_ID, 2716 .subdevice = PCI_ANY_ID,
2717 .class = 0, 2717 .class = 0,
2718 .class_mask = 0, 2718 .class_mask = 0,
2719 }, 2719 },
2720 { 2720 {
2721 .vendor = PCI_VENDOR_ID_AMD, 2721 .vendor = PCI_VENDOR_ID_AMD,
2722 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM, 2722 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2723 .subvendor = PCI_ANY_ID, 2723 .subvendor = PCI_ANY_ID,
2724 .subdevice = PCI_ANY_ID, 2724 .subdevice = PCI_ANY_ID,
2725 .class = 0, 2725 .class = 0,
2726 .class_mask = 0, 2726 .class_mask = 0,
2727 }, 2727 },
2728 {0, } 2728 {0, }
2729 }; 2729 };
2730 MODULE_DEVICE_TABLE(pci, amd64_pci_table); 2730 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2731 2731
2732 static struct pci_driver amd64_pci_driver = { 2732 static struct pci_driver amd64_pci_driver = {
2733 .name = EDAC_MOD_STR, 2733 .name = EDAC_MOD_STR,
2734 .probe = amd64_probe_one_instance, 2734 .probe = amd64_probe_one_instance,
2735 .remove = __devexit_p(amd64_remove_one_instance), 2735 .remove = __devexit_p(amd64_remove_one_instance),
2736 .id_table = amd64_pci_table, 2736 .id_table = amd64_pci_table,
2737 }; 2737 };
2738 2738
2739 static void setup_pci_device(void) 2739 static void setup_pci_device(void)
2740 { 2740 {
2741 struct mem_ctl_info *mci; 2741 struct mem_ctl_info *mci;
2742 struct amd64_pvt *pvt; 2742 struct amd64_pvt *pvt;
2743 2743
2744 if (amd64_ctl_pci) 2744 if (amd64_ctl_pci)
2745 return; 2745 return;
2746 2746
2747 mci = mcis[0]; 2747 mci = mcis[0];
2748 if (mci) { 2748 if (mci) {
2749 2749
2750 pvt = mci->pvt_info; 2750 pvt = mci->pvt_info;
2751 amd64_ctl_pci = 2751 amd64_ctl_pci =
2752 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); 2752 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2753 2753
2754 if (!amd64_ctl_pci) { 2754 if (!amd64_ctl_pci) {
2755 pr_warning("%s(): Unable to create PCI control\n", 2755 pr_warning("%s(): Unable to create PCI control\n",
2756 __func__); 2756 __func__);
2757 2757
2758 pr_warning("%s(): PCI error report via EDAC not set\n", 2758 pr_warning("%s(): PCI error report via EDAC not set\n",
2759 __func__); 2759 __func__);
2760 } 2760 }
2761 } 2761 }
2762 } 2762 }
2763 2763
2764 static int __init amd64_edac_init(void) 2764 static int __init amd64_edac_init(void)
2765 { 2765 {
2766 int err = -ENODEV; 2766 int err = -ENODEV;
2767 2767
2768 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); 2768 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
2769 2769
2770 opstate_init(); 2770 opstate_init();
2771 2771
2772 if (amd_cache_northbridges() < 0) 2772 if (amd_cache_northbridges() < 0)
2773 goto err_ret; 2773 goto err_ret;
2774 2774
2775 err = -ENOMEM; 2775 err = -ENOMEM;
2776 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); 2776 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2777 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); 2777 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2778 if (!(mcis && ecc_stngs)) 2778 if (!(mcis && ecc_stngs))
2779 goto err_ret; 2779 goto err_ret;
2780 2780
2781 msrs = msrs_alloc(); 2781 msrs = msrs_alloc();
2782 if (!msrs) 2782 if (!msrs)
2783 goto err_free; 2783 goto err_free;
2784 2784
2785 err = pci_register_driver(&amd64_pci_driver); 2785 err = pci_register_driver(&amd64_pci_driver);
2786 if (err) 2786 if (err)
2787 goto err_pci; 2787 goto err_pci;
2788 2788
2789 err = -ENODEV; 2789 err = -ENODEV;
2790 if (!atomic_read(&drv_instances)) 2790 if (!atomic_read(&drv_instances))
2791 goto err_no_instances; 2791 goto err_no_instances;
2792 2792
2793 setup_pci_device(); 2793 setup_pci_device();
2794 return 0; 2794 return 0;
2795 2795
2796 err_no_instances: 2796 err_no_instances:
2797 pci_unregister_driver(&amd64_pci_driver); 2797 pci_unregister_driver(&amd64_pci_driver);
2798 2798
2799 err_pci: 2799 err_pci:
2800 msrs_free(msrs); 2800 msrs_free(msrs);
2801 msrs = NULL; 2801 msrs = NULL;
2802 2802
2803 err_free: 2803 err_free:
2804 kfree(mcis); 2804 kfree(mcis);
2805 mcis = NULL; 2805 mcis = NULL;
2806 2806
2807 kfree(ecc_stngs); 2807 kfree(ecc_stngs);
2808 ecc_stngs = NULL; 2808 ecc_stngs = NULL;
2809 2809
2810 err_ret: 2810 err_ret:
2811 return err; 2811 return err;
2812 } 2812 }
2813 2813
2814 static void __exit amd64_edac_exit(void) 2814 static void __exit amd64_edac_exit(void)
2815 { 2815 {
2816 if (amd64_ctl_pci) 2816 if (amd64_ctl_pci)
2817 edac_pci_release_generic_ctl(amd64_ctl_pci); 2817 edac_pci_release_generic_ctl(amd64_ctl_pci);
2818 2818
2819 pci_unregister_driver(&amd64_pci_driver); 2819 pci_unregister_driver(&amd64_pci_driver);
2820 2820
2821 kfree(ecc_stngs); 2821 kfree(ecc_stngs);
2822 ecc_stngs = NULL; 2822 ecc_stngs = NULL;
2823 2823
2824 kfree(mcis); 2824 kfree(mcis);
2825 mcis = NULL; 2825 mcis = NULL;
2826 2826
2827 msrs_free(msrs); 2827 msrs_free(msrs);
2828 msrs = NULL; 2828 msrs = NULL;
2829 } 2829 }
2830 2830
2831 module_init(amd64_edac_init); 2831 module_init(amd64_edac_init);
2832 module_exit(amd64_edac_exit); 2832 module_exit(amd64_edac_exit);
2833 2833
2834 MODULE_LICENSE("GPL"); 2834 MODULE_LICENSE("GPL");
2835 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, " 2835 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
drivers/edac/amd64_edac.h
1 /* 1 /*
2 * AMD64 class Memory Controller kernel module 2 * AMD64 class Memory Controller kernel module
3 * 3 *
4 * Copyright (c) 2009 SoftwareBitMaker. 4 * Copyright (c) 2009 SoftwareBitMaker.
5 * Copyright (c) 2009 Advanced Micro Devices, Inc. 5 * Copyright (c) 2009 Advanced Micro Devices, Inc.
6 * 6 *
7 * This file may be distributed under the terms of the 7 * This file may be distributed under the terms of the
8 * GNU General Public License. 8 * GNU General Public License.
9 * 9 *
10 * Originally Written by Thayne Harbaugh 10 * Originally Written by Thayne Harbaugh
11 * 11 *
12 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>: 12 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
13 * - K8 CPU Revision D and greater support 13 * - K8 CPU Revision D and greater support
14 * 14 *
15 * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>: 15 * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
16 * - Module largely rewritten, with new (and hopefully correct) 16 * - Module largely rewritten, with new (and hopefully correct)
17 * code for dealing with node and chip select interleaving, 17 * code for dealing with node and chip select interleaving,
18 * various code cleanup, and bug fixes 18 * various code cleanup, and bug fixes
19 * - Added support for memory hoisting using DRAM hole address 19 * - Added support for memory hoisting using DRAM hole address
20 * register 20 * register
21 * 21 *
22 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>: 22 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
23 * - K8 Rev (1207) revision support added; required revision- 23 * - K8 Rev (1207) revision support added; required revision-
24 * specific mini-driver code to support Rev F as well as 24 * specific mini-driver code to support Rev F as well as
25 * prior revisions 25 * prior revisions
26 * 26 *
27 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>: 27 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
28 * -Family 10h revision support added. New PCI Device IDs, 28 * -Family 10h revision support added. New PCI Device IDs,
29 * indicating new changes. The actual register changes 29 * indicating new changes. The actual register changes
30 * were slight, fewer than in the Rev E to Rev F transition, 30 * were slight, fewer than in the Rev E to Rev F transition,
31 * but changing the PCI Device ID was the proper thing to 31 * but changing the PCI Device ID was the proper thing to
32 * do, as it provides for almost automatic family 32 * do, as it provides for almost automatic family
33 * detection. The mods to Rev F required more family 33 * detection. The mods to Rev F required more family
34 * information detection. 34 * information detection.
35 * 35 *
36 * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>: 36 * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
37 * - misc fixes and code cleanups 37 * - misc fixes and code cleanups
38 * 38 *
39 * This module is based on the following documents 39 * This module is based on the following documents
40 * (available from http://www.amd.com/): 40 * (available from http://www.amd.com/):
41 * 41 *
42 * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD 42 * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
43 * Opteron Processors 43 * Opteron Processors
44 * AMD publication #: 26094 44 * AMD publication #: 26094
45 * Revision: 3.26 45 * Revision: 3.26
46 * 46 *
47 * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh 47 * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
48 * Processors 48 * Processors
49 * AMD publication #: 32559 49 * AMD publication #: 32559
50 * Revision: 3.00 50 * Revision: 3.00
51 * Issue Date: May 2006 51 * Issue Date: May 2006
52 * 52 *
53 * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h 53 * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
54 * Processors 54 * Processors
55 * AMD publication #: 31116 55 * AMD publication #: 31116
56 * Revision: 3.00 56 * Revision: 3.00
57 * Issue Date: September 07, 2007 57 * Issue Date: September 07, 2007
58 * 58 *
59 * Sections in the first 2 documents are no longer in sync with each other. 59 * Sections in the first 2 documents are no longer in sync with each other.
60 * The Family 10h BKDG was totally re-written from scratch with a new 60 * The Family 10h BKDG was totally re-written from scratch with a new
61 * presentation model. 61 * presentation model.
62 * Therefore, comments that refer to a Document section might be off. 62 * Therefore, comments that refer to a Document section might be off.
63 */ 63 */
64 64
65 #include <linux/module.h> 65 #include <linux/module.h>
66 #include <linux/ctype.h> 66 #include <linux/ctype.h>
67 #include <linux/init.h> 67 #include <linux/init.h>
68 #include <linux/pci.h> 68 #include <linux/pci.h>
69 #include <linux/pci_ids.h> 69 #include <linux/pci_ids.h>
70 #include <linux/slab.h> 70 #include <linux/slab.h>
71 #include <linux/mmzone.h> 71 #include <linux/mmzone.h>
72 #include <linux/edac.h> 72 #include <linux/edac.h>
73 #include <asm/msr.h> 73 #include <asm/msr.h>
74 #include "edac_core.h" 74 #include "edac_core.h"
75 #include "mce_amd.h" 75 #include "mce_amd.h"
76 76
77 #define amd64_debug(fmt, arg...) \ 77 #define amd64_debug(fmt, arg...) \
78 edac_printk(KERN_DEBUG, "amd64", fmt, ##arg) 78 edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)
79 79
80 #define amd64_info(fmt, arg...) \ 80 #define amd64_info(fmt, arg...) \
81 edac_printk(KERN_INFO, "amd64", fmt, ##arg) 81 edac_printk(KERN_INFO, "amd64", fmt, ##arg)
82 82
83 #define amd64_notice(fmt, arg...) \ 83 #define amd64_notice(fmt, arg...) \
84 edac_printk(KERN_NOTICE, "amd64", fmt, ##arg) 84 edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
85 85
86 #define amd64_warn(fmt, arg...) \ 86 #define amd64_warn(fmt, arg...) \
87 edac_printk(KERN_WARNING, "amd64", fmt, ##arg) 87 edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
88 88
89 #define amd64_err(fmt, arg...) \ 89 #define amd64_err(fmt, arg...) \
90 edac_printk(KERN_ERR, "amd64", fmt, ##arg) 90 edac_printk(KERN_ERR, "amd64", fmt, ##arg)
91 91
92 #define amd64_mc_warn(mci, fmt, arg...) \ 92 #define amd64_mc_warn(mci, fmt, arg...) \
93 edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg) 93 edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
94 94
95 #define amd64_mc_err(mci, fmt, arg...) \ 95 #define amd64_mc_err(mci, fmt, arg...) \
96 edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg) 96 edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)
97 97
98 /* 98 /*
99 * Throughout the comments in this code, the following terms are used: 99 * Throughout the comments in this code, the following terms are used:
100 * 100 *
101 * SysAddr, DramAddr, and InputAddr 101 * SysAddr, DramAddr, and InputAddr
102 * 102 *
103 * These terms come directly from the amd64 documentation 103 * These terms come directly from the amd64 documentation
104 * (AMD publication #26094). They are defined as follows: 104 * (AMD publication #26094). They are defined as follows:
105 * 105 *
106 * SysAddr: 106 * SysAddr:
107 * This is a physical address generated by a CPU core or a device 107 * This is a physical address generated by a CPU core or a device
108 * doing DMA. If generated by a CPU core, a SysAddr is the result of 108 * doing DMA. If generated by a CPU core, a SysAddr is the result of
109 * a virtual to physical address translation by the CPU core's address 109 * a virtual to physical address translation by the CPU core's address
110 * translation mechanism (MMU). 110 * translation mechanism (MMU).
111 * 111 *
112 * DramAddr: 112 * DramAddr:
113 * A DramAddr is derived from a SysAddr by subtracting an offset that 113 * A DramAddr is derived from a SysAddr by subtracting an offset that
114 * depends on which node the SysAddr maps to and whether the SysAddr 114 * depends on which node the SysAddr maps to and whether the SysAddr
115 * is within a range affected by memory hoisting. The DRAM Base 115 * is within a range affected by memory hoisting. The DRAM Base
116 * (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers 116 * (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
117 * determine which node a SysAddr maps to. 117 * determine which node a SysAddr maps to.
118 * 118 *
119 * If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr 119 * If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
120 * is within the range of addresses specified by this register, then 120 * is within the range of addresses specified by this register, then
121 * a value x from the DHAR is subtracted from the SysAddr to produce a 121 * a value x from the DHAR is subtracted from the SysAddr to produce a
122 * DramAddr. Here, x represents the base address for the node that 122 * DramAddr. Here, x represents the base address for the node that
123 * the SysAddr maps to plus an offset due to memory hoisting. See 123 * the SysAddr maps to plus an offset due to memory hoisting. See
124 * section 3.4.8 and the comments in amd64_get_dram_hole_info() and 124 * section 3.4.8 and the comments in amd64_get_dram_hole_info() and
125 * sys_addr_to_dram_addr() below for more information. 125 * sys_addr_to_dram_addr() below for more information.
126 * 126 *
127 * If the SysAddr is not affected by the DHAR then a value y is 127 * If the SysAddr is not affected by the DHAR then a value y is
128 * subtracted from the SysAddr to produce a DramAddr. Here, y is the 128 * subtracted from the SysAddr to produce a DramAddr. Here, y is the
129 * base address for the node that the SysAddr maps to. See section 129 * base address for the node that the SysAddr maps to. See section
130 * 3.4.4 and the comments in sys_addr_to_dram_addr() below for more 130 * 3.4.4 and the comments in sys_addr_to_dram_addr() below for more
131 * information. 131 * information.
132 * 132 *
133 * InputAddr: 133 * InputAddr:
134 * A DramAddr is translated to an InputAddr before being passed to the 134 * A DramAddr is translated to an InputAddr before being passed to the
135 * memory controller for the node that the DramAddr is associated 135 * memory controller for the node that the DramAddr is associated
136 * with. The memory controller then maps the InputAddr to a csrow. 136 * with. The memory controller then maps the InputAddr to a csrow.
137 * If node interleaving is not in use, then the InputAddr has the same 137 * If node interleaving is not in use, then the InputAddr has the same
138 * value as the DramAddr. Otherwise, the InputAddr is produced by 138 * value as the DramAddr. Otherwise, the InputAddr is produced by
139 * discarding the bits used for node interleaving from the DramAddr. 139 * discarding the bits used for node interleaving from the DramAddr.
140 * See section 3.4.4 for more information. 140 * See section 3.4.4 for more information.
141 * 141 *
142 * The memory controller for a given node uses its DRAM CS Base and 142 * The memory controller for a given node uses its DRAM CS Base and
143 * DRAM CS Mask registers to map an InputAddr to a csrow. See 143 * DRAM CS Mask registers to map an InputAddr to a csrow. See
144 * sections 3.5.4 and 3.5.5 for more information. 144 * sections 3.5.4 and 3.5.5 for more information.
145 */ 145 */
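To make the SysAddr-to-DramAddr subtraction above concrete, here is a minimal sketch under simplified assumptions; the helper name, parameters, and the single-compare hole check are illustrative, not the driver's actual sys_addr_to_dram_addr() logic:

	/*
	 * Illustrative only: derive a DramAddr from a SysAddr for one node.
	 * dram_base stands in for the node's decoded DRAM Base register;
	 * hole_base/hole_offset stand in for the DHAR-derived values.
	 */
	static u64 example_sys_addr_to_dram_addr(u64 sys_addr, u64 dram_base,
						 u64 hole_base, u64 hole_offset,
						 bool hole_valid)
	{
		/* SysAddr in the DHAR-affected range: subtract base plus hoist offset */
		if (hole_valid && sys_addr >= hole_base)
			return sys_addr - (dram_base + hole_offset);

		/* otherwise only the node's base address is subtracted */
		return sys_addr - dram_base;
	}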
146 146
147 #define EDAC_AMD64_VERSION "v3.3.0" 147 #define EDAC_AMD64_VERSION "v3.3.0"
148 #define EDAC_MOD_STR "amd64_edac" 148 #define EDAC_MOD_STR "amd64_edac"
149 149
150 /* Extended Model from CPUID, for CPU Revision numbers */ 150 /* Extended Model from CPUID, for CPU Revision numbers */
151 #define K8_REV_D 1 151 #define K8_REV_D 1
152 #define K8_REV_E 2 152 #define K8_REV_E 2
153 #define K8_REV_F 4 153 #define K8_REV_F 4
154 154
155 /* Hardware limit on ChipSelect rows per MC and processors per system */ 155 /* Hardware limit on ChipSelect rows per MC and processors per system */
156 #define MAX_CS_COUNT 8 156 #define MAX_CS_COUNT 8
157 #define DRAM_REG_COUNT 8 157 #define DRAM_REG_COUNT 8
158 158
159 #define ON true 159 #define ON true
160 #define OFF false 160 #define OFF false
161 161
162 /* 162 /*
163 * PCI-defined configuration space registers 163 * PCI-defined configuration space registers
164 */ 164 */
165 165
166 166
167 /* 167 /*
168 * Function 1 - Address Map 168 * Function 1 - Address Map
169 */ 169 */
170 #define K8_DRAM_BASE_LOW 0x40 170 #define K8_DRAM_BASE_LOW 0x40
171 #define K8_DRAM_LIMIT_LOW 0x44 171 #define K8_DRAM_LIMIT_LOW 0x44
172 #define K8_DHAR 0xf0 172 #define K8_DHAR 0xf0
173 173
174 #define DHAR_VALID BIT(0) 174 #define DHAR_VALID BIT(0)
175 #define F10_DRAM_MEM_HOIST_VALID BIT(1) 175 #define F10_DRAM_MEM_HOIST_VALID BIT(1)
176 176
177 #define DHAR_BASE_MASK 0xff000000 177 #define DHAR_BASE_MASK 0xff000000
178 #define dhar_base(dhar) (dhar & DHAR_BASE_MASK) 178 #define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
179 179
180 #define K8_DHAR_OFFSET_MASK 0x0000ff00 180 #define K8_DHAR_OFFSET_MASK 0x0000ff00
181 #define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16) 181 #define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
182 182
183 #define F10_DHAR_OFFSET_MASK 0x0000ff80 183 #define F10_DHAR_OFFSET_MASK 0x0000ff80
184 /* NOTE: Extra mask bit vs K8 */ 184 /* NOTE: Extra mask bit vs K8 */
185 #define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16) 185 #define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
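A worked example of the extra mask bit, using a hypothetical dhar value of 0x00004180:

	/*
	 * k8_dhar_offset(0x4180)  = (0x4180 & 0x0000ff00) << 16 = 0x41000000
	 * f10_dhar_offset(0x4180) = (0x4180 & 0x0000ff80) << 16 = 0x41800000
	 *
	 * Keeping bit 7 halves the offset granularity from 16MB (bit 24)
	 * to 8MB (bit 23) on F10.
	 */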
186 186
187 187
188 /* F10 High BASE/LIMIT registers */ 188 /* F10 High BASE/LIMIT registers */
189 #define F10_DRAM_BASE_HIGH 0x140 189 #define F10_DRAM_BASE_HIGH 0x140
190 #define F10_DRAM_LIMIT_HIGH 0x144 190 #define F10_DRAM_LIMIT_HIGH 0x144
191 191
192 192
193 /* 193 /*
194 * Function 2 - DRAM controller 194 * Function 2 - DRAM controller
195 */ 195 */
196 #define K8_DCSB0 0x40 196 #define K8_DCSB0 0x40
197 #define F10_DCSB1 0x140 197 #define F10_DCSB1 0x140
198 198
199 #define K8_DCSB_CS_ENABLE BIT(0) 199 #define K8_DCSB_CS_ENABLE BIT(0)
200 #define K8_DCSB_NPT_SPARE BIT(1) 200 #define K8_DCSB_NPT_SPARE BIT(1)
201 #define K8_DCSB_NPT_TESTFAIL BIT(2) 201 #define K8_DCSB_NPT_TESTFAIL BIT(2)
202 202
203 /* 203 /*
204 * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form 204 * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
205 * the address 205 * the address
206 */ 206 */
207 #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) 207 #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
208 #define REV_E_DCS_SHIFT 4 208 #define REV_E_DCS_SHIFT 4
209 209
210 #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) 210 #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
211 #define REV_F_F1Xh_DCS_SHIFT 8 211 #define REV_F_F1Xh_DCS_SHIFT 8
212 212
213 /* 213 /*
214 * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount 214 * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
215 * to form the address 215 * to form the address
216 */ 216 */
217 #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) 217 #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
218 #define REV_F_DCS_SHIFT 8 218 #define REV_F_DCS_SHIFT 8
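A minimal sketch of how these selections are applied; the helper is illustrative, but the mask-then-shift shape follows the DCSB handling above (Rev E uses 0xFFE0FE00 with shift 4, Rev F and later 0x1FF83FE0 with shift 8):

	/* Illustrative: form a chip-select base address from a raw DCSB value */
	static u64 example_dcsb_to_base(u32 dcsb, u64 base_bits, int shift)
	{
		return ((u64)dcsb & base_bits) << shift;
	}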
219 219
220 /* DRAM CS Mask Registers */ 220 /* DRAM CS Mask Registers */
221 #define K8_DCSM0 0x60 221 #define K8_DCSM0 0x60
222 #define F10_DCSM1 0x160 222 #define F10_DCSM1 0x160
223 223
224 /* REV E: select [29:21] and [15:9] from DCSM */ 224 /* REV E: select [29:21] and [15:9] from DCSM */
225 #define REV_E_DCSM_MASK_BITS 0x3FE0FE00 225 #define REV_E_DCSM_MASK_BITS 0x3FE0FE00
226 226
227 /* unused bits [24:20] and [12:0] */ 227 /* unused bits [24:20] and [12:0] */
228 #define REV_E_DCS_NOTUSED_BITS 0x01F01FFF 228 #define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
229 229
230 /* REV F and later: select [28:19] and [13:5] from DCSM */ 230 /* REV F and later: select [28:19] and [13:5] from DCSM */
231 #define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0 231 #define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
232 232
233 /* unused bits [26:22] and [12:0] */ 233 /* unused bits [26:22] and [12:0] */
234 #define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF 234 #define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
235 235
236 #define DBAM0 0x80 236 #define DBAM0 0x80
237 #define DBAM1 0x180 237 #define DBAM1 0x180
238 238
239 /* Extract the 'type' of the i'th DIMM from the DBAM reg value passed */ 239 /* Extract the 'type' of the i'th DIMM from the DBAM reg value passed */
240 #define DBAM_DIMM(i, reg) ((((reg) >> (4*i))) & 0xF) 240 #define DBAM_DIMM(i, reg) ((((reg) >> (4*i))) & 0xF)
241 241
242 #define DBAM_MAX_VALUE 11 242 #define DBAM_MAX_VALUE 11
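For illustration, a DBAM register packs four 4-bit fields, one per DIMM; a hedged sketch of walking them with DBAM_DIMM() (the loop and debug printout are examples, not driver code):

	u32 dbam = pvt->dbam0;
	int i;

	for (i = 0; i < 4; i++) {
		int cs_mode = DBAM_DIMM(i, dbam);

		if (cs_mode <= DBAM_MAX_VALUE)
			amd64_debug("DIMM %d: cs_mode %d\n", i, cs_mode);
	}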
243 243
244 244
245 #define F10_DCLR_0 0x90 245 #define F10_DCLR_0 0x90
246 #define F10_DCLR_1 0x190 246 #define F10_DCLR_1 0x190
247 #define REVE_WIDTH_128 BIT(16) 247 #define REVE_WIDTH_128 BIT(16)
248 #define F10_WIDTH_128 BIT(11) 248 #define F10_WIDTH_128 BIT(11)
249 249
250 250
251 #define F10_DCHR_0 0x94 251 #define F10_DCHR_0 0x94
252 #define F10_DCHR_1 0x194 252 #define F10_DCHR_1 0x194
253 253
254 #define F10_DCHR_FOUR_RANK_DIMM BIT(18) 254 #define F10_DCHR_FOUR_RANK_DIMM BIT(18)
255 #define DDR3_MODE BIT(8) 255 #define DDR3_MODE BIT(8)
256 #define F10_DCHR_MblMode BIT(6) 256 #define F10_DCHR_MblMode BIT(6)
257 257
258 258
259 #define F10_DCTL_SEL_LOW 0x110 259 #define F10_DCTL_SEL_LOW 0x110
260 #define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800) 260 #define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800)
261 #define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3) 261 #define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3)
262 #define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0)) 262 #define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0))
263 #define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2)) 263 #define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2))
264 #define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4)) 264 #define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4))
265 #define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5)) 265 #define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5))
266 #define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8)) 266 #define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8))
267 #define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10)) 267 #define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10))
268 268
269 #define F10_DCTL_SEL_HIGH 0x114 269 #define F10_DCTL_SEL_HIGH 0x114
270 270
271 /* 271 /*
272 * Function 3 - Misc Control 272 * Function 3 - Misc Control
273 */ 273 */
274 #define K8_NBCTL 0x40 274 #define K8_NBCTL 0x40
275 275
276 /* Correctable ECC error reporting enable */ 276 /* Correctable ECC error reporting enable */
277 #define K8_NBCTL_CECCEn BIT(0) 277 #define K8_NBCTL_CECCEn BIT(0)
278 278
279 /* UnCorrectable ECC error reporting enable */ 279 /* UnCorrectable ECC error reporting enable */
280 #define K8_NBCTL_UECCEn BIT(1) 280 #define K8_NBCTL_UECCEn BIT(1)
281 281
282 #define K8_NBCFG 0x44 282 #define K8_NBCFG 0x44
283 #define K8_NBCFG_CHIPKILL BIT(23) 283 #define K8_NBCFG_CHIPKILL BIT(23)
284 #define K8_NBCFG_ECC_ENABLE BIT(22) 284 #define K8_NBCFG_ECC_ENABLE BIT(22)
285 285
286 #define K8_NBSL 0x48 286 #define K8_NBSL 0x48
287 287
288 288
289 /* Family F10h: Normalized Extended Error Codes */ 289 /* Family F10h: Normalized Extended Error Codes */
290 #define F10_NBSL_EXT_ERR_RES 0x0 290 #define F10_NBSL_EXT_ERR_RES 0x0
291 #define F10_NBSL_EXT_ERR_ECC 0x8 291 #define F10_NBSL_EXT_ERR_ECC 0x8
292 292
293 /* Next two are overloaded values */ 293 /* Next two are overloaded values */
294 #define F10_NBSL_EXT_ERR_LINK_PROTO 0xB 294 #define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
295 #define F10_NBSL_EXT_ERR_L3_PROTO 0xB 295 #define F10_NBSL_EXT_ERR_L3_PROTO 0xB
296 296
297 #define F10_NBSL_EXT_ERR_NB_ARRAY 0xC 297 #define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
298 #define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD 298 #define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
299 #define F10_NBSL_EXT_ERR_LINK_RETRY 0xE 299 #define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
300 300
301 /* Next two are overloaded values */ 301 /* Next two are overloaded values */
302 #define F10_NBSL_EXT_ERR_GART_WALK 0xF 302 #define F10_NBSL_EXT_ERR_GART_WALK 0xF
303 #define F10_NBSL_EXT_ERR_DEV_WALK 0xF 303 #define F10_NBSL_EXT_ERR_DEV_WALK 0xF
304 304
305 /* 0x10 to 0x1B: Reserved */ 305 /* 0x10 to 0x1B: Reserved */
306 #define F10_NBSL_EXT_ERR_L3_DATA 0x1C 306 #define F10_NBSL_EXT_ERR_L3_DATA 0x1C
307 #define F10_NBSL_EXT_ERR_L3_TAG 0x1D 307 #define F10_NBSL_EXT_ERR_L3_TAG 0x1D
308 #define F10_NBSL_EXT_ERR_L3_LRU 0x1E 308 #define F10_NBSL_EXT_ERR_L3_LRU 0x1E
309 309
310 /* K8: Normalized Extended Error Codes */ 310 /* K8: Normalized Extended Error Codes */
311 #define K8_NBSL_EXT_ERR_ECC 0x0 311 #define K8_NBSL_EXT_ERR_ECC 0x0
312 #define K8_NBSL_EXT_ERR_CRC 0x1 312 #define K8_NBSL_EXT_ERR_CRC 0x1
313 #define K8_NBSL_EXT_ERR_SYNC 0x2 313 #define K8_NBSL_EXT_ERR_SYNC 0x2
314 #define K8_NBSL_EXT_ERR_MST 0x3 314 #define K8_NBSL_EXT_ERR_MST 0x3
315 #define K8_NBSL_EXT_ERR_TGT 0x4 315 #define K8_NBSL_EXT_ERR_TGT 0x4
316 #define K8_NBSL_EXT_ERR_GART 0x5 316 #define K8_NBSL_EXT_ERR_GART 0x5
317 #define K8_NBSL_EXT_ERR_RMW 0x6 317 #define K8_NBSL_EXT_ERR_RMW 0x6
318 #define K8_NBSL_EXT_ERR_WDT 0x7 318 #define K8_NBSL_EXT_ERR_WDT 0x7
319 #define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8 319 #define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
320 #define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD 320 #define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
321 321
322 /* 322 /*
323 * The following are for BUS type errors AFTER values have been normalized by 323 * The following are for BUS type errors AFTER values have been normalized by
324 * shifting right 324 * shifting right
325 */ 325 */
326 #define K8_NBSL_PP_SRC 0x0 326 #define K8_NBSL_PP_SRC 0x0
327 #define K8_NBSL_PP_RES 0x1 327 #define K8_NBSL_PP_RES 0x1
328 #define K8_NBSL_PP_OBS 0x2 328 #define K8_NBSL_PP_OBS 0x2
329 #define K8_NBSL_PP_GENERIC 0x3 329 #define K8_NBSL_PP_GENERIC 0x3
330 330
331 #define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF) 331 #define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
332 332
333 #define K8_NBEAL 0x50 333 #define K8_NBEAL 0x50
334 #define K8_NBEAH 0x54 334 #define K8_NBEAH 0x54
335 #define K8_SCRCTRL 0x58 335 #define K8_SCRCTRL 0x58
336 336
337 #define F10_NB_CFG_LOW 0x88 337 #define F10_NB_CFG_LOW 0x88
338 338
339 #define F10_ONLINE_SPARE 0xB0 339 #define F10_ONLINE_SPARE 0xB0
340 #define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1)) 340 #define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
341 #define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3)) 341 #define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
342 #define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007) 342 #define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
343 #define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007) 343 #define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
344 344
345 #define F10_NB_ARRAY_ADDR 0xB8 345 #define F10_NB_ARRAY_ADDR 0xB8
346 346
347 #define F10_NB_ARRAY_DRAM_ECC 0x80000000 347 #define F10_NB_ARRAY_DRAM_ECC 0x80000000
348 348
349 /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */ 349 /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
350 #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1) 350 #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
351 351
352 #define F10_NB_ARRAY_DATA 0xBC 352 #define F10_NB_ARRAY_DATA 0xBC
353 353
354 #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ 354 #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
355 (BIT(((word) & 0xF) + 20) | \ 355 (BIT(((word) & 0xF) + 20) | \
356 BIT(17) | bits) 356 BIT(17) | bits)
357 357
358 #define SET_NB_DRAM_INJECTION_READ(word, bits) \ 358 #define SET_NB_DRAM_INJECTION_READ(word, bits) \
359 (BIT(((word) & 0xF) + 20) | \ 359 (BIT(((word) & 0xF) + 20) | \
360 BIT(16) | bits) 360 BIT(16) | bits)
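A hedged sketch of composing the two injection words from the macros above; the section, word, and bit-pattern values are hypothetical:

	/* Inject on write: 16-byte section 2, word 5, flip bit pattern 0x3 */
	u32 addr = F10_NB_ARRAY_DRAM_ECC | SET_NB_ARRAY_ADDRESS(2);
	u32 data = SET_NB_DRAM_INJECTION_WRITE(5, 0x3);

	/* addr is written to F10_NB_ARRAY_ADDR, data to F10_NB_ARRAY_DATA */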
361 361
362 #define K8_NBCAP 0xE8 362 #define K8_NBCAP 0xE8
363 #define K8_NBCAP_CORES (BIT(12)|BIT(13)) 363 #define K8_NBCAP_CORES (BIT(12)|BIT(13))
364 #define K8_NBCAP_CHIPKILL BIT(4) 364 #define K8_NBCAP_CHIPKILL BIT(4)
365 #define K8_NBCAP_SECDED BIT(3) 365 #define K8_NBCAP_SECDED BIT(3)
366 #define K8_NBCAP_DCT_DUAL BIT(0) 366 #define K8_NBCAP_DCT_DUAL BIT(0)
367 367
368 #define EXT_NB_MCA_CFG 0x180 368 #define EXT_NB_MCA_CFG 0x180
369 369
370 /* MSRs */ 370 /* MSRs */
371 #define K8_MSR_MCGCTL_NBE BIT(4) 371 #define K8_MSR_MCGCTL_NBE BIT(4)
372 372
373 #define K8_MSR_MC4CTL 0x0410 373 #define K8_MSR_MC4CTL 0x0410
374 #define K8_MSR_MC4STAT 0x0411 374 #define K8_MSR_MC4STAT 0x0411
375 #define K8_MSR_MC4ADDR 0x0412 375 #define K8_MSR_MC4ADDR 0x0412
376 376
377 /* AMD sets the first MC device at device ID 0x18. */ 377 /* AMD sets the first MC device at device ID 0x18. */
378 static inline int get_node_id(struct pci_dev *pdev) 378 static inline int get_node_id(struct pci_dev *pdev)
379 { 379 {
380 return PCI_SLOT(pdev->devfn) - 0x18; 380 return PCI_SLOT(pdev->devfn) - 0x18;
381 } 381 }
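Worked example of the mapping, for a hypothetical device:

	/*
	 * e.g. an MC device at devfn PCI_DEVFN(0x19, 2): PCI_SLOT() yields
	 * 0x19, so get_node_id() returns 0x19 - 0x18 = 1.
	 */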
382 382
383 enum amd64_chipset_families { 383 enum amd64_chipset_families {
384 K8_CPUS = 0, 384 K8_CPUS = 0,
385 F10_CPUS, 385 F10_CPUS,
386 }; 386 };
387 387
388 /* Error injection control structure */ 388 /* Error injection control structure */
389 struct error_injection { 389 struct error_injection {
390 u32 section; 390 u32 section;
391 u32 word; 391 u32 word;
392 u32 bit_map; 392 u32 bit_map;
393 }; 393 };
394 394
395 struct amd64_pvt { 395 struct amd64_pvt {
396 struct low_ops *ops; 396 struct low_ops *ops;
397 397
398 /* pci_device handles which we utilize */ 398 /* pci_device handles which we utilize */
399 struct pci_dev *F1, *F2, *F3; 399 struct pci_dev *F1, *F2, *F3;
400 400
401 int mc_node_id; /* MC index of this MC node */ 401 int mc_node_id; /* MC index of this MC node */
402 int ext_model; /* extended model value of this node */ 402 int ext_model; /* extended model value of this node */
403 int channel_count; 403 int channel_count;
404 404
405 /* Raw registers */ 405 /* Raw registers */
406 u32 dclr0; /* DRAM Configuration Low DCT0 reg */ 406 u32 dclr0; /* DRAM Configuration Low DCT0 reg */
407 u32 dclr1; /* DRAM Configuration Low DCT1 reg */ 407 u32 dclr1; /* DRAM Configuration Low DCT1 reg */
408 u32 dchr0; /* DRAM Configuration High DCT0 reg */ 408 u32 dchr0; /* DRAM Configuration High DCT0 reg */
409 u32 dchr1; /* DRAM Configuration High DCT1 reg */ 409 u32 dchr1; /* DRAM Configuration High DCT1 reg */
410 u32 nbcap; /* North Bridge Capabilities */ 410 u32 nbcap; /* North Bridge Capabilities */
411 u32 nbcfg; /* F10 North Bridge Configuration */ 411 u32 nbcfg; /* F10 North Bridge Configuration */
412 u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */ 412 u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
413 u32 dhar; /* DRAM Hoist reg */ 413 u32 dhar; /* DRAM Hoist reg */
414 u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ 414 u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
415 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ 415 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
416 416
417 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ 417 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
418 u32 dcsb0[MAX_CS_COUNT]; 418 u32 dcsb0[MAX_CS_COUNT];
419 u32 dcsb1[MAX_CS_COUNT]; 419 u32 dcsb1[MAX_CS_COUNT];
420 420
421 /* DRAM CS Mask Registers F2x[1,0][6C:60] */ 421 /* DRAM CS Mask Registers F2x[1,0][6C:60] */
422 u32 dcsm0[MAX_CS_COUNT]; 422 u32 dcsm0[MAX_CS_COUNT];
423 u32 dcsm1[MAX_CS_COUNT]; 423 u32 dcsm1[MAX_CS_COUNT];
424 424
425 /* 425 /*
426 * Decoded parts of DRAM BASE and LIMIT Registers 426 * Decoded parts of DRAM BASE and LIMIT Registers
427 * F1x[78,70,68,60,58,50,48,40] 427 * F1x[78,70,68,60,58,50,48,40]
428 */ 428 */
429 u64 dram_base[DRAM_REG_COUNT]; 429 u64 dram_base[DRAM_REG_COUNT];
430 u64 dram_limit[DRAM_REG_COUNT]; 430 u64 dram_limit[DRAM_REG_COUNT];
431 u8 dram_IntlvSel[DRAM_REG_COUNT]; 431 u8 dram_IntlvSel[DRAM_REG_COUNT];
432 u8 dram_IntlvEn[DRAM_REG_COUNT]; 432 u8 dram_IntlvEn[DRAM_REG_COUNT];
433 u8 dram_DstNode[DRAM_REG_COUNT]; 433 u8 dram_DstNode[DRAM_REG_COUNT];
434 u8 dram_rw_en[DRAM_REG_COUNT]; 434 u8 dram_rw_en[DRAM_REG_COUNT];
435 435
436 /* 436 /*
437 * The following fields are set at (load) run time, after CPU revision 437 * The following fields are set at (load) run time, after CPU revision
438 * has been determined, since the dct_base and dct_mask registers vary 438 * has been determined, since the dct_base and dct_mask registers vary
439 * based on revision 439 * based on revision
440 */ 440 */
441 u32 dcsb_base; /* DCSB base bits */ 441 u32 dcsb_base; /* DCSB base bits */
442 u32 dcsm_mask; /* DCSM mask bits */ 442 u32 dcsm_mask; /* DCSM mask bits */
443 u32 cs_count; /* num chip selects (== num DCSB registers) */ 443 u32 cs_count; /* num chip selects (== num DCSB registers) */
444 u32 num_dcsm; /* Number of DCSM registers */ 444 u32 num_dcsm; /* Number of DCSM registers */
445 u32 dcs_mask_notused; /* DCSM notused mask bits */ 445 u32 dcs_mask_notused; /* DCSM notused mask bits */
446 u32 dcs_shift; /* DCSB and DCSM shift value */ 446 u32 dcs_shift; /* DCSB and DCSM shift value */
447 447
448 u64 top_mem; /* top of memory below 4GB */ 448 u64 top_mem; /* top of memory below 4GB */
449 u64 top_mem2; /* top of memory above 4GB */ 449 u64 top_mem2; /* top of memory above 4GB */
450 450
451 u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */ 451 u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
452 u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */ 452 u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
453 u32 online_spare; /* On-Line spare Reg */ 453 u32 online_spare; /* On-Line spare Reg */
454 454
455 /* x4 or x8 syndromes in use */ 455 /* x4 or x8 syndromes in use */
456 u8 syn_type; 456 u8 syn_type;
457 457
458 /* temp storage for when input is received from sysfs */ 458 /* temp storage for when input is received from sysfs */
459 struct err_regs ctl_error_info; 459 struct err_regs ctl_error_info;
460 460
461 /* place to store error injection parameters prior to issue */ 461 /* place to store error injection parameters prior to issue */
462 struct error_injection injection; 462 struct error_injection injection;
463 463
464 /* DCT per-family scrubrate setting */ 464 /* DCT per-family scrubrate setting */
465 u32 min_scrubrate; 465 u32 min_scrubrate;
466 466
467 /* family name this instance is running on */ 467 /* family name this instance is running on */
468 const char *ctl_name; 468 const char *ctl_name;
469 469
470 }; 470 };
471 471
472 /* 472 /*
473 * per-node ECC settings descriptor 473 * per-node ECC settings descriptor
474 */ 474 */
475 struct ecc_settings { 475 struct ecc_settings {
476 u32 old_nbctl; 476 u32 old_nbctl;
477 bool nbctl_valid; 477 bool nbctl_valid;
478 478
479 struct flags { 479 struct flags {
480 unsigned long nb_mce_enable:1; 480 unsigned long nb_mce_enable:1;
481 unsigned long nb_ecc_prev:1; 481 unsigned long nb_ecc_prev:1;
482 } flags; 482 } flags;
483 }; 483 };
484 484
485 struct scrubrate {
486 u32 scrubval; /* bit pattern for scrub rate */
487 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
488 };
489
490 extern struct scrubrate scrubrates[23];
491 extern const char *tt_msgs[4]; 485 extern const char *tt_msgs[4];
492 extern const char *ll_msgs[4]; 486 extern const char *ll_msgs[4];
493 extern const char *rrrr_msgs[16]; 487 extern const char *rrrr_msgs[16];
494 extern const char *to_msgs[2]; 488 extern const char *to_msgs[2];
495 extern const char *pp_msgs[4]; 489 extern const char *pp_msgs[4];
496 extern const char *ii_msgs[4]; 490 extern const char *ii_msgs[4];
497 extern const char *htlink_msgs[8]; 491 extern const char *htlink_msgs[8];
498 492
499 #ifdef CONFIG_EDAC_DEBUG 493 #ifdef CONFIG_EDAC_DEBUG
500 #define NUM_DBG_ATTRS 5 494 #define NUM_DBG_ATTRS 5
501 #else 495 #else
502 #define NUM_DBG_ATTRS 0 496 #define NUM_DBG_ATTRS 0
503 #endif 497 #endif
504 498
505 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION 499 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
506 #define NUM_INJ_ATTRS 5 500 #define NUM_INJ_ATTRS 5
507 #else 501 #else
508 #define NUM_INJ_ATTRS 0 502 #define NUM_INJ_ATTRS 0
509 #endif 503 #endif
510 504
511 extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS], 505 extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
512 amd64_inj_attrs[NUM_INJ_ATTRS]; 506 amd64_inj_attrs[NUM_INJ_ATTRS];
513 507
514 /* 508 /*
515 * Each of the PCI Device ID types has its own set of hardware accessor 509 * Each of the PCI Device ID types has its own set of hardware accessor
516 * functions and per-device encoding/decoding logic. 510 * functions and per-device encoding/decoding logic.
517 */ 511 */
518 struct low_ops { 512 struct low_ops {
519 int (*early_channel_count) (struct amd64_pvt *pvt); 513 int (*early_channel_count) (struct amd64_pvt *pvt);
520 514
521 u64 (*get_error_address) (struct mem_ctl_info *mci, 515 u64 (*get_error_address) (struct mem_ctl_info *mci,
522 struct err_regs *info); 516 struct err_regs *info);
523 void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram); 517 void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
524 void (*read_dram_ctl_register) (struct amd64_pvt *pvt); 518 void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
525 void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, 519 void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
526 struct err_regs *info, u64 SystemAddr); 520 struct err_regs *info, u64 SystemAddr);
527 int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode); 521 int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
528 }; 522 };
529 523
530 struct amd64_family_type { 524 struct amd64_family_type {
531 const char *ctl_name; 525 const char *ctl_name;
532 u16 f1_id, f3_id; 526 u16 f1_id, f3_id;
533 struct low_ops ops; 527 struct low_ops ops;
534 }; 528 };
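A minimal sketch of how a per-family descriptor might be populated; the k8_* handler names are illustrative stand-ins for that family's accessor functions:

	static struct amd64_family_type example_k8 = {
		.ctl_name = "K8",
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.get_error_address	= k8_get_error_address,
			.read_dram_base_limit	= k8_read_dram_base_limit,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_cs,
		},
	};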
535 529
536 static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, 530 static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
537 u32 *val, const char *func) 531 u32 *val, const char *func)
538 { 532 {
539 int err = 0; 533 int err = 0;
540 534
541 err = pci_read_config_dword(pdev, offset, val); 535 err = pci_read_config_dword(pdev, offset, val);
542 if (err) 536 if (err)
543 amd64_warn("%s: error reading F%dx%x.\n", 537 amd64_warn("%s: error reading F%dx%x.\n",
544 func, PCI_FUNC(pdev->devfn), offset); 538 func, PCI_FUNC(pdev->devfn), offset);
545 539
546 return err; 540 return err;
547 } 541 }
548 542
549 #define amd64_read_pci_cfg(pdev, offset, val) \ 543 #define amd64_read_pci_cfg(pdev, offset, val) \
550 amd64_read_pci_cfg_dword(pdev, offset, val, __func__) 544 amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
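Usage is a plain checked read; a hedged example against a hypothetical pvt:

	u32 value;

	/* a failure is already logged, with function and register, by the helper */
	if (amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value))
		return;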
551 545
552 /* 546 /*
553 * For future CPU versions, verify the following as new 'slow' rates appear and 547 * For future CPU versions, verify the following as new 'slow' rates appear and
554 * modify the necessary skip values for the supported CPU. 548 * modify the necessary skip values for the supported CPU.
555 */ 549 */
556 #define K8_MIN_SCRUB_RATE_BITS 0x0 550 #define K8_MIN_SCRUB_RATE_BITS 0x0
557 #define F10_MIN_SCRUB_RATE_BITS 0x5 551 #define F10_MIN_SCRUB_RATE_BITS 0x5
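A hedged sketch of how such a minimum gates scrub-rate selection, against a scrubval/bandwidth table shaped like the struct scrubrate entries this change moves out of the header; the helper and loop shape are illustrative:

	/*
	 * Illustrative: pick the first entry at or above the family minimum
	 * whose bandwidth does not exceed the requested one; fall back to
	 * the last entry otherwise.
	 */
	static u32 example_pick_scrubval(struct scrubrate *tbl, int n,
					 u32 new_bw, u32 min_rate_bits)
	{
		int i;

		for (i = 0; i < n - 1; i++) {
			if (tbl[i].scrubval < min_rate_bits)
				continue;	/* rate not recommended here */
			if (tbl[i].bandwidth <= new_bw)
				break;
		}
		return tbl[i].scrubval;
	}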
558 552
559 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, 553 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
560 u64 *hole_offset, u64 *hole_size); 554 u64 *hole_offset, u64 *hole_size);
561 555
drivers/edac/cpc925_edac.c
1 /* 1 /*
2 * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller. 2 * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
3 * 3 *
4 * Copyright (c) 2008 Wind River Systems, Inc. 4 * Copyright (c) 2008 Wind River Systems, Inc.
5 * 5 *
6 * Authors: Cao Qingtao <qingtao.cao@windriver.com> 6 * Authors: Cao Qingtao <qingtao.cao@windriver.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, 12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 * See the GNU General Public License for more details. 15 * See the GNU General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/init.h> 23 #include <linux/init.h>
24 #include <linux/io.h> 24 #include <linux/io.h>
25 #include <linux/edac.h> 25 #include <linux/edac.h>
26 #include <linux/of.h> 26 #include <linux/of.h>
27 #include <linux/platform_device.h> 27 #include <linux/platform_device.h>
28 #include <linux/gfp.h> 28 #include <linux/gfp.h>
29 29
30 #include "edac_core.h" 30 #include "edac_core.h"
31 #include "edac_module.h" 31 #include "edac_module.h"
32 32
33 #define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__ 33 #define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__
34 #define CPC925_EDAC_MOD_STR "cpc925_edac" 34 #define CPC925_EDAC_MOD_STR "cpc925_edac"
35 35
36 #define cpc925_printk(level, fmt, arg...) \ 36 #define cpc925_printk(level, fmt, arg...) \
37 edac_printk(level, "CPC925", fmt, ##arg) 37 edac_printk(level, "CPC925", fmt, ##arg)
38 38
39 #define cpc925_mc_printk(mci, level, fmt, arg...) \ 39 #define cpc925_mc_printk(mci, level, fmt, arg...) \
40 edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg) 40 edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
41 41
42 /* 42 /*
43 * CPC925 registers are 32 bits wide, with bit0 defined as the 43 * CPC925 registers are 32 bits wide, with bit0 defined as the
44 * most significant bit and bit31 as the least significant. 44 * most significant bit and bit31 as the least significant.
45 */ 45 */
46 #define CPC925_BITS_PER_REG 32 46 #define CPC925_BITS_PER_REG 32
47 #define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr)) 47 #define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr))
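Worked examples of the IBM-style numbering:

	/*
	 * CPC925_BIT(0)  = 1UL << 31 = 0x80000000  (bit0 is the MSB)
	 * CPC925_BIT(31) = 1UL << 0  = 0x00000001  (bit31 is the LSB)
	 */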
48 48
49 /* 49 /*
50 * EDAC device names for the error detections of 50 * EDAC device names for the error detections of
51 * CPU Interface and Hypertransport Link. 51 * CPU Interface and Hypertransport Link.
52 */ 52 */
53 #define CPC925_CPU_ERR_DEV "cpu" 53 #define CPC925_CPU_ERR_DEV "cpu"
54 #define CPC925_HT_LINK_DEV "htlink" 54 #define CPC925_HT_LINK_DEV "htlink"
55 55
56 /* Assume the DDR refresh cycle is 15.6 microseconds */ 56 /* Assume the DDR refresh cycle is 15.6 microseconds */
57 #define CPC925_REF_FREQ 0xFA69 57 #define CPC925_REF_FREQ 0xFA69
58 #define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */ 58 #define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */
59 #define CPC925_NR_CSROWS 8 59 #define CPC925_NR_CSROWS 8
60 60
61 /* 61 /*
62 * All registers and bits definitions are taken from 62 * All registers and bits definitions are taken from
63 * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02". 63 * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
64 */ 64 */
65 65
66 /* 66 /*
67 * CPU and Memory Controller Registers 67 * CPU and Memory Controller Registers
68 */ 68 */
69 /************************************************************ 69 /************************************************************
70 * Processor Interface Exception Mask Register (APIMASK) 70 * Processor Interface Exception Mask Register (APIMASK)
71 ************************************************************/ 71 ************************************************************/
72 #define REG_APIMASK_OFFSET 0x30070 72 #define REG_APIMASK_OFFSET 0x30070
73 enum apimask_bits { 73 enum apimask_bits {
74 APIMASK_DART = CPC925_BIT(0), /* DART Exception */ 74 APIMASK_DART = CPC925_BIT(0), /* DART Exception */
75 APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ 75 APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
76 APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ 76 APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
77 APIMASK_STAT = CPC925_BIT(3), /* Status Exception */ 77 APIMASK_STAT = CPC925_BIT(3), /* Status Exception */
78 APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */ 78 APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */
79 APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ 79 APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
80 APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ 80 APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
81 /* BIT(7) Reserved */ 81 /* BIT(7) Reserved */
82 APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ 82 APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
83 APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ 83 APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
84 APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ 84 APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
85 APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ 85 APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
86 86
87 CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 | 87 CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
88 APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 | 88 APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
89 APIMASK_ADRS1), 89 APIMASK_ADRS1),
90 ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | 90 ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
91 APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), 91 APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
92 }; 92 };
93 93
94 /************************************************************ 94 /************************************************************
95 * Processor Interface Exception Register (APIEXCP) 95 * Processor Interface Exception Register (APIEXCP)
96 ************************************************************/ 96 ************************************************************/
97 #define REG_APIEXCP_OFFSET 0x30060 97 #define REG_APIEXCP_OFFSET 0x30060
98 enum apiexcp_bits { 98 enum apiexcp_bits {
99 APIEXCP_DART = CPC925_BIT(0), /* DART Exception */ 99 APIEXCP_DART = CPC925_BIT(0), /* DART Exception */
100 APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ 100 APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
101 APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ 101 APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
102 APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */ 102 APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */
103 APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */ 103 APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */
104 APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ 104 APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
105 APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ 105 APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
106 /* BIT(7) Reserved */ 106 /* BIT(7) Reserved */
107 APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ 107 APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
108 APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ 108 APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
109 APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ 109 APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
110 APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ 110 APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
111 111
112 CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 | 112 CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
113 APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 | 113 APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
114 APIEXCP_ADRS1), 114 APIEXCP_ADRS1),
115 UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L), 115 UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
116 CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L), 116 CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
117 ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED), 117 ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
118 }; 118 };
119 119
120 /************************************************************ 120 /************************************************************
121 * Memory Bus Configuration Register (MBCR) 121 * Memory Bus Configuration Register (MBCR)
122 ************************************************************/ 122 ************************************************************/
123 #define REG_MBCR_OFFSET 0x2190 123 #define REG_MBCR_OFFSET 0x2190
124 #define MBCR_64BITCFG_SHIFT 23 124 #define MBCR_64BITCFG_SHIFT 23
125 #define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT) 125 #define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT)
126 #define MBCR_64BITBUS_SHIFT 22 126 #define MBCR_64BITBUS_SHIFT 22
127 #define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT) 127 #define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT)
128 128
129 /************************************************************ 129 /************************************************************
130 * Memory Bank Mode Register (MBMR) 130 * Memory Bank Mode Register (MBMR)
131 ************************************************************/ 131 ************************************************************/
132 #define REG_MBMR_OFFSET 0x21C0 132 #define REG_MBMR_OFFSET 0x21C0
133 #define MBMR_MODE_MAX_VALUE 0xF 133 #define MBMR_MODE_MAX_VALUE 0xF
134 #define MBMR_MODE_SHIFT 25 134 #define MBMR_MODE_SHIFT 25
135 #define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT) 135 #define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
136 #define MBMR_BBA_SHIFT 24 136 #define MBMR_BBA_SHIFT 24
137 #define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT) 137 #define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT)
138 138
139 /************************************************************ 139 /************************************************************
140 * Memory Bank Boundary Address Register (MBBAR) 140 * Memory Bank Boundary Address Register (MBBAR)
141 ************************************************************/ 141 ************************************************************/
142 #define REG_MBBAR_OFFSET 0x21D0 142 #define REG_MBBAR_OFFSET 0x21D0
143 #define MBBAR_BBA_MAX_VALUE 0xFF 143 #define MBBAR_BBA_MAX_VALUE 0xFF
144 #define MBBAR_BBA_SHIFT 24 144 #define MBBAR_BBA_SHIFT 24
145 #define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT) 145 #define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
146 146
147 /************************************************************ 147 /************************************************************
148 * Memory Scrub Control Register (MSCR) 148 * Memory Scrub Control Register (MSCR)
149 ************************************************************/ 149 ************************************************************/
150 #define REG_MSCR_OFFSET 0x2400 150 #define REG_MSCR_OFFSET 0x2400
151 #define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1 */ 151 #define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1 */
152 #define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */ 152 #define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */
153 #define MSCR_SI_SHIFT 16 /* si - bit8:15 */ 153 #define MSCR_SI_SHIFT 16 /* si - bit8:15 */
154 #define MSCR_SI_MAX_VALUE 0xFF 154 #define MSCR_SI_MAX_VALUE 0xFF
155 #define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT) 155 #define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
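A hedged sketch of enabling background scrubbing via these fields; vbase and the interval value 0x80 are hypothetical:

	u32 mscr = __raw_readl(vbase + REG_MSCR_OFFSET);

	mscr &= ~(MSCR_SCRUB_MOD_MASK | MSCR_SI_MASK);	/* clear mode + interval */
	mscr |= MSCR_BACKGR_SCRUB;			/* scrub_mod = 01 */
	mscr |= 0x80 << MSCR_SI_SHIFT;			/* hypothetical interval */
	__raw_writel(mscr, vbase + REG_MSCR_OFFSET);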
156 156
157 /************************************************************ 157 /************************************************************
158 * Memory Scrub Range Start Register (MSRSR) 158 * Memory Scrub Range Start Register (MSRSR)
159 ************************************************************/ 159 ************************************************************/
160 #define REG_MSRSR_OFFSET 0x2410 160 #define REG_MSRSR_OFFSET 0x2410
161 161
162 /************************************************************ 162 /************************************************************
163 * Memory Scrub Range End Register (MSRER) 163 * Memory Scrub Range End Register (MSRER)
164 ************************************************************/ 164 ************************************************************/
165 #define REG_MSRER_OFFSET 0x2420 165 #define REG_MSRER_OFFSET 0x2420
166 166
167 /************************************************************ 167 /************************************************************
168 * Memory Scrub Pattern Register (MSPR) 168 * Memory Scrub Pattern Register (MSPR)
169 ************************************************************/ 169 ************************************************************/
170 #define REG_MSPR_OFFSET 0x2430 170 #define REG_MSPR_OFFSET 0x2430
171 171
172 /************************************************************ 172 /************************************************************
173 * Memory Check Control Register (MCCR) 173 * Memory Check Control Register (MCCR)
174 ************************************************************/ 174 ************************************************************/
175 #define REG_MCCR_OFFSET 0x2440 175 #define REG_MCCR_OFFSET 0x2440
176 enum mccr_bits { 176 enum mccr_bits {
177 MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */ 177 MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */
178 }; 178 };
179 179
180 /************************************************************ 180 /************************************************************
181 * Memory Check Range End Register (MCRER) 181 * Memory Check Range End Register (MCRER)
182 ************************************************************/ 182 ************************************************************/
183 #define REG_MCRER_OFFSET 0x2450 183 #define REG_MCRER_OFFSET 0x2450
184 184
185 /************************************************************ 185 /************************************************************
186 * Memory Error Address Register (MEAR) 186 * Memory Error Address Register (MEAR)
187 ************************************************************/ 187 ************************************************************/
188 #define REG_MEAR_OFFSET 0x2460 188 #define REG_MEAR_OFFSET 0x2460
189 #define MEAR_BCNT_MAX_VALUE 0x3 189 #define MEAR_BCNT_MAX_VALUE 0x3
190 #define MEAR_BCNT_SHIFT 30 190 #define MEAR_BCNT_SHIFT 30
191 #define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT) 191 #define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
192 #define MEAR_RANK_MAX_VALUE 0x7 192 #define MEAR_RANK_MAX_VALUE 0x7
193 #define MEAR_RANK_SHIFT 27 193 #define MEAR_RANK_SHIFT 27
194 #define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT) 194 #define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
195 #define MEAR_COL_MAX_VALUE 0x7FF 195 #define MEAR_COL_MAX_VALUE 0x7FF
196 #define MEAR_COL_SHIFT 16 196 #define MEAR_COL_SHIFT 16
197 #define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT) 197 #define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
198 #define MEAR_BANK_MAX_VALUE 0x3 198 #define MEAR_BANK_MAX_VALUE 0x3
199 #define MEAR_BANK_SHIFT 14 199 #define MEAR_BANK_SHIFT 14
200 #define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT) 200 #define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
201 #define MEAR_ROW_MASK 0x00003FFF 201 #define MEAR_ROW_MASK 0x00003FFF
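A hedged sketch of unpacking an MEAR value with the field macros above; the variable names and vbase are illustrative:

	u32 mear = __raw_readl(vbase + REG_MEAR_OFFSET);
	u32 rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
	u32 col  = (mear & MEAR_COL_MASK)  >> MEAR_COL_SHIFT;
	u32 bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
	u32 row  = mear & MEAR_ROW_MASK;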
202 202
203 /************************************************************ 203 /************************************************************
204 * Memory Error Syndrome Register (MESR) 204 * Memory Error Syndrome Register (MESR)
205 ************************************************************/ 205 ************************************************************/
206 #define REG_MESR_OFFSET 0x2470 206 #define REG_MESR_OFFSET 0x2470
207 #define MESR_ECC_SYN_H_MASK 0xFF00 207 #define MESR_ECC_SYN_H_MASK 0xFF00
208 #define MESR_ECC_SYN_L_MASK 0x00FF 208 #define MESR_ECC_SYN_L_MASK 0x00FF
209 209
210 /************************************************************ 210 /************************************************************
211 * Memory Mode Control Register (MMCR) 211 * Memory Mode Control Register (MMCR)
212 ************************************************************/ 212 ************************************************************/
213 #define REG_MMCR_OFFSET 0x2500 213 #define REG_MMCR_OFFSET 0x2500
214 enum mmcr_bits { 214 enum mmcr_bits {
215 MMCR_REG_DIMM_MODE = CPC925_BIT(3), 215 MMCR_REG_DIMM_MODE = CPC925_BIT(3),
216 }; 216 };
217 217
218 /* 218 /*
219 * HyperTransport Link Registers 219 * HyperTransport Link Registers
220 */ 220 */
221 /************************************************************ 221 /************************************************************
222 * Error Handling/Enumeration Scratch Pad Register (ERRCTRL) 222 * Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
223 ************************************************************/ 223 ************************************************************/
224 #define REG_ERRCTRL_OFFSET 0x70140 224 #define REG_ERRCTRL_OFFSET 0x70140
225 enum errctrl_bits { /* nonfatal interrupts for */ 225 enum errctrl_bits { /* nonfatal interrupts for */
226 ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */ 226 ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */
227 ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */ 227 ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */
228 ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */ 228 ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */
229 ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */ 229 ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */
230 ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */ 230 ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */
231 ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */ 231 ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */
232 232
233 ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */ 233 ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */
234 ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */ 234 ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
235 235
236 HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF | 236 HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
237 ERRCTRL_RSP_NF | ERRCTRL_EOC_NF | 237 ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
238 ERRCTRL_OVF_NF | ERRCTRL_PROT_NF), 238 ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
239 HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL), 239 HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
240 }; 240 };
241 241
242 /************************************************************ 242 /************************************************************
243 * Link Configuration and Link Control Register (LINKCTRL) 243 * Link Configuration and Link Control Register (LINKCTRL)
244 ************************************************************/ 244 ************************************************************/
245 #define REG_LINKCTRL_OFFSET 0x70110 245 #define REG_LINKCTRL_OFFSET 0x70110
246 enum linkctrl_bits { 246 enum linkctrl_bits {
247 LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)), 247 LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)),
248 LINKCTRL_LINK_FAIL = CPC925_BIT(27), 248 LINKCTRL_LINK_FAIL = CPC925_BIT(27),
249 249
250 HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL), 250 HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
251 }; 251 };
252 252
253 /************************************************************ 253 /************************************************************
254 * Link FreqCap/Error/Freq/Revision ID Register (LINKERR) 254 * Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
255 ************************************************************/ 255 ************************************************************/
256 #define REG_LINKERR_OFFSET 0x70120 256 #define REG_LINKERR_OFFSET 0x70120
257 enum linkerr_bits { 257 enum linkerr_bits {
258 LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */ 258 LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */
259 LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */ 259 LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */
260 LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */ 260 LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */
261 261
262 HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR | 262 HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
263 LINKERR_PROT_ERR), 263 LINKERR_PROT_ERR),
264 }; 264 };
265 265
266 /************************************************************ 266 /************************************************************
267 * Bridge Control Register (BRGCTRL) 267 * Bridge Control Register (BRGCTRL)
268 ************************************************************/ 268 ************************************************************/
269 #define REG_BRGCTRL_OFFSET 0x70300 269 #define REG_BRGCTRL_OFFSET 0x70300
270 enum brgctrl_bits { 270 enum brgctrl_bits {
271 BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */ 271 BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
272 BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */ 272 BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
273 }; 273 };
274 274
275 /* Private structure for edac memory controller */ 275 /* Private structure for edac memory controller */
276 struct cpc925_mc_pdata { 276 struct cpc925_mc_pdata {
277 void __iomem *vbase; 277 void __iomem *vbase;
278 unsigned long total_mem; 278 unsigned long total_mem;
279 const char *name; 279 const char *name;
280 int edac_idx; 280 int edac_idx;
281 }; 281 };
282 282
283 /* Private structure for common edac device */ 283 /* Private structure for common edac device */
284 struct cpc925_dev_info { 284 struct cpc925_dev_info {
285 void __iomem *vbase; 285 void __iomem *vbase;
286 struct platform_device *pdev; 286 struct platform_device *pdev;
287 char *ctl_name; 287 char *ctl_name;
288 int edac_idx; 288 int edac_idx;
289 struct edac_device_ctl_info *edac_dev; 289 struct edac_device_ctl_info *edac_dev;
290 void (*init)(struct cpc925_dev_info *dev_info); 290 void (*init)(struct cpc925_dev_info *dev_info);
291 void (*exit)(struct cpc925_dev_info *dev_info); 291 void (*exit)(struct cpc925_dev_info *dev_info);
292 void (*check)(struct edac_device_ctl_info *edac_dev); 292 void (*check)(struct edac_device_ctl_info *edac_dev);
293 }; 293 };
294 294
295 /* Get total memory size from Open Firmware DTB */ 295 /* Get total memory size from Open Firmware DTB */
296 static void get_total_mem(struct cpc925_mc_pdata *pdata) 296 static void get_total_mem(struct cpc925_mc_pdata *pdata)
297 { 297 {
298 struct device_node *np = NULL; 298 struct device_node *np = NULL;
299 const unsigned int *reg, *reg_end; 299 const unsigned int *reg, *reg_end;
300 int len, sw, aw; 300 int len, sw, aw;
301 unsigned long start, size; 301 unsigned long start, size;
302 302
303 np = of_find_node_by_type(NULL, "memory"); 303 np = of_find_node_by_type(NULL, "memory");
304 if (!np) 304 if (!np)
305 return; 305 return;
306 306
307 aw = of_n_addr_cells(np); 307 aw = of_n_addr_cells(np);
308 sw = of_n_size_cells(np); 308 sw = of_n_size_cells(np);
309 reg = (const unsigned int *)of_get_property(np, "reg", &len); 309 reg = (const unsigned int *)of_get_property(np, "reg", &len);
310 reg_end = reg + len/4; 310 reg_end = reg + len/4;
311 311
312 pdata->total_mem = 0; 312 pdata->total_mem = 0;
313 do { 313 do {
314 start = of_read_number(reg, aw); 314 start = of_read_number(reg, aw);
315 reg += aw; 315 reg += aw;
316 size = of_read_number(reg, sw); 316 size = of_read_number(reg, sw);
317 reg += sw; 317 reg += sw;
318 debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, 318 debugf1("%s: start 0x%lx, size 0x%lx\n", __func__,
319 start, size); 319 start, size);
320 pdata->total_mem += size; 320 pdata->total_mem += size;
321 } while (reg < reg_end); 321 } while (reg < reg_end);
322 322
323 of_node_put(np); 323 of_node_put(np);
324 debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); 324 debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem);
325 } 325 }
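The loop in get_total_mem() walks the flattened "reg" property of the /memory node, a list of (address, size) cell pairs, summing the sizes. A worked example with values assumed purely for illustration: with one address cell and one size cell (aw = sw = 1), reg = <0x00000000 0x20000000 0x20000000 0x10000000> describes 512 MB at address 0 plus 256 MB at 0x20000000, so total_mem ends up as 0x30000000 (768 MB).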
326 326
327 static void cpc925_init_csrows(struct mem_ctl_info *mci) 327 static void cpc925_init_csrows(struct mem_ctl_info *mci)
328 { 328 {
329 struct cpc925_mc_pdata *pdata = mci->pvt_info; 329 struct cpc925_mc_pdata *pdata = mci->pvt_info;
330 struct csrow_info *csrow; 330 struct csrow_info *csrow;
331 int index; 331 int index;
332 u32 mbmr, mbbar, bba; 332 u32 mbmr, mbbar, bba;
333 unsigned long row_size, last_nr_pages = 0; 333 unsigned long row_size, last_nr_pages = 0;
334 334
335 get_total_mem(pdata); 335 get_total_mem(pdata);
336 336
337 for (index = 0; index < mci->nr_csrows; index++) { 337 for (index = 0; index < mci->nr_csrows; index++) {
338 mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET + 338 mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
339 0x20 * index); 339 0x20 * index);
340 mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET + 340 mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
341 0x20 * index); 341 0x20 * index);
342 bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) | 342 bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
343 ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT); 343 ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
344 344
345 if (bba == 0) 345 if (bba == 0)
346 continue; /* not populated */ 346 continue; /* not populated */
347 347
348 csrow = &mci->csrows[index]; 348 csrow = &mci->csrows[index];
349 349
350 row_size = bba * (1UL << 28); /* 256M */ 350 row_size = bba * (1UL << 28); /* 256M */
351 csrow->first_page = last_nr_pages; 351 csrow->first_page = last_nr_pages;
352 csrow->nr_pages = row_size >> PAGE_SHIFT; 352 csrow->nr_pages = row_size >> PAGE_SHIFT;
353 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 353 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
354 last_nr_pages = csrow->last_page + 1; 354 last_nr_pages = csrow->last_page + 1;
355 355
356 csrow->mtype = MEM_RDDR; 356 csrow->mtype = MEM_RDDR;
357 csrow->edac_mode = EDAC_SECDED; 357 csrow->edac_mode = EDAC_SECDED;
358 358
359 switch (csrow->nr_channels) { 359 switch (csrow->nr_channels) {
360 case 1: /* Single channel */ 360 case 1: /* Single channel */
361 csrow->grain = 32; /* four-beat burst of 32 bytes */ 361 csrow->grain = 32; /* four-beat burst of 32 bytes */
362 break; 362 break;
363 case 2: /* Dual channel */ 363 case 2: /* Dual channel */
364 default: 364 default:
365 csrow->grain = 64; /* four-beat burst of 64 bytes */ 365 csrow->grain = 64; /* four-beat burst of 64 bytes */
366 break; 366 break;
367 } 367 }
368 368
369 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { 369 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
370 case 6: /* 0110, no way to differentiate X8 VS X16 */ 370 case 6: /* 0110, no way to differentiate X8 VS X16 */
371 case 5: /* 0101 */ 371 case 5: /* 0101 */
372 case 8: /* 1000 */ 372 case 8: /* 1000 */
373 csrow->dtype = DEV_X16; 373 csrow->dtype = DEV_X16;
374 break; 374 break;
375 case 7: /* 0111 */ 375 case 7: /* 0111 */
376 case 9: /* 1001 */ 376 case 9: /* 1001 */
377 csrow->dtype = DEV_X8; 377 csrow->dtype = DEV_X8;
378 break; 378 break;
379 default: 379 default:
380 csrow->dtype = DEV_UNKNOWN; 380 csrow->dtype = DEV_UNKNOWN;
381 break; 381 break;
382 } 382 }
383 } 383 }
384 } 384 }
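The csrow sizing above is simple arithmetic: bba counts 256 MB units, so a hypothetical bba = 4 gives row_size = 4 * (1 << 28) = 1 GB and, assuming 4 KB pages (PAGE_SHIFT = 12), nr_pages = 0x40000000 >> 12 = 262144; first_page of the next populated csrow then continues from last_nr_pages.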
385 385
386 /* Enable memory controller ECC detection */ 386 /* Enable memory controller ECC detection */
387 static void cpc925_mc_init(struct mem_ctl_info *mci) 387 static void cpc925_mc_init(struct mem_ctl_info *mci)
388 { 388 {
389 struct cpc925_mc_pdata *pdata = mci->pvt_info; 389 struct cpc925_mc_pdata *pdata = mci->pvt_info;
390 u32 apimask; 390 u32 apimask;
391 u32 mccr; 391 u32 mccr;
392 392
393 /* Enable various ECC error exceptions */ 393 /* Enable various ECC error exceptions */
394 apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET); 394 apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
395 if ((apimask & ECC_MASK_ENABLE) == 0) { 395 if ((apimask & ECC_MASK_ENABLE) == 0) {
396 apimask |= ECC_MASK_ENABLE; 396 apimask |= ECC_MASK_ENABLE;
397 __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET); 397 __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
398 } 398 }
399 399
400 /* Enable ECC detection */ 400 /* Enable ECC detection */
401 mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET); 401 mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
402 if ((mccr & MCCR_ECC_EN) == 0) { 402 if ((mccr & MCCR_ECC_EN) == 0) {
403 mccr |= MCCR_ECC_EN; 403 mccr |= MCCR_ECC_EN;
404 __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET); 404 __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
405 } 405 }
406 } 406 }
407 407
408 /* Disable memory controller ECC detection */ 408 /* Disable memory controller ECC detection */
409 static void cpc925_mc_exit(struct mem_ctl_info *mci) 409 static void cpc925_mc_exit(struct mem_ctl_info *mci)
410 { 410 {
411 /* 411 /*
412 * WARNING: 412 * WARNING:
413 * We are supposed to clear the ECC error detection bits 413 * We are supposed to clear the ECC error detection bits
414 * here, and doing so would be harmless. However, once they 414 * here, and doing so would be harmless. However, once they
415 * are cleared, re-installing the CPC925 EDAC module later 415 * are cleared, re-installing the CPC925 EDAC module later
416 * and re-enabling them in cpc925_mc_init() will trigger a 416 * and re-enabling them in cpc925_mc_init() will trigger a
417 * machine check exception. 417 * machine check exception.
418 * It is also fine to leave the ECC error detection bits 418 * It is also fine to leave the ECC error detection bits
419 * enabled, since they are set to 1 by default or by the boot loader. 419 * enabled, since they are set to 1 by default or by the boot loader.
420 */ 420 */
421 421
422 return; 422 return;
423 } 423 }
424 424
425 /* 425 /*
426 * Translate DDR column/row/bank addresses back into a page frame 426 * Translate DDR column/row/bank addresses back into a page frame
427 * number and an offset within the page. 427 * number and an offset within the page.
428 * 428 *
429 * Suppose the memory mode is 0x0111 (128-bit mode, identical DIMM pairs); 429 * Suppose the memory mode is 0x0111 (128-bit mode, identical DIMM pairs);
430 * the physical address (PA) to column address (CA) bit mappings are: 430 * the physical address (PA) to column address (CA) bit mappings are:
431 * CA 0 1 2 3 4 5 6 7 8 9 10 431 * CA 0 1 2 3 4 5 6 7 8 9 10
432 * PA 59 58 57 56 55 54 53 52 51 50 49 432 * PA 59 58 57 56 55 54 53 52 51 50 49
433 * 433 *
434 * physical address (PA) to bank address (BA) bit mappings are: 434 * physical address (PA) to bank address (BA) bit mappings are:
435 * BA 0 1 435 * BA 0 1
436 * PA 43 44 436 * PA 43 44
437 * 437 *
438 * physical address (PA) to row address (RA) bit mappings are: 438 * physical address (PA) to row address (RA) bit mappings are:
439 * RA 0 1 2 3 4 5 6 7 8 9 10 11 12 439 * RA 0 1 2 3 4 5 6 7 8 9 10 11 12
440 * PA 36 35 34 48 47 46 45 40 41 42 39 38 37 440 * PA 36 35 34 48 47 46 45 40 41 42 39 38 37
441 */ 441 */
442 static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, 442 static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
443 unsigned long *pfn, unsigned long *offset, int *csrow) 443 unsigned long *pfn, unsigned long *offset, int *csrow)
444 { 444 {
445 u32 bcnt, rank, col, bank, row; 445 u32 bcnt, rank, col, bank, row;
446 u32 c; 446 u32 c;
447 unsigned long pa; 447 unsigned long pa;
448 int i; 448 int i;
449 449
450 bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT; 450 bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
451 rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT; 451 rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
452 col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT; 452 col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
453 bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT; 453 bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
454 row = mear & MEAR_ROW_MASK; 454 row = mear & MEAR_ROW_MASK;
455 455
456 *csrow = rank; 456 *csrow = rank;
457 457
458 #ifdef CONFIG_EDAC_DEBUG 458 #ifdef CONFIG_EDAC_DEBUG
459 if (mci->csrows[rank].first_page == 0) { 459 if (mci->csrows[rank].first_page == 0) {
460 cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " 460 cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
461 "non-populated csrow, broken hardware?\n"); 461 "non-populated csrow, broken hardware?\n");
462 return; 462 return;
463 } 463 }
464 #endif 464 #endif
465 465
466 /* Recover the csrow's base address */ 466 /* Recover the csrow's base address */
467 pa = mci->csrows[rank].first_page << PAGE_SHIFT; 467 pa = mci->csrows[rank].first_page << PAGE_SHIFT;
468 468
469 /* Recover the column address bits */ 469 /* Recover the column address bits */
470 col += bcnt; 470 col += bcnt;
471 for (i = 0; i < 11; i++) { 471 for (i = 0; i < 11; i++) {
472 c = col & 0x1; 472 c = col & 0x1;
473 col >>= 1; 473 col >>= 1;
474 pa |= c << (14 - i); 474 pa |= c << (14 - i);
475 } 475 }
476 476
477 /* Recover the bank address bits */ 477 /* Recover the bank address bits */
478 pa |= bank << 19; 478 pa |= bank << 19;
479 479
480 /* Recover the row address bits, in 4 steps */ 480 /* Recover the row address bits, in 4 steps */
481 for (i = 0; i < 3; i++) { 481 for (i = 0; i < 3; i++) {
482 c = row & 0x1; 482 c = row & 0x1;
483 row >>= 1; 483 row >>= 1;
484 pa |= c << (26 - i); 484 pa |= c << (26 - i);
485 } 485 }
486 486
487 for (i = 0; i < 3; i++) { 487 for (i = 0; i < 3; i++) {
488 c = row & 0x1; 488 c = row & 0x1;
489 row >>= 1; 489 row >>= 1;
490 pa |= c << (21 + i); 490 pa |= c << (21 + i);
491 } 491 }
492 492
493 for (i = 0; i < 4; i++) { 493 for (i = 0; i < 4; i++) {
494 c = row & 0x1; 494 c = row & 0x1;
495 row >>= 1; 495 row >>= 1;
496 pa |= c << (18 - i); 496 pa |= c << (18 - i);
497 } 497 }
498 498
499 for (i = 0; i < 3; i++) { 499 for (i = 0; i < 3; i++) {
500 c = row & 0x1; 500 c = row & 0x1;
501 row >>= 1; 501 row >>= 1;
502 pa |= c << (29 - i); 502 pa |= c << (29 - i);
503 } 503 }
504 504
505 *offset = pa & (PAGE_SIZE - 1); 505 *offset = pa & (PAGE_SIZE - 1);
506 *pfn = pa >> PAGE_SHIFT; 506 *pfn = pa >> PAGE_SHIFT;
507 507
508 debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); 508 debugf0("%s: ECC physical address 0x%lx\n", __func__, pa);
509 } 509 }
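Each loop in cpc925_mc_get_pfn() peels bits off a hardware field LSB-first and scatters them into the physical address at fixed positions. A minimal standalone sketch of the descending variant, for illustration only (the helper name is invented, not part of the driver):

	/* Place the low 'n' bits of 'val', LSB first, into an address
	 * starting at bit 'hi' and walking downward -- the same shape as
	 * the column loop above (n = 11, hi = 14). */
	static unsigned long scatter_bits_down(u32 val, int n, int hi)
	{
		unsigned long pa = 0;
		int i;

		for (i = 0; i < n; i++) {
			pa |= (unsigned long)(val & 0x1) << (hi - i);
			val >>= 1;
		}

		return pa;
	}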
510 510
511 static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) 511 static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
512 { 512 {
513 if ((syndrome & MESR_ECC_SYN_H_MASK) == 0) 513 if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
514 return 0; 514 return 0;
515 515
516 if ((syndrome & MESR_ECC_SYN_L_MASK) == 0) 516 if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
517 return 1; 517 return 1;
518 518
519 cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n", 519 cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
520 syndrome); 520 syndrome);
521 return 1; 521 return 1;
522 } 522 }
523 523
524 /* Check memory controller registers for ECC errors */ 524 /* Check memory controller registers for ECC errors */
525 static void cpc925_mc_check(struct mem_ctl_info *mci) 525 static void cpc925_mc_check(struct mem_ctl_info *mci)
526 { 526 {
527 struct cpc925_mc_pdata *pdata = mci->pvt_info; 527 struct cpc925_mc_pdata *pdata = mci->pvt_info;
528 u32 apiexcp; 528 u32 apiexcp;
529 u32 mear; 529 u32 mear;
530 u32 mesr; 530 u32 mesr;
531 u16 syndrome; 531 u16 syndrome;
532 unsigned long pfn = 0, offset = 0; 532 unsigned long pfn = 0, offset = 0;
533 int csrow = 0, channel = 0; 533 int csrow = 0, channel = 0;
534 534
535 /* APIEXCP is cleared when read */ 535 /* APIEXCP is cleared when read */
536 apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET); 536 apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
537 if ((apiexcp & ECC_EXCP_DETECTED) == 0) 537 if ((apiexcp & ECC_EXCP_DETECTED) == 0)
538 return; 538 return;
539 539
540 mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET); 540 mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
541 syndrome = mesr & (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK); 541 syndrome = mesr & (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
542 542
543 mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET); 543 mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
544 544
545 /* Translate column/row addresses back into page frame number, etc. */ 545 /* Translate column/row addresses back into page frame number, etc. */
546 cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow); 546 cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
547 547
548 if (apiexcp & CECC_EXCP_DETECTED) { 548 if (apiexcp & CECC_EXCP_DETECTED) {
549 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); 549 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
550 channel = cpc925_mc_find_channel(mci, syndrome); 550 channel = cpc925_mc_find_channel(mci, syndrome);
551 edac_mc_handle_ce(mci, pfn, offset, syndrome, 551 edac_mc_handle_ce(mci, pfn, offset, syndrome,
552 csrow, channel, mci->ctl_name); 552 csrow, channel, mci->ctl_name);
553 } 553 }
554 554
555 if (apiexcp & UECC_EXCP_DETECTED) { 555 if (apiexcp & UECC_EXCP_DETECTED) {
556 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); 556 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
557 edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name); 557 edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
558 } 558 }
559 559
560 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); 560 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
561 cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n", 561 cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n",
562 __raw_readl(pdata->vbase + REG_APIMASK_OFFSET)); 562 __raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
563 cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n", 563 cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n",
564 apiexcp); 564 apiexcp);
565 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n", 565 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n",
566 __raw_readl(pdata->vbase + REG_MSCR_OFFSET)); 566 __raw_readl(pdata->vbase + REG_MSCR_OFFSET));
567 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n", 567 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n",
568 __raw_readl(pdata->vbase + REG_MSRSR_OFFSET)); 568 __raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
569 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n", 569 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n",
570 __raw_readl(pdata->vbase + REG_MSRER_OFFSET)); 570 __raw_readl(pdata->vbase + REG_MSRER_OFFSET));
571 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n", 571 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n",
572 __raw_readl(pdata->vbase + REG_MSPR_OFFSET)); 572 __raw_readl(pdata->vbase + REG_MSPR_OFFSET));
573 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n", 573 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n",
574 __raw_readl(pdata->vbase + REG_MCCR_OFFSET)); 574 __raw_readl(pdata->vbase + REG_MCCR_OFFSET));
575 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n", 575 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n",
576 __raw_readl(pdata->vbase + REG_MCRER_OFFSET)); 576 __raw_readl(pdata->vbase + REG_MCRER_OFFSET));
577 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n", 577 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n",
578 mesr); 578 mesr);
579 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n", 579 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n",
580 syndrome); 580 syndrome);
581 } 581 }
582 582
583 /******************** CPU err device********************************/ 583 /******************** CPU err device********************************/
584 /* Enable CPU Errors detection */ 584 /* Enable CPU Errors detection */
585 static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) 585 static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
586 { 586 {
587 u32 apimask; 587 u32 apimask;
588 588
589 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); 589 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
590 if ((apimask & CPU_MASK_ENABLE) == 0) { 590 if ((apimask & CPU_MASK_ENABLE) == 0) {
591 apimask |= CPU_MASK_ENABLE; 591 apimask |= CPU_MASK_ENABLE;
592 __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); 592 __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
593 } 593 }
594 } 594 }
595 595
596 /* Disable CPU Errors detection */ 596 /* Disable CPU Errors detection */
597 static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info) 597 static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
598 { 598 {
599 /* 599 /*
600 * WARNING: 600 * WARNING:
601 * We are supposed to clear the CPU error detection bits 601 * We are supposed to clear the CPU error detection bits
602 * here, and doing so would be harmless. However, once they 602 * here, and doing so would be harmless. However, once they
603 * are cleared, re-installing the CPC925 EDAC module later 603 * are cleared, re-installing the CPC925 EDAC module later
604 * and re-enabling them in cpc925_cpu_init() will trigger a 604 * and re-enabling them in cpc925_cpu_init() will trigger a
605 * machine check exception. 605 * machine check exception.
606 * It is also fine to leave the CPU error detection bits 606 * It is also fine to leave the CPU error detection bits
607 * enabled, since they are set to 1 by default. 607 * enabled, since they are set to 1 by default.
608 */ 608 */
609 609
610 return; 610 return;
611 } 611 }
612 612
613 /* Check for CPU Errors */ 613 /* Check for CPU Errors */
614 static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev) 614 static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
615 { 615 {
616 struct cpc925_dev_info *dev_info = edac_dev->pvt_info; 616 struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
617 u32 apiexcp; 617 u32 apiexcp;
618 u32 apimask; 618 u32 apimask;
619 619
620 /* APIEXCP is cleared when read */ 620 /* APIEXCP is cleared when read */
621 apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET); 621 apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
622 if ((apiexcp & CPU_EXCP_DETECTED) == 0) 622 if ((apiexcp & CPU_EXCP_DETECTED) == 0)
623 return; 623 return;
624 624
625 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); 625 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
626 cpc925_printk(KERN_INFO, "Processor Interface Fault\n" 626 cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
627 "Processor Interface register dump:\n"); 627 "Processor Interface register dump:\n");
628 cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask); 628 cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask);
629 cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); 629 cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp);
630 630
631 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); 631 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
632 } 632 }
633 633
634 /******************** HT Link err device****************************/ 634 /******************** HT Link err device****************************/
635 /* Enable HyperTransport Link Error detection */ 635 /* Enable HyperTransport Link Error detection */
636 static void cpc925_htlink_init(struct cpc925_dev_info *dev_info) 636 static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
637 { 637 {
638 u32 ht_errctrl; 638 u32 ht_errctrl;
639 639
640 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); 640 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
641 if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) { 641 if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
642 ht_errctrl |= HT_ERRCTRL_ENABLE; 642 ht_errctrl |= HT_ERRCTRL_ENABLE;
643 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); 643 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
644 } 644 }
645 } 645 }
646 646
647 /* Disable HyperTransport Link Error detection */ 647 /* Disable HyperTransport Link Error detection */
648 static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info) 648 static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
649 { 649 {
650 u32 ht_errctrl; 650 u32 ht_errctrl;
651 651
652 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); 652 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
653 ht_errctrl &= ~HT_ERRCTRL_ENABLE; 653 ht_errctrl &= ~HT_ERRCTRL_ENABLE;
654 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); 654 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
655 } 655 }
656 656
657 /* Check for HyperTransport Link errors */ 657 /* Check for HyperTransport Link errors */
658 static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev) 658 static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
659 { 659 {
660 struct cpc925_dev_info *dev_info = edac_dev->pvt_info; 660 struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
661 u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET); 661 u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
662 u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET); 662 u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
663 u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); 663 u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
664 u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET); 664 u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);
665 665
666 if (!((brgctrl & BRGCTRL_DETSERR) || 666 if (!((brgctrl & BRGCTRL_DETSERR) ||
667 (linkctrl & HT_LINKCTRL_DETECTED) || 667 (linkctrl & HT_LINKCTRL_DETECTED) ||
668 (errctrl & HT_ERRCTRL_DETECTED) || 668 (errctrl & HT_ERRCTRL_DETECTED) ||
669 (linkerr & HT_LINKERR_DETECTED))) 669 (linkerr & HT_LINKERR_DETECTED)))
670 return; 670 return;
671 671
672 cpc925_printk(KERN_INFO, "HT Link Fault\n" 672 cpc925_printk(KERN_INFO, "HT Link Fault\n"
673 "HT register dump:\n"); 673 "HT register dump:\n");
674 cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n", 674 cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n",
675 brgctrl); 675 brgctrl);
676 cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n", 676 cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n",
677 linkctrl); 677 linkctrl);
678 cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n", 678 cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n",
679 errctrl); 679 errctrl);
680 cpc925_printk(KERN_INFO, "Link Error 0x%08x\n", 680 cpc925_printk(KERN_INFO, "Link Error 0x%08x\n",
681 linkerr); 681 linkerr);
682 682
683 /* These status bits are cleared by writing 1 */ 683 /* These status bits are cleared by writing 1 */
684 if (brgctrl & BRGCTRL_DETSERR) 684 if (brgctrl & BRGCTRL_DETSERR)
685 __raw_writel(BRGCTRL_DETSERR, 685 __raw_writel(BRGCTRL_DETSERR,
686 dev_info->vbase + REG_BRGCTRL_OFFSET); 686 dev_info->vbase + REG_BRGCTRL_OFFSET);
687 687
688 if (linkctrl & HT_LINKCTRL_DETECTED) 688 if (linkctrl & HT_LINKCTRL_DETECTED)
689 __raw_writel(HT_LINKCTRL_DETECTED, 689 __raw_writel(HT_LINKCTRL_DETECTED,
690 dev_info->vbase + REG_LINKCTRL_OFFSET); 690 dev_info->vbase + REG_LINKCTRL_OFFSET);
691 691
692 /* Initiate Secondary Bus Reset to clear the chain failure */ 692 /* Initiate Secondary Bus Reset to clear the chain failure */
693 if (errctrl & ERRCTRL_CHN_FAL) 693 if (errctrl & ERRCTRL_CHN_FAL)
694 __raw_writel(BRGCTRL_SECBUSRESET, 694 __raw_writel(BRGCTRL_SECBUSRESET,
695 dev_info->vbase + REG_BRGCTRL_OFFSET); 695 dev_info->vbase + REG_BRGCTRL_OFFSET);
696 696
697 if (errctrl & ERRCTRL_RSP_ERR) 697 if (errctrl & ERRCTRL_RSP_ERR)
698 __raw_writel(ERRCTRL_RSP_ERR, 698 __raw_writel(ERRCTRL_RSP_ERR,
699 dev_info->vbase + REG_ERRCTRL_OFFSET); 699 dev_info->vbase + REG_ERRCTRL_OFFSET);
700 700
701 if (linkerr & HT_LINKERR_DETECTED) 701 if (linkerr & HT_LINKERR_DETECTED)
702 __raw_writel(HT_LINKERR_DETECTED, 702 __raw_writel(HT_LINKERR_DETECTED,
703 dev_info->vbase + REG_LINKERR_OFFSET); 703 dev_info->vbase + REG_LINKERR_OFFSET);
704 704
705 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); 705 edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
706 } 706 }
707 707
708 static struct cpc925_dev_info cpc925_devs[] = { 708 static struct cpc925_dev_info cpc925_devs[] = {
709 { 709 {
710 .ctl_name = CPC925_CPU_ERR_DEV, 710 .ctl_name = CPC925_CPU_ERR_DEV,
711 .init = cpc925_cpu_init, 711 .init = cpc925_cpu_init,
712 .exit = cpc925_cpu_exit, 712 .exit = cpc925_cpu_exit,
713 .check = cpc925_cpu_check, 713 .check = cpc925_cpu_check,
714 }, 714 },
715 { 715 {
716 .ctl_name = CPC925_HT_LINK_DEV, 716 .ctl_name = CPC925_HT_LINK_DEV,
717 .init = cpc925_htlink_init, 717 .init = cpc925_htlink_init,
718 .exit = cpc925_htlink_exit, 718 .exit = cpc925_htlink_exit,
719 .check = cpc925_htlink_check, 719 .check = cpc925_htlink_check,
720 }, 720 },
721 {0}, /* Terminated by NULL */ 721 {0}, /* Terminated by NULL */
722 }; 722 };
723 723
724 /* 724 /*
725 * Add CPU Err detection and HyperTransport Link Err detection 725 * Add CPU Err detection and HyperTransport Link Err detection
726 * as common "edac_device"s; they have no corresponding device 726 * as common "edac_device"s; they have no corresponding device
727 * nodes in the Open Firmware DTB, so we have to add platform 727 * nodes in the Open Firmware DTB, so we have to add platform
728 * devices for them. They also share the MMIO range of the 728 * devices for them. They also share the MMIO range of the
729 * memory controller. 729 * memory controller.
730 */ 730 */
731 static void cpc925_add_edac_devices(void __iomem *vbase) 731 static void cpc925_add_edac_devices(void __iomem *vbase)
732 { 732 {
733 struct cpc925_dev_info *dev_info; 733 struct cpc925_dev_info *dev_info;
734 734
735 if (!vbase) { 735 if (!vbase) {
736 cpc925_printk(KERN_ERR, "MMIO not established yet\n"); 736 cpc925_printk(KERN_ERR, "MMIO not established yet\n");
737 return; 737 return;
738 } 738 }
739 739
740 for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { 740 for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
741 dev_info->vbase = vbase; 741 dev_info->vbase = vbase;
742 dev_info->pdev = platform_device_register_simple( 742 dev_info->pdev = platform_device_register_simple(
743 dev_info->ctl_name, 0, NULL, 0); 743 dev_info->ctl_name, 0, NULL, 0);
744 if (IS_ERR(dev_info->pdev)) { 744 if (IS_ERR(dev_info->pdev)) {
745 cpc925_printk(KERN_ERR, 745 cpc925_printk(KERN_ERR,
746 "Can't register platform device for %s\n", 746 "Can't register platform device for %s\n",
747 dev_info->ctl_name); 747 dev_info->ctl_name);
748 continue; 748 continue;
749 } 749 }
750 750
751 /* 751 /*
752 * Don't have to allocate private structure but 752 * Don't have to allocate private structure but
753 * make use of cpc925_devs[] instead. 753 * make use of cpc925_devs[] instead.
754 */ 754 */
755 dev_info->edac_idx = edac_device_alloc_index(); 755 dev_info->edac_idx = edac_device_alloc_index();
756 dev_info->edac_dev = 756 dev_info->edac_dev =
757 edac_device_alloc_ctl_info(0, dev_info->ctl_name, 757 edac_device_alloc_ctl_info(0, dev_info->ctl_name,
758 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx); 758 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
759 if (!dev_info->edac_dev) { 759 if (!dev_info->edac_dev) {
760 cpc925_printk(KERN_ERR, "No memory for edac device\n"); 760 cpc925_printk(KERN_ERR, "No memory for edac device\n");
761 goto err1; 761 goto err1;
762 } 762 }
763 763
764 dev_info->edac_dev->pvt_info = dev_info; 764 dev_info->edac_dev->pvt_info = dev_info;
765 dev_info->edac_dev->dev = &dev_info->pdev->dev; 765 dev_info->edac_dev->dev = &dev_info->pdev->dev;
766 dev_info->edac_dev->ctl_name = dev_info->ctl_name; 766 dev_info->edac_dev->ctl_name = dev_info->ctl_name;
767 dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR; 767 dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
768 dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev); 768 dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);
769 769
770 if (edac_op_state == EDAC_OPSTATE_POLL) 770 if (edac_op_state == EDAC_OPSTATE_POLL)
771 dev_info->edac_dev->edac_check = dev_info->check; 771 dev_info->edac_dev->edac_check = dev_info->check;
772 772
773 if (dev_info->init) 773 if (dev_info->init)
774 dev_info->init(dev_info); 774 dev_info->init(dev_info);
775 775
776 if (edac_device_add_device(dev_info->edac_dev) > 0) { 776 if (edac_device_add_device(dev_info->edac_dev) > 0) {
777 cpc925_printk(KERN_ERR, 777 cpc925_printk(KERN_ERR,
778 "Unable to add edac device for %s\n", 778 "Unable to add edac device for %s\n",
779 dev_info->ctl_name); 779 dev_info->ctl_name);
780 goto err2; 780 goto err2;
781 } 781 }
782 782
783 debugf0("%s: Successfully added edac device for %s\n", 783 debugf0("%s: Successfully added edac device for %s\n",
784 __func__, dev_info->ctl_name); 784 __func__, dev_info->ctl_name);
785 785
786 continue; 786 continue;
787 787
788 err2: 788 err2:
789 if (dev_info->exit) 789 if (dev_info->exit)
790 dev_info->exit(dev_info); 790 dev_info->exit(dev_info);
791 edac_device_free_ctl_info(dev_info->edac_dev); 791 edac_device_free_ctl_info(dev_info->edac_dev);
792 err1: 792 err1:
793 platform_device_unregister(dev_info->pdev); 793 platform_device_unregister(dev_info->pdev);
794 } 794 }
795 } 795 }
796 796
797 /* 797 /*
798 * Delete the common "edac_device" for CPU Err Detection 798 * Delete the common "edac_device" for CPU Err Detection
799 * and HyperTransport Link Err Detection 799 * and HyperTransport Link Err Detection
800 */ 800 */
801 static void cpc925_del_edac_devices(void) 801 static void cpc925_del_edac_devices(void)
802 { 802 {
803 struct cpc925_dev_info *dev_info; 803 struct cpc925_dev_info *dev_info;
804 804
805 for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { 805 for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
806 if (dev_info->edac_dev) { 806 if (dev_info->edac_dev) {
807 edac_device_del_device(dev_info->edac_dev->dev); 807 edac_device_del_device(dev_info->edac_dev->dev);
808 edac_device_free_ctl_info(dev_info->edac_dev); 808 edac_device_free_ctl_info(dev_info->edac_dev);
809 platform_device_unregister(dev_info->pdev); 809 platform_device_unregister(dev_info->pdev);
810 } 810 }
811 811
812 if (dev_info->exit) 812 if (dev_info->exit)
813 dev_info->exit(dev_info); 813 dev_info->exit(dev_info);
814 814
815 debugf0("%s: Successfully deleted edac device for %s\n", 815 debugf0("%s: Successfully deleted edac device for %s\n",
816 __func__, dev_info->ctl_name); 816 __func__, dev_info->ctl_name);
817 } 817 }
818 } 818 }
819 819
820 /* Convert the current background scrub rate into bytes/sec bandwidth */ 820 /* Convert the current background scrub rate into bytes/sec bandwidth */
821 static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) 821 static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
822 { 822 {
823 struct cpc925_mc_pdata *pdata = mci->pvt_info; 823 struct cpc925_mc_pdata *pdata = mci->pvt_info;
824 int bw;
824 u32 mscr; 825 u32 mscr;
825 u8 si; 826 u8 si;
826 827
827 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); 828 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
828 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; 829 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
829 830
830 debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); 831 debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr);
831 832
832 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || 833 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
833 (si == 0)) { 834 (si == 0)) {
834 cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); 835 cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
835 *bw = 0; 836 bw = 0;
836 } else 837 } else
837 *bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; 838 bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
838 839
839 return 0; 840 return bw;
840 } 841 }
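This hunk is the heart of the commit for this driver: ->get_sdram_scrub_rate() now returns the scrub bandwidth in bytes/sec directly (a negative value still means an error), instead of writing it through the removed *bw argument. A minimal sketch of how a sysfs-style caller could consume the new convention (hypothetical caller, not part of this commit):

	static ssize_t show_sdram_scrub_rate(struct mem_ctl_info *mci, char *buf)
	{
		int bw;

		if (!mci->get_sdram_scrub_rate)
			return -ENODEV;

		bw = mci->get_sdram_scrub_rate(mci);
		if (bw < 0) /* negative return means the read failed */
			return bw;

		return sprintf(buf, "%d\n", bw); /* bytes/sec bandwidth */
	}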
841 842
842 /* Return 0 for single channel; 1 for dual channel */ 843 /* Return 0 for single channel; 1 for dual channel */
843 static int cpc925_mc_get_channels(void __iomem *vbase) 844 static int cpc925_mc_get_channels(void __iomem *vbase)
844 { 845 {
845 int dual = 0; 846 int dual = 0;
846 u32 mbcr; 847 u32 mbcr;
847 848
848 mbcr = __raw_readl(vbase + REG_MBCR_OFFSET); 849 mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
849 850
850 /* 851 /*
851 * Dual channel only when 128-bit wide physical bus 852 * Dual channel only when 128-bit wide physical bus
852 * and 128-bit configuration. 853 * and 128-bit configuration.
853 */ 854 */
854 if (((mbcr & MBCR_64BITCFG_MASK) == 0) && 855 if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
855 ((mbcr & MBCR_64BITBUS_MASK) == 0)) 856 ((mbcr & MBCR_64BITBUS_MASK) == 0))
856 dual = 1; 857 dual = 1;
857 858
858 debugf0("%s: %s channel\n", __func__, 859 debugf0("%s: %s channel\n", __func__,
859 (dual > 0) ? "Dual" : "Single"); 860 (dual > 0) ? "Dual" : "Single");
860 861
861 return dual; 862 return dual;
862 } 863 }
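For example, an MBCR value with both MBCR_64BITCFG_MASK and MBCR_64BITBUS_MASK bits clear means a 128-bit bus in 128-bit configuration, so this returns 1 and cpc925_probe() below allocates nr_channels + 1 = 2 channels per csrow.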
863 864
864 static int __devinit cpc925_probe(struct platform_device *pdev) 865 static int __devinit cpc925_probe(struct platform_device *pdev)
865 { 866 {
866 static int edac_mc_idx; 867 static int edac_mc_idx;
867 struct mem_ctl_info *mci; 868 struct mem_ctl_info *mci;
868 void __iomem *vbase; 869 void __iomem *vbase;
869 struct cpc925_mc_pdata *pdata; 870 struct cpc925_mc_pdata *pdata;
870 struct resource *r; 871 struct resource *r;
871 int res = 0, nr_channels; 872 int res = 0, nr_channels;
872 873
873 debugf0("%s: %s platform device found!\n", __func__, pdev->name); 874 debugf0("%s: %s platform device found!\n", __func__, pdev->name);
874 875
875 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { 876 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
876 res = -ENOMEM; 877 res = -ENOMEM;
877 goto out; 878 goto out;
878 } 879 }
879 880
880 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 881 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
881 if (!r) { 882 if (!r) {
882 cpc925_printk(KERN_ERR, "Unable to get resource\n"); 883 cpc925_printk(KERN_ERR, "Unable to get resource\n");
883 res = -ENOENT; 884 res = -ENOENT;
884 goto err1; 885 goto err1;
885 } 886 }
886 887
887 if (!devm_request_mem_region(&pdev->dev, 888 if (!devm_request_mem_region(&pdev->dev,
888 r->start, 889 r->start,
889 resource_size(r), 890 resource_size(r),
890 pdev->name)) { 891 pdev->name)) {
891 cpc925_printk(KERN_ERR, "Unable to request mem region\n"); 892 cpc925_printk(KERN_ERR, "Unable to request mem region\n");
892 res = -EBUSY; 893 res = -EBUSY;
893 goto err1; 894 goto err1;
894 } 895 }
895 896
896 vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); 897 vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
897 if (!vbase) { 898 if (!vbase) {
898 cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); 899 cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
899 res = -ENOMEM; 900 res = -ENOMEM;
900 goto err2; 901 goto err2;
901 } 902 }
902 903
903 nr_channels = cpc925_mc_get_channels(vbase); 904 nr_channels = cpc925_mc_get_channels(vbase);
904 mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata), 905 mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
905 CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx); 906 CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
906 if (!mci) { 907 if (!mci) {
907 cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); 908 cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
908 res = -ENOMEM; 909 res = -ENOMEM;
909 goto err2; 910 goto err2;
910 } 911 }
911 912
912 pdata = mci->pvt_info; 913 pdata = mci->pvt_info;
913 pdata->vbase = vbase; 914 pdata->vbase = vbase;
914 pdata->edac_idx = edac_mc_idx++; 915 pdata->edac_idx = edac_mc_idx++;
915 pdata->name = pdev->name; 916 pdata->name = pdev->name;
916 917
917 mci->dev = &pdev->dev; 918 mci->dev = &pdev->dev;
918 platform_set_drvdata(pdev, mci); 919 platform_set_drvdata(pdev, mci);
919 mci->dev_name = dev_name(&pdev->dev); 920 mci->dev_name = dev_name(&pdev->dev);
920 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 921 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
921 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 922 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
922 mci->edac_cap = EDAC_FLAG_SECDED; 923 mci->edac_cap = EDAC_FLAG_SECDED;
923 mci->mod_name = CPC925_EDAC_MOD_STR; 924 mci->mod_name = CPC925_EDAC_MOD_STR;
924 mci->mod_ver = CPC925_EDAC_REVISION; 925 mci->mod_ver = CPC925_EDAC_REVISION;
925 mci->ctl_name = pdev->name; 926 mci->ctl_name = pdev->name;
926 927
927 if (edac_op_state == EDAC_OPSTATE_POLL) 928 if (edac_op_state == EDAC_OPSTATE_POLL)
928 mci->edac_check = cpc925_mc_check; 929 mci->edac_check = cpc925_mc_check;
929 930
930 mci->ctl_page_to_phys = NULL; 931 mci->ctl_page_to_phys = NULL;
931 mci->scrub_mode = SCRUB_SW_SRC; 932 mci->scrub_mode = SCRUB_SW_SRC;
932 mci->set_sdram_scrub_rate = NULL; 933 mci->set_sdram_scrub_rate = NULL;
933 mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate; 934 mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
934 935
935 cpc925_init_csrows(mci); 936 cpc925_init_csrows(mci);
936 937
937 /* Setup memory controller registers */ 938 /* Setup memory controller registers */
938 cpc925_mc_init(mci); 939 cpc925_mc_init(mci);
939 940
940 if (edac_mc_add_mc(mci) > 0) { 941 if (edac_mc_add_mc(mci) > 0) {
941 cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n"); 942 cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
942 goto err3; 943 goto err3;
943 } 944 }
944 945
945 cpc925_add_edac_devices(vbase); 946 cpc925_add_edac_devices(vbase);
946 947
947 /* If we get this far, the probe was successful */ 948 /* If we get this far, the probe was successful */
948 debugf0("%s: success\n", __func__); 949 debugf0("%s: success\n", __func__);
949 950
950 res = 0; 951 res = 0;
951 goto out; 952 goto out;
952 953
953 err3: 954 err3:
954 cpc925_mc_exit(mci); 955 cpc925_mc_exit(mci);
955 edac_mc_free(mci); 956 edac_mc_free(mci);
956 err2: 957 err2:
957 devm_release_mem_region(&pdev->dev, r->start, resource_size(r)); 958 devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
958 err1: 959 err1:
959 devres_release_group(&pdev->dev, cpc925_probe); 960 devres_release_group(&pdev->dev, cpc925_probe);
960 out: 961 out:
961 return res; 962 return res;
962 } 963 }
963 964
964 static int cpc925_remove(struct platform_device *pdev) 965 static int cpc925_remove(struct platform_device *pdev)
965 { 966 {
966 struct mem_ctl_info *mci = platform_get_drvdata(pdev); 967 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
967 968
968 /* 969 /*
969 * Delete the common edac devices before the edac mc, since 970 * Delete the common edac devices before the edac mc, since
970 * the former share the latter's MMIO range. 971 * the former share the latter's MMIO range.
971 */ 972 */
972 cpc925_del_edac_devices(); 973 cpc925_del_edac_devices();
973 cpc925_mc_exit(mci); 974 cpc925_mc_exit(mci);
974 975
975 edac_mc_del_mc(&pdev->dev); 976 edac_mc_del_mc(&pdev->dev);
976 edac_mc_free(mci); 977 edac_mc_free(mci);
977 978
978 return 0; 979 return 0;
979 } 980 }
980 981
981 static struct platform_driver cpc925_edac_driver = { 982 static struct platform_driver cpc925_edac_driver = {
982 .probe = cpc925_probe, 983 .probe = cpc925_probe,
983 .remove = cpc925_remove, 984 .remove = cpc925_remove,
984 .driver = { 985 .driver = {
985 .name = "cpc925_edac", 986 .name = "cpc925_edac",
986 } 987 }
987 }; 988 };
988 989
989 static int __init cpc925_edac_init(void) 990 static int __init cpc925_edac_init(void)
990 { 991 {
991 int ret = 0; 992 int ret = 0;
992 993
993 printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n"); 994 printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
994 printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n"); 995 printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
995 996
996 /* Only support POLL mode so far */ 997 /* Only support POLL mode so far */
997 edac_op_state = EDAC_OPSTATE_POLL; 998 edac_op_state = EDAC_OPSTATE_POLL;
998 999
999 ret = platform_driver_register(&cpc925_edac_driver); 1000 ret = platform_driver_register(&cpc925_edac_driver);
1000 if (ret) { 1001 if (ret) {
1001 printk(KERN_WARNING "Failed to register %s\n", 1002 printk(KERN_WARNING "Failed to register %s\n",
1002 CPC925_EDAC_MOD_STR); 1003 CPC925_EDAC_MOD_STR);
1003 } 1004 }
1004 1005
1005 return ret; 1006 return ret;
1006 } 1007 }
1007 1008
1008 static void __exit cpc925_edac_exit(void) 1009 static void __exit cpc925_edac_exit(void)
1009 { 1010 {
1010 platform_driver_unregister(&cpc925_edac_driver); 1011 platform_driver_unregister(&cpc925_edac_driver);
1011 } 1012 }
1012 1013
1013 module_init(cpc925_edac_init); 1014 module_init(cpc925_edac_init);
1014 module_exit(cpc925_edac_exit); 1015 module_exit(cpc925_edac_exit);
1015 1016
1016 MODULE_LICENSE("GPL"); 1017 MODULE_LICENSE("GPL");
1017 MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>"); 1018 MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
1018 MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module"); 1019 MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
1019 1020
drivers/edac/e752x_edac.c
1 /* 1 /*
2 * Intel e752x Memory Controller kernel module 2 * Intel e752x Memory Controller kernel module
3 * (C) 2004 Linux Networx (http://lnxi.com) 3 * (C) 2004 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * See "enum e752x_chips" below for supported chipsets 7 * See "enum e752x_chips" below for supported chipsets
8 * 8 *
9 * Written by Tom Zimmerman 9 * Written by Tom Zimmerman
10 * 10 *
11 * Contributors: 11 * Contributors:
12 * Thayne Harbaugh at realmsys.com (?) 12 * Thayne Harbaugh at realmsys.com (?)
13 * Wang Zhenyu at intel.com 13 * Wang Zhenyu at intel.com
14 * Dave Jiang at mvista.com 14 * Dave Jiang at mvista.com
15 * 15 *
16 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ 16 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
17 * 17 *
18 */ 18 */
19 19
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/init.h> 21 #include <linux/init.h>
22 #include <linux/pci.h> 22 #include <linux/pci.h>
23 #include <linux/pci_ids.h> 23 #include <linux/pci_ids.h>
24 #include <linux/edac.h> 24 #include <linux/edac.h>
25 #include "edac_core.h" 25 #include "edac_core.h"
26 26
27 #define E752X_REVISION " Ver: 2.0.2 " __DATE__ 27 #define E752X_REVISION " Ver: 2.0.2 " __DATE__
28 #define EDAC_MOD_STR "e752x_edac" 28 #define EDAC_MOD_STR "e752x_edac"
29 29
30 static int report_non_memory_errors; 30 static int report_non_memory_errors;
31 static int force_function_unhide; 31 static int force_function_unhide;
32 static int sysbus_parity = -1; 32 static int sysbus_parity = -1;
33 33
34 static struct edac_pci_ctl_info *e752x_pci; 34 static struct edac_pci_ctl_info *e752x_pci;
35 35
36 #define e752x_printk(level, fmt, arg...) \ 36 #define e752x_printk(level, fmt, arg...) \
37 edac_printk(level, "e752x", fmt, ##arg) 37 edac_printk(level, "e752x", fmt, ##arg)
38 38
39 #define e752x_mc_printk(mci, level, fmt, arg...) \ 39 #define e752x_mc_printk(mci, level, fmt, arg...) \
40 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg) 40 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
41 41
42 #ifndef PCI_DEVICE_ID_INTEL_7520_0 42 #ifndef PCI_DEVICE_ID_INTEL_7520_0
43 #define PCI_DEVICE_ID_INTEL_7520_0 0x3590 43 #define PCI_DEVICE_ID_INTEL_7520_0 0x3590
44 #endif /* PCI_DEVICE_ID_INTEL_7520_0 */ 44 #endif /* PCI_DEVICE_ID_INTEL_7520_0 */
45 45
46 #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR 46 #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
47 #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591 47 #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
48 #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */ 48 #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
49 49
50 #ifndef PCI_DEVICE_ID_INTEL_7525_0 50 #ifndef PCI_DEVICE_ID_INTEL_7525_0
51 #define PCI_DEVICE_ID_INTEL_7525_0 0x359E 51 #define PCI_DEVICE_ID_INTEL_7525_0 0x359E
52 #endif /* PCI_DEVICE_ID_INTEL_7525_0 */ 52 #endif /* PCI_DEVICE_ID_INTEL_7525_0 */
53 53
54 #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR 54 #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
55 #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593 55 #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
56 #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */ 56 #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
57 57
58 #ifndef PCI_DEVICE_ID_INTEL_7320_0 58 #ifndef PCI_DEVICE_ID_INTEL_7320_0
59 #define PCI_DEVICE_ID_INTEL_7320_0 0x3592 59 #define PCI_DEVICE_ID_INTEL_7320_0 0x3592
60 #endif /* PCI_DEVICE_ID_INTEL_7320_0 */ 60 #endif /* PCI_DEVICE_ID_INTEL_7320_0 */
61 61
62 #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR 62 #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
63 #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 63 #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
64 #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ 64 #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
65 65
66 #ifndef PCI_DEVICE_ID_INTEL_3100_0 66 #ifndef PCI_DEVICE_ID_INTEL_3100_0
67 #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0 67 #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
68 #endif /* PCI_DEVICE_ID_INTEL_3100_0 */ 68 #endif /* PCI_DEVICE_ID_INTEL_3100_0 */
69 69
70 #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR 70 #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
71 #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1 71 #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
72 #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */ 72 #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
73 73
74 #define E752X_NR_CSROWS 8 /* number of csrows */ 74 #define E752X_NR_CSROWS 8 /* number of csrows */
75 75
76 /* E752X register addresses - device 0 function 0 */ 76 /* E752X register addresses - device 0 function 0 */
77 #define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */ 77 #define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */
78 /* 78 /*
79 * 6:5 Scrub Completion Count 79 * 6:5 Scrub Completion Count
80 * 3:2 Scrub Rate (i3100 only) 80 * 3:2 Scrub Rate (i3100 only)
81 * 01=fast 10=normal 81 * 01=fast 10=normal
82 * 1:0 Scrub Mode enable 82 * 1:0 Scrub Mode enable
83 * 00=off 10=on 83 * 00=off 10=on
84 */ 84 */
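The layout above maps onto simple mask tests. A sketch of checking the scrub-enable field, assuming a pci_dev for device 0 function 0 (the helper is illustrative, not part of the driver):

	/* Scrub mode is enabled when bits 1:0 of E752X_MCHSCRB read 10b. */
	static int e752x_scrub_enabled(struct pci_dev *pdev)
	{
		u16 mchscrb;

		pci_read_config_word(pdev, E752X_MCHSCRB, &mchscrb);

		return (mchscrb & 0x03) == 0x02;
	}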
85 #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ 85 #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
86 #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ 86 #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
87 /* 87 /*
88 * 31:30 Device width row 7 88 * 31:30 Device width row 7
89 * 01=x8 10=x4 11=x8 DDR2 89 * 01=x8 10=x4 11=x8 DDR2
90 * 27:26 Device width row 6 90 * 27:26 Device width row 6
91 * 23:22 Device width row 5 91 * 23:22 Device width row 5
92 * 19:18 Device width row 4 92 * 19:18 Device width row 4
93 * 15:14 Device width row 3 93 * 15:14 Device width row 3
94 * 11:10 Device width row 2 94 * 11:10 Device width row 2
95 * 7:6 Device width row 1 95 * 7:6 Device width row 1
96 * 3:2 Device width row 0 96 * 3:2 Device width row 0
97 */ 97 */
98 #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */ 98 #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
99 /* FIXME:IS THIS RIGHT? */ 99 /* FIXME:IS THIS RIGHT? */
100 /* 100 /*
101 * 22 Number of channels: 0=1, 1=2 101 * 22 Number of channels: 0=1, 1=2
102 * 19:18 DRB Granularity 32/64MB 102 * 19:18 DRB Granularity 32/64MB
103 */ 103 */
104 #define E752X_DRM 0x80 /* Dimm mapping register */ 104 #define E752X_DRM 0x80 /* Dimm mapping register */
105 #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */ 105 #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
106 /* 106 /*
107 * 14:12 1 single A, 2 single B, 3 dual 107 * 14:12 1 single A, 2 single B, 3 dual
108 */ 108 */
109 #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ 109 #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
110 #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ 110 #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
111 #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ 111 #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
112 #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */ 112 #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
113 113
114 /* E752X register addresses - device 0 function 1 */ 114 /* E752X register addresses - device 0 function 1 */
115 #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */ 115 #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
116 #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */ 116 #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
117 #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */ 117 #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
118 #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */ 118 #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
119 #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */ 119 #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
120 #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */ 120 #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
121 #define E752X_SYSBUS_FERR 0x60 /* System bus first error reg (16b) */ 121 #define E752X_SYSBUS_FERR 0x60 /* System bus first error reg (16b) */
122 #define E752X_SYSBUS_NERR 0x62 /* System bus next error reg (16b) */ 122 #define E752X_SYSBUS_NERR 0x62 /* System bus next error reg (16b) */
123 #define E752X_SYSBUS_ERRMASK 0x64 /* System bus error mask reg (16b) */ 123 #define E752X_SYSBUS_ERRMASK 0x64 /* System bus error mask reg (16b) */
124 #define E752X_SYSBUS_SMICMD 0x6A /* System bus SMI command reg (16b) */ 124 #define E752X_SYSBUS_SMICMD 0x6A /* System bus SMI command reg (16b) */
125 #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */ 125 #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
126 #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */ 126 #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
127 #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */ 127 #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
128 #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */ 128 #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
129 #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */ 129 #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
130 #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */ 130 #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
131 #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */ 131 #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
132 #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */ 132 #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
133 #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */ 133 #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
134 #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */ 134 #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
135 /* error address register (32b) */ 135 /* error address register (32b) */
136 /* 136 /*
137 * 31 Reserved 137 * 31 Reserved
138 * 30:2 CE address (64 byte block 34:6) 138 * 30:2 CE address (64 byte block 34:6)
139 * 1 Reserved 139 * 1 Reserved
140 * 0 HiLoCS 140 * 0 HiLoCS
141 */ 141 */
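Bits 30:2 of this register hold physical address bits 34:6 (a 64-byte block address), so recovering the byte address is a mask plus a 4-bit left shift. A hedged illustration (pdev and variable names assumed):

	u32 sec1_add; /* raw E752X_DRAM_SEC1_ADD value */
	u64 phys;

	pci_read_config_dword(pdev, E752X_DRAM_SEC1_ADD, &sec1_add);
	phys = ((u64)(sec1_add & 0x7ffffffc)) << 4; /* PA bits 34:6 */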
142 #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM second correctable memory */ 142 #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM second correctable memory */
143 /* error address register (32b) */ 143 /* error address register (32b) */
144 /* 144 /*
145 * 31 Reserved 145 * 31 Reserved
146 * 30:2 CE address (64 byte block 34:6) 146 * 30:2 CE address (64 byte block 34:6)
147 * 1 Reserved 147 * 1 Reserved
148 * 0 HiLoCS 148 * 0 HiLoCS
149 */ 149 */
150 #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */ 150 #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
151 /* error address register (32b) */ 151 /* error address register (32b) */
152 /* 152 /*
153 * 31 Reserved 153 * 31 Reserved
154 * 30:2 CE address (64 byte block 34:6) 154 * 30:2 CE address (64 byte block 34:6)
155 * 1 Reserved 155 * 1 Reserved
156 * 0 HiLoCS 156 * 0 HiLoCS
157 */ 157 */
158 #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */ 158 #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
159 /* error address register (32b) */ 159 /* error address register (32b) */
160 /* 160 /*
161 * 31 Reserved 161 * 31 Reserved
162 * 30:2 UE address (64 byte block 34:6) 162 * 30:2 UE address (64 byte block 34:6)
163 * 1 Reserved 163 * 1 Reserved
164 * 0 HiLoCS 164 * 0 HiLoCS
165 */ 165 */
166 #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */ 166 #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
167 /* error syndrome register (16b) */ 167 /* error syndrome register (16b) */
168 #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */ 168 #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
169 /* error syndrome register (16b) */ 169 /* error syndrome register (16b) */
170 #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ 170 #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
171 171
172 /* 3100 IMCH specific register addresses - device 0 function 1 */ 172 /* 3100 IMCH specific register addresses - device 0 function 1 */
173 #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */ 173 #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
174 #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */ 174 #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
175 #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */ 175 #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
176 #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */ 176 #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
177 177
178 /* ICH5R register addresses - device 30 function 0 */ 178 /* ICH5R register addresses - device 30 function 0 */
179 #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ 179 #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
180 #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ 180 #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
181 #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */ 181 #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
182 182
183 enum e752x_chips { 183 enum e752x_chips {
184 E7520 = 0, 184 E7520 = 0,
185 E7525 = 1, 185 E7525 = 1,
186 E7320 = 2, 186 E7320 = 2,
187 I3100 = 3 187 I3100 = 3
188 }; 188 };
189 189
190 struct e752x_pvt { 190 struct e752x_pvt {
191 struct pci_dev *bridge_ck; 191 struct pci_dev *bridge_ck;
192 struct pci_dev *dev_d0f0; 192 struct pci_dev *dev_d0f0;
193 struct pci_dev *dev_d0f1; 193 struct pci_dev *dev_d0f1;
194 u32 tolm; 194 u32 tolm;
195 u32 remapbase; 195 u32 remapbase;
196 u32 remaplimit; 196 u32 remaplimit;
197 int mc_symmetric; 197 int mc_symmetric;
198 u8 map[8]; 198 u8 map[8];
199 int map_type; 199 int map_type;
200 const struct e752x_dev_info *dev_info; 200 const struct e752x_dev_info *dev_info;
201 }; 201 };
202 202
203 struct e752x_dev_info { 203 struct e752x_dev_info {
204 u16 err_dev; 204 u16 err_dev;
205 u16 ctl_dev; 205 u16 ctl_dev;
206 const char *ctl_name; 206 const char *ctl_name;
207 }; 207 };
208 208
209 struct e752x_error_info { 209 struct e752x_error_info {
210 u32 ferr_global; 210 u32 ferr_global;
211 u32 nerr_global; 211 u32 nerr_global;
212 u32 nsi_ferr; /* 3100 only */ 212 u32 nsi_ferr; /* 3100 only */
213 u32 nsi_nerr; /* 3100 only */ 213 u32 nsi_nerr; /* 3100 only */
214 u8 hi_ferr; /* all but 3100 */ 214 u8 hi_ferr; /* all but 3100 */
215 u8 hi_nerr; /* all but 3100 */ 215 u8 hi_nerr; /* all but 3100 */
216 u16 sysbus_ferr; 216 u16 sysbus_ferr;
217 u16 sysbus_nerr; 217 u16 sysbus_nerr;
218 u8 buf_ferr; 218 u8 buf_ferr;
219 u8 buf_nerr; 219 u8 buf_nerr;
220 u16 dram_ferr; 220 u16 dram_ferr;
221 u16 dram_nerr; 221 u16 dram_nerr;
222 u32 dram_sec1_add; 222 u32 dram_sec1_add;
223 u32 dram_sec2_add; 223 u32 dram_sec2_add;
224 u16 dram_sec1_syndrome; 224 u16 dram_sec1_syndrome;
225 u16 dram_sec2_syndrome; 225 u16 dram_sec2_syndrome;
226 u32 dram_ded_add; 226 u32 dram_ded_add;
227 u32 dram_scrb_add; 227 u32 dram_scrb_add;
228 u32 dram_retr_add; 228 u32 dram_retr_add;
229 }; 229 };
230 230
231 static const struct e752x_dev_info e752x_devs[] = { 231 static const struct e752x_dev_info e752x_devs[] = {
232 [E7520] = { 232 [E7520] = {
233 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, 233 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
234 .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0, 234 .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
235 .ctl_name = "E7520"}, 235 .ctl_name = "E7520"},
236 [E7525] = { 236 [E7525] = {
237 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, 237 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
238 .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0, 238 .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
239 .ctl_name = "E7525"}, 239 .ctl_name = "E7525"},
240 [E7320] = { 240 [E7320] = {
241 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, 241 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
242 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, 242 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
243 .ctl_name = "E7320"}, 243 .ctl_name = "E7320"},
244 [I3100] = { 244 [I3100] = {
245 .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR, 245 .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
246 .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0, 246 .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
247 .ctl_name = "3100"}, 247 .ctl_name = "3100"},
248 }; 248 };
249 249
250 /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We 250 /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
251 * map the scrubbing bandwidth to a hardware register value. The 'set' 251 * map the scrubbing bandwidth to a hardware register value. The 'set'
252 * operation finds the 'matching or higher value'. Note that scrubbing 252 * operation finds the 'matching or higher value'. Note that scrubbing
253 * on the e752x can only be enabled/disabled. The 3100 supports 253 * on the e752x can only be enabled/disabled. The 3100 supports
254 * a normal and fast mode. 254 * a normal and fast mode.
255 */ 255 */
256 256
257 #define SDRATE_EOT 0xFFFFFFFF 257 #define SDRATE_EOT 0xFFFFFFFF
258 258
259 struct scrubrate { 259 struct scrubrate {
260 u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */ 260 u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */
261 u16 scrubval; /* register value for scrub rate */ 261 u16 scrubval; /* register value for scrub rate */
262 }; 262 };
263 263
264 /* Rate below assumes same performance as i3100 using PC3200 DDR2 in 264 /* Rate below assumes same performance as i3100 using PC3200 DDR2 in
265 * normal mode. e752x bridges don't support choosing normal or fast mode, 265 * normal mode. e752x bridges don't support choosing normal or fast mode,
266 * so the scrubbing bandwidth value isn't all that important - scrubbing is 266 * so the scrubbing bandwidth value isn't all that important - scrubbing is
267 * either on or off. 267 * either on or off.
268 */ 268 */
269 static const struct scrubrate scrubrates_e752x[] = { 269 static const struct scrubrate scrubrates_e752x[] = {
270 {0, 0x00}, /* Scrubbing Off */ 270 {0, 0x00}, /* Scrubbing Off */
271 {500000, 0x02}, /* Scrubbing On */ 271 {500000, 0x02}, /* Scrubbing On */
272 {SDRATE_EOT, 0x00} /* End of Table */ 272 {SDRATE_EOT, 0x00} /* End of Table */
273 }; 273 };
274 274
275 /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s 275 /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
276 * Normal mode: 125 (32000 / 256) times slower than fast mode. 276 * Normal mode: 125 (32000 / 256) times slower than fast mode.
277 */ 277 */
278 static const struct scrubrate scrubrates_i3100[] = { 278 static const struct scrubrate scrubrates_i3100[] = {
279 {0, 0x00}, /* Scrubbing Off */ 279 {0, 0x00}, /* Scrubbing Off */
280 {500000, 0x0a}, /* Normal mode - 32k clocks */ 280 {500000, 0x0a}, /* Normal mode - 32k clocks */
281 {62500000, 0x06}, /* Fast mode - 256 clocks */ 281 {62500000, 0x06}, /* Fast mode - 256 clocks */
282 {SDRATE_EOT, 0x00} /* End of Table */ 282 {SDRATE_EOT, 0x00} /* End of Table */
283 }; 283 };
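
Both set paths below rely on the same 'matching or higher' walk over a SDRATE_EOT-terminated table. A minimal, self-contained sketch of that convention (pick_scrubrate is a hypothetical helper, not part of the driver):

static int pick_scrubrate(const struct scrubrate *tbl, u32 new_bw)
{
	int i;

	/* first entry whose bandwidth is >= the requested one */
	for (i = 0; tbl[i].bandwidth != SDRATE_EOT; i++)
		if (tbl[i].bandwidth >= new_bw)
			return i;

	return -1;	/* request exceeds every table entry */
}

E.g. pick_scrubrate(scrubrates_i3100, 1000000) selects the fast-mode entry (62500000 bytes/s), since the normal-mode 500000 bytes/s falls short of the request.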
284 284
285 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, 285 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
286 unsigned long page) 286 unsigned long page)
287 { 287 {
288 u32 remap; 288 u32 remap;
289 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 289 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
290 290
291 debugf3("%s()\n", __func__); 291 debugf3("%s()\n", __func__);
292 292
293 if (page < pvt->tolm) 293 if (page < pvt->tolm)
294 return page; 294 return page;
295 295
296 if ((page >= 0x100000) && (page < pvt->remapbase)) 296 if ((page >= 0x100000) && (page < pvt->remapbase))
297 return page; 297 return page;
298 298
299 remap = (page - pvt->tolm) + pvt->remapbase; 299 remap = (page - pvt->tolm) + pvt->remapbase;
300 300
301 if (remap < pvt->remaplimit) 301 if (remap < pvt->remaplimit)
302 return remap; 302 return remap;
303 303
304 e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); 304 e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
305 return pvt->tolm - 1; 305 return pvt->tolm - 1;
306 } 306 }
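
As a worked example of the remap arithmetic above (values are hypothetical): with tolm = 0xc0000 pages (3 GiB) and remapbase = 0x100000 pages (4 GiB), a page of 0xd0000 is above tolm and below the 0x100000 boundary, so remap = (0xd0000 - 0xc0000) + 0x100000 = 0x110000; provided remaplimit exceeds that, 0x110000 is returned, while any page below 0xc0000 maps to itself.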
307 307
308 static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, 308 static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
309 u32 sec1_add, u16 sec1_syndrome) 309 u32 sec1_add, u16 sec1_syndrome)
310 { 310 {
311 u32 page; 311 u32 page;
312 int row; 312 int row;
313 int channel; 313 int channel;
314 int i; 314 int i;
315 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 315 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
316 316
317 debugf3("%s()\n", __func__); 317 debugf3("%s()\n", __func__);
318 318
319 /* convert the addr to 4k page */ 319 /* convert the addr to 4k page */
320 page = sec1_add >> (PAGE_SHIFT - 4); 320 page = sec1_add >> (PAGE_SHIFT - 4);
321 321
322 /* FIXME - check for -1 */ 322 /* FIXME - check for -1 */
323 if (pvt->mc_symmetric) { 323 if (pvt->mc_symmetric) {
324 /* chip select is bits 14 & 13 */ 324 /* chip select is bits 14 & 13 */
325 row = ((page >> 1) & 3); 325 row = ((page >> 1) & 3);
326 e752x_printk(KERN_WARNING, 326 e752x_printk(KERN_WARNING,
327 "Test row %d Table %d %d %d %d %d %d %d %d\n", row, 327 "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
328 pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], 328 pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
329 pvt->map[4], pvt->map[5], pvt->map[6], 329 pvt->map[4], pvt->map[5], pvt->map[6],
330 pvt->map[7]); 330 pvt->map[7]);
331 331
332 /* test for channel remapping */ 332 /* test for channel remapping */
333 for (i = 0; i < 8; i++) { 333 for (i = 0; i < 8; i++) {
334 if (pvt->map[i] == row) 334 if (pvt->map[i] == row)
335 break; 335 break;
336 } 336 }
337 337
338 e752x_printk(KERN_WARNING, "Test computed row %d\n", i); 338 e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
339 339
340 if (i < 8) 340 if (i < 8)
341 row = i; 341 row = i;
342 else 342 else
343 e752x_mc_printk(mci, KERN_WARNING, 343 e752x_mc_printk(mci, KERN_WARNING,
344 "row %d not found in remap table\n", 344 "row %d not found in remap table\n",
345 row); 345 row);
346 } else 346 } else
347 row = edac_mc_find_csrow_by_page(mci, page); 347 row = edac_mc_find_csrow_by_page(mci, page);
348 348
349 /* 0 = channel A, 1 = channel B */ 349 /* 0 = channel A, 1 = channel B */
350 channel = !(error_one & 1); 350 channel = !(error_one & 1);
351 351
352 /* e752x mc reads 34:6 of the DRAM linear address */ 352 /* e752x mc reads 34:6 of the DRAM linear address */
353 edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4), 353 edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
354 sec1_syndrome, row, channel, "e752x CE"); 354 sec1_syndrome, row, channel, "e752x CE");
355 } 355 }
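
The shift pair in do_process_ce() follows from the register layout noted earlier: bits 30:2 of the logged value hold DRAM address bits 34:6, so the byte address is (roughly) the register value times 16. A sketch of the derivation, assuming a 4 KiB page with PAGE_SHIFT = 12:

	addr   = sec1_add << 4;			/* scale the 34:6 field back up */
	page   = addr >> PAGE_SHIFT;		/* == sec1_add >> (PAGE_SHIFT - 4) */
	offset = offset_in_page(sec1_add << 4);	/* byte offset within that page */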
356 356
357 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, 357 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
358 u32 sec1_add, u16 sec1_syndrome, int *error_found, 358 u32 sec1_add, u16 sec1_syndrome, int *error_found,
359 int handle_error) 359 int handle_error)
360 { 360 {
361 *error_found = 1; 361 *error_found = 1;
362 362
363 if (handle_error) 363 if (handle_error)
364 do_process_ce(mci, error_one, sec1_add, sec1_syndrome); 364 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
365 } 365 }
366 366
367 static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, 367 static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
368 u32 ded_add, u32 scrb_add) 368 u32 ded_add, u32 scrb_add)
369 { 369 {
370 u32 error_2b, block_page; 370 u32 error_2b, block_page;
371 int row; 371 int row;
372 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 372 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
373 373
374 debugf3("%s()\n", __func__); 374 debugf3("%s()\n", __func__);
375 375
376 if (error_one & 0x0202) { 376 if (error_one & 0x0202) {
377 error_2b = ded_add; 377 error_2b = ded_add;
378 378
379 /* convert to 4k address */ 379 /* convert to 4k address */
380 block_page = error_2b >> (PAGE_SHIFT - 4); 380 block_page = error_2b >> (PAGE_SHIFT - 4);
381 381
382 row = pvt->mc_symmetric ? 382 row = pvt->mc_symmetric ?
383 /* chip select is bits 14 & 13 */ 383 /* chip select is bits 14 & 13 */
384 ((block_page >> 1) & 3) : 384 ((block_page >> 1) & 3) :
385 edac_mc_find_csrow_by_page(mci, block_page); 385 edac_mc_find_csrow_by_page(mci, block_page);
386 386
387 /* e752x mc reads 34:6 of the DRAM linear address */ 387 /* e752x mc reads 34:6 of the DRAM linear address */
388 edac_mc_handle_ue(mci, block_page, 388 edac_mc_handle_ue(mci, block_page,
389 offset_in_page(error_2b << 4), 389 offset_in_page(error_2b << 4),
390 row, "e752x UE from Read"); 390 row, "e752x UE from Read");
391 } 391 }
392 if (error_one & 0x0404) { 392 if (error_one & 0x0404) {
393 error_2b = scrb_add; 393 error_2b = scrb_add;
394 394
395 /* convert to 4k address */ 395 /* convert to 4k address */
396 block_page = error_2b >> (PAGE_SHIFT - 4); 396 block_page = error_2b >> (PAGE_SHIFT - 4);
397 397
398 row = pvt->mc_symmetric ? 398 row = pvt->mc_symmetric ?
399 /* chip select is bits 14 & 13 */ 399 /* chip select is bits 14 & 13 */
400 ((block_page >> 1) & 3) : 400 ((block_page >> 1) & 3) :
401 edac_mc_find_csrow_by_page(mci, block_page); 401 edac_mc_find_csrow_by_page(mci, block_page);
402 402
403 /* e752x mc reads 34:6 of the DRAM linear address */ 403 /* e752x mc reads 34:6 of the DRAM linear address */
404 edac_mc_handle_ue(mci, block_page, 404 edac_mc_handle_ue(mci, block_page,
405 offset_in_page(error_2b << 4), 405 offset_in_page(error_2b << 4),
406 row, "e752x UE from Scrubber"); 406 row, "e752x UE from Scrubber");
407 } 407 }
408 } 408 }
409 409
410 static inline void process_ue(struct mem_ctl_info *mci, u16 error_one, 410 static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
411 u32 ded_add, u32 scrb_add, int *error_found, 411 u32 ded_add, u32 scrb_add, int *error_found,
412 int handle_error) 412 int handle_error)
413 { 413 {
414 *error_found = 1; 414 *error_found = 1;
415 415
416 if (handle_error) 416 if (handle_error)
417 do_process_ue(mci, error_one, ded_add, scrb_add); 417 do_process_ue(mci, error_one, ded_add, scrb_add);
418 } 418 }
419 419
420 static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, 420 static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
421 int *error_found, int handle_error) 421 int *error_found, int handle_error)
422 { 422 {
423 *error_found = 1; 423 *error_found = 1;
424 424
425 if (!handle_error) 425 if (!handle_error)
426 return; 426 return;
427 427
428 debugf3("%s()\n", __func__); 428 debugf3("%s()\n", __func__);
429 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); 429 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
430 } 430 }
431 431
432 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, 432 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
433 u32 retry_add) 433 u32 retry_add)
434 { 434 {
435 u32 error_1b, page; 435 u32 error_1b, page;
436 int row; 436 int row;
437 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; 437 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
438 438
439 error_1b = retry_add; 439 error_1b = retry_add;
440 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ 440 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
441 441
442 /* chip select is bits 14 & 13 */ 442 /* chip select is bits 14 & 13 */
443 row = pvt->mc_symmetric ? ((page >> 1) & 3) : 443 row = pvt->mc_symmetric ? ((page >> 1) & 3) :
444 edac_mc_find_csrow_by_page(mci, page); 444 edac_mc_find_csrow_by_page(mci, page);
445 445
446 e752x_mc_printk(mci, KERN_WARNING, 446 e752x_mc_printk(mci, KERN_WARNING,
447 "CE page 0x%lx, row %d : Memory read retry\n", 447 "CE page 0x%lx, row %d : Memory read retry\n",
448 (unsigned long)page, row); 448 (unsigned long)page, row);
449 } 449 }
450 450
451 static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, 451 static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
452 u32 retry_add, int *error_found, 452 u32 retry_add, int *error_found,
453 int handle_error) 453 int handle_error)
454 { 454 {
455 *error_found = 1; 455 *error_found = 1;
456 456
457 if (handle_error) 457 if (handle_error)
458 do_process_ded_retry(mci, error, retry_add); 458 do_process_ded_retry(mci, error, retry_add);
459 } 459 }
460 460
461 static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, 461 static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
462 int *error_found, int handle_error) 462 int *error_found, int handle_error)
463 { 463 {
464 *error_found = 1; 464 *error_found = 1;
465 465
466 if (handle_error) 466 if (handle_error)
467 e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n"); 467 e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
468 } 468 }
469 469
470 static char *global_message[11] = { 470 static char *global_message[11] = {
471 "PCI Express C1", 471 "PCI Express C1",
472 "PCI Express C", 472 "PCI Express C",
473 "PCI Express B1", 473 "PCI Express B1",
474 "PCI Express B", 474 "PCI Express B",
475 "PCI Express A1", 475 "PCI Express A1",
476 "PCI Express A", 476 "PCI Express A",
477 "DMA Controller", 477 "DMA Controller",
478 "HUB or NS Interface", 478 "HUB or NS Interface",
479 "System Bus", 479 "System Bus",
480 "DRAM Controller", /* 9th entry */ 480 "DRAM Controller", /* 9th entry */
481 "Internal Buffer" 481 "Internal Buffer"
482 }; 482 };
483 483
484 #define DRAM_ENTRY 9 484 #define DRAM_ENTRY 9
485 485
486 static char *fatal_message[2] = { "Non-Fatal ", "Fatal " }; 486 static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
487 487
488 static void do_global_error(int fatal, u32 errors) 488 static void do_global_error(int fatal, u32 errors)
489 { 489 {
490 int i; 490 int i;
491 491
492 for (i = 0; i < 11; i++) { 492 for (i = 0; i < 11; i++) {
493 if (errors & (1 << i)) { 493 if (errors & (1 << i)) {
494 /* If the error is from DRAM Controller OR 494 /* If the error is from DRAM Controller OR
495 * we are to report ALL errors, then 495 * we are to report ALL errors, then
496 * report the error 496 * report the error
497 */ 497 */
498 if ((i == DRAM_ENTRY) || report_non_memory_errors) 498 if ((i == DRAM_ENTRY) || report_non_memory_errors)
499 e752x_printk(KERN_WARNING, "%sError %s\n", 499 e752x_printk(KERN_WARNING, "%sError %s\n",
500 fatal_message[fatal], 500 fatal_message[fatal],
501 global_message[i]); 501 global_message[i]);
502 } 502 }
503 } 503 }
504 } 504 }
505 505
506 static inline void global_error(int fatal, u32 errors, int *error_found, 506 static inline void global_error(int fatal, u32 errors, int *error_found,
507 int handle_error) 507 int handle_error)
508 { 508 {
509 *error_found = 1; 509 *error_found = 1;
510 510
511 if (handle_error) 511 if (handle_error)
512 do_global_error(fatal, errors); 512 do_global_error(fatal, errors);
513 } 513 }
514 514
515 static char *hub_message[7] = { 515 static char *hub_message[7] = {
516 "HI Address or Command Parity", "HI Illegal Access", 516 "HI Address or Command Parity", "HI Illegal Access",
517 "HI Internal Parity", "Out of Range Access", 517 "HI Internal Parity", "Out of Range Access",
518 "HI Data Parity", "Enhanced Config Access", 518 "HI Data Parity", "Enhanced Config Access",
519 "Hub Interface Target Abort" 519 "Hub Interface Target Abort"
520 }; 520 };
521 521
522 static void do_hub_error(int fatal, u8 errors) 522 static void do_hub_error(int fatal, u8 errors)
523 { 523 {
524 int i; 524 int i;
525 525
526 for (i = 0; i < 7; i++) { 526 for (i = 0; i < 7; i++) {
527 if (errors & (1 << i)) 527 if (errors & (1 << i))
528 e752x_printk(KERN_WARNING, "%sError %s\n", 528 e752x_printk(KERN_WARNING, "%sError %s\n",
529 fatal_message[fatal], hub_message[i]); 529 fatal_message[fatal], hub_message[i]);
530 } 530 }
531 } 531 }
532 532
533 static inline void hub_error(int fatal, u8 errors, int *error_found, 533 static inline void hub_error(int fatal, u8 errors, int *error_found,
534 int handle_error) 534 int handle_error)
535 { 535 {
536 *error_found = 1; 536 *error_found = 1;
537 537
538 if (handle_error) 538 if (handle_error)
539 do_hub_error(fatal, errors); 539 do_hub_error(fatal, errors);
540 } 540 }
541 541
542 #define NSI_FATAL_MASK 0x0c080081 542 #define NSI_FATAL_MASK 0x0c080081
543 #define NSI_NON_FATAL_MASK 0x23a0ba64 543 #define NSI_NON_FATAL_MASK 0x23a0ba64
544 #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK) 544 #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
545 545
546 static char *nsi_message[30] = { 546 static char *nsi_message[30] = {
547 "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */ 547 "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
548 "", /* reserved */ 548 "", /* reserved */
549 "NSI Parity Error", /* bit 2, non-fatal */ 549 "NSI Parity Error", /* bit 2, non-fatal */
550 "", /* reserved */ 550 "", /* reserved */
551 "", /* reserved */ 551 "", /* reserved */
552 "Correctable Error Message", /* bit 5, non-fatal */ 552 "Correctable Error Message", /* bit 5, non-fatal */
553 "Non-Fatal Error Message", /* bit 6, non-fatal */ 553 "Non-Fatal Error Message", /* bit 6, non-fatal */
554 "Fatal Error Message", /* bit 7, fatal */ 554 "Fatal Error Message", /* bit 7, fatal */
555 "", /* reserved */ 555 "", /* reserved */
556 "Receiver Error", /* bit 9, non-fatal */ 556 "Receiver Error", /* bit 9, non-fatal */
557 "", /* reserved */ 557 "", /* reserved */
558 "Bad TLP", /* bit 11, non-fatal */ 558 "Bad TLP", /* bit 11, non-fatal */
559 "Bad DLLP", /* bit 12, non-fatal */ 559 "Bad DLLP", /* bit 12, non-fatal */
560 "REPLAY_NUM Rollover", /* bit 13, non-fatal */ 560 "REPLAY_NUM Rollover", /* bit 13, non-fatal */
561 "", /* reserved */ 561 "", /* reserved */
562 "Replay Timer Timeout", /* bit 15, non-fatal */ 562 "Replay Timer Timeout", /* bit 15, non-fatal */
563 "", /* reserved */ 563 "", /* reserved */
564 "", /* reserved */ 564 "", /* reserved */
565 "", /* reserved */ 565 "", /* reserved */
566 "Data Link Protocol Error", /* bit 19, fatal */ 566 "Data Link Protocol Error", /* bit 19, fatal */
567 "", /* reserved */ 567 "", /* reserved */
568 "Poisoned TLP", /* bit 21, non-fatal */ 568 "Poisoned TLP", /* bit 21, non-fatal */
569 "", /* reserved */ 569 "", /* reserved */
570 "Completion Timeout", /* bit 23, non-fatal */ 570 "Completion Timeout", /* bit 23, non-fatal */
571 "Completer Abort", /* bit 24, non-fatal */ 571 "Completer Abort", /* bit 24, non-fatal */
572 "Unexpected Completion", /* bit 25, non-fatal */ 572 "Unexpected Completion", /* bit 25, non-fatal */
573 "Receiver Overflow", /* bit 26, fatal */ 573 "Receiver Overflow", /* bit 26, fatal */
574 "Malformed TLP", /* bit 27, fatal */ 574 "Malformed TLP", /* bit 27, fatal */
575 "", /* reserved */ 575 "", /* reserved */
576 "Unsupported Request" /* bit 29, non-fatal */ 576 "Unsupported Request" /* bit 29, non-fatal */
577 }; 577 };
578 578
579 static void do_nsi_error(int fatal, u32 errors) 579 static void do_nsi_error(int fatal, u32 errors)
580 { 580 {
581 int i; 581 int i;
582 582
583 for (i = 0; i < 30; i++) { 583 for (i = 0; i < 30; i++) {
584 if (errors & (1 << i)) 584 if (errors & (1 << i))
585 printk(KERN_WARNING "%sError %s\n", 585 printk(KERN_WARNING "%sError %s\n",
586 fatal_message[fatal], nsi_message[i]); 586 fatal_message[fatal], nsi_message[i]);
587 } 587 }
588 } 588 }
589 589
590 static inline void nsi_error(int fatal, u32 errors, int *error_found, 590 static inline void nsi_error(int fatal, u32 errors, int *error_found,
591 int handle_error) 591 int handle_error)
592 { 592 {
593 *error_found = 1; 593 *error_found = 1;
594 594
595 if (handle_error) 595 if (handle_error)
596 do_nsi_error(fatal, errors); 596 do_nsi_error(fatal, errors);
597 } 597 }
598 598
599 static char *membuf_message[4] = { 599 static char *membuf_message[4] = {
600 "Internal PMWB to DRAM parity", 600 "Internal PMWB to DRAM parity",
601 "Internal PMWB to System Bus Parity", 601 "Internal PMWB to System Bus Parity",
602 "Internal System Bus or IO to PMWB Parity", 602 "Internal System Bus or IO to PMWB Parity",
603 "Internal DRAM to PMWB Parity" 603 "Internal DRAM to PMWB Parity"
604 }; 604 };
605 605
606 static void do_membuf_error(u8 errors) 606 static void do_membuf_error(u8 errors)
607 { 607 {
608 int i; 608 int i;
609 609
610 for (i = 0; i < 4; i++) { 610 for (i = 0; i < 4; i++) {
611 if (errors & (1 << i)) 611 if (errors & (1 << i))
612 e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n", 612 e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
613 membuf_message[i]); 613 membuf_message[i]);
614 } 614 }
615 } 615 }
616 616
617 static inline void membuf_error(u8 errors, int *error_found, int handle_error) 617 static inline void membuf_error(u8 errors, int *error_found, int handle_error)
618 { 618 {
619 *error_found = 1; 619 *error_found = 1;
620 620
621 if (handle_error) 621 if (handle_error)
622 do_membuf_error(errors); 622 do_membuf_error(errors);
623 } 623 }
624 624
625 static char *sysbus_message[10] = { 625 static char *sysbus_message[10] = {
626 "Addr or Request Parity", 626 "Addr or Request Parity",
627 "Data Strobe Glitch", 627 "Data Strobe Glitch",
628 "Addr Strobe Glitch", 628 "Addr Strobe Glitch",
629 "Data Parity", 629 "Data Parity",
630 "Addr Above TOM", 630 "Addr Above TOM",
631 "Non DRAM Lock Error", 631 "Non DRAM Lock Error",
632 "MCERR", "BINIT", 632 "MCERR", "BINIT",
633 "Memory Parity", 633 "Memory Parity",
634 "IO Subsystem Parity" 634 "IO Subsystem Parity"
635 }; 635 };
636 636
637 static void do_sysbus_error(int fatal, u32 errors) 637 static void do_sysbus_error(int fatal, u32 errors)
638 { 638 {
639 int i; 639 int i;
640 640
641 for (i = 0; i < 10; i++) { 641 for (i = 0; i < 10; i++) {
642 if (errors & (1 << i)) 642 if (errors & (1 << i))
643 e752x_printk(KERN_WARNING, "%sError System Bus %s\n", 643 e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
644 fatal_message[fatal], sysbus_message[i]); 644 fatal_message[fatal], sysbus_message[i]);
645 } 645 }
646 } 646 }
647 647
648 static inline void sysbus_error(int fatal, u32 errors, int *error_found, 648 static inline void sysbus_error(int fatal, u32 errors, int *error_found,
649 int handle_error) 649 int handle_error)
650 { 650 {
651 *error_found = 1; 651 *error_found = 1;
652 652
653 if (handle_error) 653 if (handle_error)
654 do_sysbus_error(fatal, errors); 654 do_sysbus_error(fatal, errors);
655 } 655 }
656 656
657 static void e752x_check_hub_interface(struct e752x_error_info *info, 657 static void e752x_check_hub_interface(struct e752x_error_info *info,
658 int *error_found, int handle_error) 658 int *error_found, int handle_error)
659 { 659 {
660 u8 stat8; 660 u8 stat8;
661 661
662 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); 662 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
663 663
664 stat8 = info->hi_ferr; 664 stat8 = info->hi_ferr;
665 665
666 if (stat8 & 0x7f) { /* Error, so process */ 666 if (stat8 & 0x7f) { /* Error, so process */
667 stat8 &= 0x7f; 667 stat8 &= 0x7f;
668 668
669 if (stat8 & 0x2b) 669 if (stat8 & 0x2b)
670 hub_error(1, stat8 & 0x2b, error_found, handle_error); 670 hub_error(1, stat8 & 0x2b, error_found, handle_error);
671 671
672 if (stat8 & 0x54) 672 if (stat8 & 0x54)
673 hub_error(0, stat8 & 0x54, error_found, handle_error); 673 hub_error(0, stat8 & 0x54, error_found, handle_error);
674 } 674 }
675 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); 675 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
676 676
677 stat8 = info->hi_nerr; 677 stat8 = info->hi_nerr;
678 678
679 if (stat8 & 0x7f) { /* Error, so process */ 679 if (stat8 & 0x7f) { /* Error, so process */
680 stat8 &= 0x7f; 680 stat8 &= 0x7f;
681 681
682 if (stat8 & 0x2b) 682 if (stat8 & 0x2b)
683 hub_error(1, stat8 & 0x2b, error_found, handle_error); 683 hub_error(1, stat8 & 0x2b, error_found, handle_error);
684 684
685 if (stat8 & 0x54) 685 if (stat8 & 0x54)
686 hub_error(0, stat8 & 0x54, error_found, handle_error); 686 hub_error(0, stat8 & 0x54, error_found, handle_error);
687 } 687 }
688 } 688 }
689 689
690 static void e752x_check_ns_interface(struct e752x_error_info *info, 690 static void e752x_check_ns_interface(struct e752x_error_info *info,
691 int *error_found, int handle_error) 691 int *error_found, int handle_error)
692 { 692 {
693 u32 stat32; 693 u32 stat32;
694 694
695 stat32 = info->nsi_ferr; 695 stat32 = info->nsi_ferr;
696 if (stat32 & NSI_ERR_MASK) { /* Error, so process */ 696 if (stat32 & NSI_ERR_MASK) { /* Error, so process */
697 if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */ 697 if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
698 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found, 698 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
699 handle_error); 699 handle_error);
700 if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */ 700 if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
701 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found, 701 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
702 handle_error); 702 handle_error);
703 } 703 }
704 stat32 = info->nsi_nerr; 704 stat32 = info->nsi_nerr;
705 if (stat32 & NSI_ERR_MASK) { 705 if (stat32 & NSI_ERR_MASK) {
706 if (stat32 & NSI_FATAL_MASK) 706 if (stat32 & NSI_FATAL_MASK)
707 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found, 707 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
708 handle_error); 708 handle_error);
709 if (stat32 & NSI_NON_FATAL_MASK) 709 if (stat32 & NSI_NON_FATAL_MASK)
710 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found, 710 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
711 handle_error); 711 handle_error);
712 } 712 }
713 } 713 }
714 714
715 static void e752x_check_sysbus(struct e752x_error_info *info, 715 static void e752x_check_sysbus(struct e752x_error_info *info,
716 int *error_found, int handle_error) 716 int *error_found, int handle_error)
717 { 717 {
718 u32 stat32, error32; 718 u32 stat32, error32;
719 719
720 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32); 720 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
721 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16); 721 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
722 722
723 if (stat32 == 0) 723 if (stat32 == 0)
724 return; /* no errors */ 724 return; /* no errors */
725 725
726 error32 = (stat32 >> 16) & 0x3ff; 726 error32 = (stat32 >> 16) & 0x3ff;
727 stat32 = stat32 & 0x3ff; 727 stat32 = stat32 & 0x3ff;
728 728
729 if (stat32 & 0x087) 729 if (stat32 & 0x087)
730 sysbus_error(1, stat32 & 0x087, error_found, handle_error); 730 sysbus_error(1, stat32 & 0x087, error_found, handle_error);
731 731
732 if (stat32 & 0x378) 732 if (stat32 & 0x378)
733 sysbus_error(0, stat32 & 0x378, error_found, handle_error); 733 sysbus_error(0, stat32 & 0x378, error_found, handle_error);
734 734
735 if (error32 & 0x087) 735 if (error32 & 0x087)
736 sysbus_error(1, error32 & 0x087, error_found, handle_error); 736 sysbus_error(1, error32 & 0x087, error_found, handle_error);
737 737
738 if (error32 & 0x378) 738 if (error32 & 0x378)
739 sysbus_error(0, error32 & 0x378, error_found, handle_error); 739 sysbus_error(0, error32 & 0x378, error_found, handle_error);
740 } 740 }
741 741
742 static void e752x_check_membuf(struct e752x_error_info *info, 742 static void e752x_check_membuf(struct e752x_error_info *info,
743 int *error_found, int handle_error) 743 int *error_found, int handle_error)
744 { 744 {
745 u8 stat8; 745 u8 stat8;
746 746
747 stat8 = info->buf_ferr; 747 stat8 = info->buf_ferr;
748 748
749 if (stat8 & 0x0f) { /* Error, so process */ 749 if (stat8 & 0x0f) { /* Error, so process */
750 stat8 &= 0x0f; 750 stat8 &= 0x0f;
751 membuf_error(stat8, error_found, handle_error); 751 membuf_error(stat8, error_found, handle_error);
752 } 752 }
753 753
754 stat8 = info->buf_nerr; 754 stat8 = info->buf_nerr;
755 755
756 if (stat8 & 0x0f) { /* Error, so process */ 756 if (stat8 & 0x0f) { /* Error, so process */
757 stat8 &= 0x0f; 757 stat8 &= 0x0f;
758 membuf_error(stat8, error_found, handle_error); 758 membuf_error(stat8, error_found, handle_error);
759 } 759 }
760 } 760 }
761 761
762 static void e752x_check_dram(struct mem_ctl_info *mci, 762 static void e752x_check_dram(struct mem_ctl_info *mci,
763 struct e752x_error_info *info, int *error_found, 763 struct e752x_error_info *info, int *error_found,
764 int handle_error) 764 int handle_error)
765 { 765 {
766 u16 error_one, error_next; 766 u16 error_one, error_next;
767 767
768 error_one = info->dram_ferr; 768 error_one = info->dram_ferr;
769 error_next = info->dram_nerr; 769 error_next = info->dram_nerr;
770 770
771 /* decode and report errors */ 771 /* decode and report errors */
772 if (error_one & 0x0101) /* check first error correctable */ 772 if (error_one & 0x0101) /* check first error correctable */
773 process_ce(mci, error_one, info->dram_sec1_add, 773 process_ce(mci, error_one, info->dram_sec1_add,
774 info->dram_sec1_syndrome, error_found, handle_error); 774 info->dram_sec1_syndrome, error_found, handle_error);
775 775
776 if (error_next & 0x0101) /* check next error correctable */ 776 if (error_next & 0x0101) /* check next error correctable */
777 process_ce(mci, error_next, info->dram_sec2_add, 777 process_ce(mci, error_next, info->dram_sec2_add,
778 info->dram_sec2_syndrome, error_found, handle_error); 778 info->dram_sec2_syndrome, error_found, handle_error);
779 779
780 if (error_one & 0x4040) 780 if (error_one & 0x4040)
781 process_ue_no_info_wr(mci, error_found, handle_error); 781 process_ue_no_info_wr(mci, error_found, handle_error);
782 782
783 if (error_next & 0x4040) 783 if (error_next & 0x4040)
784 process_ue_no_info_wr(mci, error_found, handle_error); 784 process_ue_no_info_wr(mci, error_found, handle_error);
785 785
786 if (error_one & 0x2020) 786 if (error_one & 0x2020)
787 process_ded_retry(mci, error_one, info->dram_retr_add, 787 process_ded_retry(mci, error_one, info->dram_retr_add,
788 error_found, handle_error); 788 error_found, handle_error);
789 789
790 if (error_next & 0x2020) 790 if (error_next & 0x2020)
791 process_ded_retry(mci, error_next, info->dram_retr_add, 791 process_ded_retry(mci, error_next, info->dram_retr_add,
792 error_found, handle_error); 792 error_found, handle_error);
793 793
794 if (error_one & 0x0808) 794 if (error_one & 0x0808)
795 process_threshold_ce(mci, error_one, error_found, handle_error); 795 process_threshold_ce(mci, error_one, error_found, handle_error);
796 796
797 if (error_next & 0x0808) 797 if (error_next & 0x0808)
798 process_threshold_ce(mci, error_next, error_found, 798 process_threshold_ce(mci, error_next, error_found,
799 handle_error); 799 handle_error);
800 800
801 if (error_one & 0x0606) 801 if (error_one & 0x0606)
802 process_ue(mci, error_one, info->dram_ded_add, 802 process_ue(mci, error_one, info->dram_ded_add,
803 info->dram_scrb_add, error_found, handle_error); 803 info->dram_scrb_add, error_found, handle_error);
804 804
805 if (error_next & 0x0606) 805 if (error_next & 0x0606)
806 process_ue(mci, error_next, info->dram_ded_add, 806 process_ue(mci, error_next, info->dram_ded_add,
807 info->dram_scrb_add, error_found, handle_error); 807 info->dram_scrb_add, error_found, handle_error);
808 } 808 }
809 809
810 static void e752x_get_error_info(struct mem_ctl_info *mci, 810 static void e752x_get_error_info(struct mem_ctl_info *mci,
811 struct e752x_error_info *info) 811 struct e752x_error_info *info)
812 { 812 {
813 struct pci_dev *dev; 813 struct pci_dev *dev;
814 struct e752x_pvt *pvt; 814 struct e752x_pvt *pvt;
815 815
816 memset(info, 0, sizeof(*info)); 816 memset(info, 0, sizeof(*info));
817 pvt = (struct e752x_pvt *)mci->pvt_info; 817 pvt = (struct e752x_pvt *)mci->pvt_info;
818 dev = pvt->dev_d0f1; 818 dev = pvt->dev_d0f1;
819 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); 819 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
820 820
821 if (info->ferr_global) { 821 if (info->ferr_global) {
822 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { 822 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
823 pci_read_config_dword(dev, I3100_NSI_FERR, 823 pci_read_config_dword(dev, I3100_NSI_FERR,
824 &info->nsi_ferr); 824 &info->nsi_ferr);
825 info->hi_ferr = 0; 825 info->hi_ferr = 0;
826 } else { 826 } else {
827 pci_read_config_byte(dev, E752X_HI_FERR, 827 pci_read_config_byte(dev, E752X_HI_FERR,
828 &info->hi_ferr); 828 &info->hi_ferr);
829 info->nsi_ferr = 0; 829 info->nsi_ferr = 0;
830 } 830 }
831 pci_read_config_word(dev, E752X_SYSBUS_FERR, 831 pci_read_config_word(dev, E752X_SYSBUS_FERR,
832 &info->sysbus_ferr); 832 &info->sysbus_ferr);
833 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); 833 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
834 pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr); 834 pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
835 pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD, 835 pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
836 &info->dram_sec1_add); 836 &info->dram_sec1_add);
837 pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME, 837 pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
838 &info->dram_sec1_syndrome); 838 &info->dram_sec1_syndrome);
839 pci_read_config_dword(dev, E752X_DRAM_DED_ADD, 839 pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
840 &info->dram_ded_add); 840 &info->dram_ded_add);
841 pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD, 841 pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
842 &info->dram_scrb_add); 842 &info->dram_scrb_add);
843 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, 843 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
844 &info->dram_retr_add); 844 &info->dram_retr_add);
845 845
846 /* ignore the reserved bits just in case */ 846 /* ignore the reserved bits just in case */
847 if (info->hi_ferr & 0x7f) 847 if (info->hi_ferr & 0x7f)
848 pci_write_config_byte(dev, E752X_HI_FERR, 848 pci_write_config_byte(dev, E752X_HI_FERR,
849 info->hi_ferr); 849 info->hi_ferr);
850 850
851 if (info->nsi_ferr & NSI_ERR_MASK) 851 if (info->nsi_ferr & NSI_ERR_MASK)
852 pci_write_config_dword(dev, I3100_NSI_FERR, 852 pci_write_config_dword(dev, I3100_NSI_FERR,
853 info->nsi_ferr); 853 info->nsi_ferr);
854 854
855 if (info->sysbus_ferr) 855 if (info->sysbus_ferr)
856 pci_write_config_word(dev, E752X_SYSBUS_FERR, 856 pci_write_config_word(dev, E752X_SYSBUS_FERR,
857 info->sysbus_ferr); 857 info->sysbus_ferr);
858 858
859 if (info->buf_ferr & 0x0f) 859 if (info->buf_ferr & 0x0f)
860 pci_write_config_byte(dev, E752X_BUF_FERR, 860 pci_write_config_byte(dev, E752X_BUF_FERR,
861 info->buf_ferr); 861 info->buf_ferr);
862 862
863 if (info->dram_ferr) 863 if (info->dram_ferr)
864 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR, 864 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
865 info->dram_ferr, info->dram_ferr); 865 info->dram_ferr, info->dram_ferr);
866 866
867 pci_write_config_dword(dev, E752X_FERR_GLOBAL, 867 pci_write_config_dword(dev, E752X_FERR_GLOBAL,
868 info->ferr_global); 868 info->ferr_global);
869 } 869 }
870 870
871 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); 871 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
872 872
873 if (info->nerr_global) { 873 if (info->nerr_global) {
874 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { 874 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
875 pci_read_config_dword(dev, I3100_NSI_NERR, 875 pci_read_config_dword(dev, I3100_NSI_NERR,
876 &info->nsi_nerr); 876 &info->nsi_nerr);
877 info->hi_nerr = 0; 877 info->hi_nerr = 0;
878 } else { 878 } else {
879 pci_read_config_byte(dev, E752X_HI_NERR, 879 pci_read_config_byte(dev, E752X_HI_NERR,
880 &info->hi_nerr); 880 &info->hi_nerr);
881 info->nsi_nerr = 0; 881 info->nsi_nerr = 0;
882 } 882 }
883 pci_read_config_word(dev, E752X_SYSBUS_NERR, 883 pci_read_config_word(dev, E752X_SYSBUS_NERR,
884 &info->sysbus_nerr); 884 &info->sysbus_nerr);
885 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); 885 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
886 pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr); 886 pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
887 pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD, 887 pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
888 &info->dram_sec2_add); 888 &info->dram_sec2_add);
889 pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME, 889 pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
890 &info->dram_sec2_syndrome); 890 &info->dram_sec2_syndrome);
891 891
892 if (info->hi_nerr & 0x7f) 892 if (info->hi_nerr & 0x7f)
893 pci_write_config_byte(dev, E752X_HI_NERR, 893 pci_write_config_byte(dev, E752X_HI_NERR,
894 info->hi_nerr); 894 info->hi_nerr);
895 895
896 if (info->nsi_nerr & NSI_ERR_MASK) 896 if (info->nsi_nerr & NSI_ERR_MASK)
897 pci_write_config_dword(dev, I3100_NSI_NERR, 897 pci_write_config_dword(dev, I3100_NSI_NERR,
898 info->nsi_nerr); 898 info->nsi_nerr);
899 899
900 if (info->sysbus_nerr) 900 if (info->sysbus_nerr)
901 pci_write_config_word(dev, E752X_SYSBUS_NERR, 901 pci_write_config_word(dev, E752X_SYSBUS_NERR,
902 info->sysbus_nerr); 902 info->sysbus_nerr);
903 903
904 if (info->buf_nerr & 0x0f) 904 if (info->buf_nerr & 0x0f)
905 pci_write_config_byte(dev, E752X_BUF_NERR, 905 pci_write_config_byte(dev, E752X_BUF_NERR,
906 info->buf_nerr); 906 info->buf_nerr);
907 907
908 if (info->dram_nerr) 908 if (info->dram_nerr)
909 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR, 909 pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
910 info->dram_nerr, info->dram_nerr); 910 info->dram_nerr, info->dram_nerr);
911 911
912 pci_write_config_dword(dev, E752X_NERR_GLOBAL, 912 pci_write_config_dword(dev, E752X_NERR_GLOBAL,
913 info->nerr_global); 913 info->nerr_global);
914 } 914 }
915 } 915 }
916 916
917 static int e752x_process_error_info(struct mem_ctl_info *mci, 917 static int e752x_process_error_info(struct mem_ctl_info *mci,
918 struct e752x_error_info *info, 918 struct e752x_error_info *info,
919 int handle_errors) 919 int handle_errors)
920 { 920 {
921 u32 error32, stat32; 921 u32 error32, stat32;
922 int error_found; 922 int error_found;
923 923
924 error_found = 0; 924 error_found = 0;
925 error32 = (info->ferr_global >> 18) & 0x3ff; 925 error32 = (info->ferr_global >> 18) & 0x3ff;
926 stat32 = (info->ferr_global >> 4) & 0x7ff; 926 stat32 = (info->ferr_global >> 4) & 0x7ff;
927 927
928 if (error32) 928 if (error32)
929 global_error(1, error32, &error_found, handle_errors); 929 global_error(1, error32, &error_found, handle_errors);
930 930
931 if (stat32) 931 if (stat32)
932 global_error(0, stat32, &error_found, handle_errors); 932 global_error(0, stat32, &error_found, handle_errors);
933 933
934 error32 = (info->nerr_global >> 18) & 0x3ff; 934 error32 = (info->nerr_global >> 18) & 0x3ff;
935 stat32 = (info->nerr_global >> 4) & 0x7ff; 935 stat32 = (info->nerr_global >> 4) & 0x7ff;
936 936
937 if (error32) 937 if (error32)
938 global_error(1, error32, &error_found, handle_errors); 938 global_error(1, error32, &error_found, handle_errors);
939 939
940 if (stat32) 940 if (stat32)
941 global_error(0, stat32, &error_found, handle_errors); 941 global_error(0, stat32, &error_found, handle_errors);
942 942
943 e752x_check_hub_interface(info, &error_found, handle_errors); 943 e752x_check_hub_interface(info, &error_found, handle_errors);
944 e752x_check_ns_interface(info, &error_found, handle_errors); 944 e752x_check_ns_interface(info, &error_found, handle_errors);
945 e752x_check_sysbus(info, &error_found, handle_errors); 945 e752x_check_sysbus(info, &error_found, handle_errors);
946 e752x_check_membuf(info, &error_found, handle_errors); 946 e752x_check_membuf(info, &error_found, handle_errors);
947 e752x_check_dram(mci, info, &error_found, handle_errors); 947 e752x_check_dram(mci, info, &error_found, handle_errors);
948 return error_found; 948 return error_found;
949 } 949 }
950 950
951 static void e752x_check(struct mem_ctl_info *mci) 951 static void e752x_check(struct mem_ctl_info *mci)
952 { 952 {
953 struct e752x_error_info info; 953 struct e752x_error_info info;
954 954
955 debugf3("%s()\n", __func__); 955 debugf3("%s()\n", __func__);
956 e752x_get_error_info(mci, &info); 956 e752x_get_error_info(mci, &info);
957 e752x_process_error_info(mci, &info, 1); 957 e752x_process_error_info(mci, &info, 1);
958 } 958 }
959 959
960 /* Program byte/sec bandwidth scrub rate to hardware */ 960 /* Program byte/sec bandwidth scrub rate to hardware */
961 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw) 961 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
962 { 962 {
963 const struct scrubrate *scrubrates; 963 const struct scrubrate *scrubrates;
964 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 964 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
965 struct pci_dev *pdev = pvt->dev_d0f0; 965 struct pci_dev *pdev = pvt->dev_d0f0;
966 int i; 966 int i;
967 967
968 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0) 968 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
969 scrubrates = scrubrates_i3100; 969 scrubrates = scrubrates_i3100;
970 else 970 else
971 scrubrates = scrubrates_e752x; 971 scrubrates = scrubrates_e752x;
972 972
973 /* Translate the desired scrub rate to an e752x/3100 register value. 973 /* Translate the desired scrub rate to an e752x/3100 register value.
974 * Search for the bandwidth that is equal or greater than the 974 * Search for the bandwidth that is equal or greater than the
975 * desired rate and program the corresponding register value. 975 * desired rate and program the corresponding register value.
976 */ 976 */
977 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++) 977 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
978 if (scrubrates[i].bandwidth >= new_bw) 978 if (scrubrates[i].bandwidth >= new_bw)
979 break; 979 break;
980 980
981 if (scrubrates[i].bandwidth == SDRATE_EOT) 981 if (scrubrates[i].bandwidth == SDRATE_EOT)
982 return -1; 982 return -1;
983 983
984 pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval); 984 pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
985 985
986 return 0; 986 return scrubrates[i].bandwidth;
987 } 987 }
988 988
989 /* Convert current scrub rate value into byte/sec bandwidth */ 989 /* Convert current scrub rate value into byte/sec bandwidth */
990 static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) 990 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
991 { 991 {
992 const struct scrubrate *scrubrates; 992 const struct scrubrate *scrubrates;
993 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 993 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
994 struct pci_dev *pdev = pvt->dev_d0f0; 994 struct pci_dev *pdev = pvt->dev_d0f0;
995 u16 scrubval; 995 u16 scrubval;
996 int i; 996 int i;
997 997
998 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0) 998 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
999 scrubrates = scrubrates_i3100; 999 scrubrates = scrubrates_i3100;
1000 else 1000 else
1001 scrubrates = scrubrates_e752x; 1001 scrubrates = scrubrates_e752x;
1002 1002
1003 /* Find the bandwidth matching the memory scrubber configuration */ 1003 /* Find the bandwidth matching the memory scrubber configuration */
1004 pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval); 1004 pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
1005 scrubval = scrubval & 0x0f; 1005 scrubval = scrubval & 0x0f;
1006 1006
1007 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++) 1007 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1008 if (scrubrates[i].scrubval == scrubval) 1008 if (scrubrates[i].scrubval == scrubval)
1009 break; 1009 break;
1010 1010
1011 if (scrubrates[i].bandwidth == SDRATE_EOT) { 1011 if (scrubrates[i].bandwidth == SDRATE_EOT) {
1012 e752x_printk(KERN_WARNING, 1012 e752x_printk(KERN_WARNING,
1013 "Invalid sdram scrub control value: 0x%x\n", scrubval); 1013 "Invalid sdram scrub control value: 0x%x\n", scrubval);
1014 return -1; 1014 return -1;
1015 } 1015 }
1016 return scrubrates[i].bandwidth;
1016 1017
1017 *bw = scrubrates[i].bandwidth;
1018
1019 return 0;
1020 } 1018 }
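
Taken together, the two handlers above now share one return convention: a negative value signals failure, anything else is the scrub bandwidth actually in effect in bytes/sec. A minimal usage sketch under that convention (the mci pointer and the requested 500000 bytes/sec are assumptions for illustration):

	int bw = set_sdram_scrub_rate(mci, 500000);
	if (bw < 0)
		return bw;	/* no table entry could satisfy the request */

	bw = get_sdram_scrub_rate(mci);
	if (bw >= 0)
		e752x_printk(KERN_INFO, "scrub rate now %d bytes/sec\n", bw);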
1021 1019
1022 /* Return 1 if dual channel mode is active. Else return 0. */ 1020 /* Return 1 if dual channel mode is active. Else return 0. */
1023 static inline int dual_channel_active(u16 ddrcsr) 1021 static inline int dual_channel_active(u16 ddrcsr)
1024 { 1022 {
1025 return (((ddrcsr >> 12) & 3) == 3); 1023 return (((ddrcsr >> 12) & 3) == 3);
1026 } 1024 }
1027 1025
1028 /* Remap csrow index numbers if map_type is "reverse" 1026 /* Remap csrow index numbers if map_type is "reverse"
1029 */ 1027 */
1030 static inline int remap_csrow_index(struct mem_ctl_info *mci, int index) 1028 static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
1031 { 1029 {
1032 struct e752x_pvt *pvt = mci->pvt_info; 1030 struct e752x_pvt *pvt = mci->pvt_info;
1033 1031
1034 if (!pvt->map_type) 1032 if (!pvt->map_type)
1035 return (7 - index); 1033 return (7 - index);
1036 1034
1037 return (index); 1035 return (index);
1038 } 1036 }
1039 1037
1040 static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, 1038 static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1041 u16 ddrcsr) 1039 u16 ddrcsr)
1042 { 1040 {
1043 struct csrow_info *csrow; 1041 struct csrow_info *csrow;
1044 unsigned long last_cumul_size; 1042 unsigned long last_cumul_size;
1045 int index, mem_dev, drc_chan; 1043 int index, mem_dev, drc_chan;
1046 int drc_drbg; /* DRB granularity 1=64mb, 2=128mb */ 1044 int drc_drbg; /* DRB granularity 1=64mb, 2=128mb */
1047 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ 1045 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
1048 u8 value; 1046 u8 value;
1049 u32 dra, drc, cumul_size; 1047 u32 dra, drc, cumul_size;
1050 1048
1051 dra = 0; 1049 dra = 0;
1052 for (index = 0; index < 4; index++) { 1050 for (index = 0; index < 4; index++) {
1053 u8 dra_reg; 1051 u8 dra_reg;
1054 pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg); 1052 pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
1055 dra |= dra_reg << (index * 8); 1053 dra |= dra_reg << (index * 8);
1056 } 1054 }
1057 pci_read_config_dword(pdev, E752X_DRC, &drc); 1055 pci_read_config_dword(pdev, E752X_DRC, &drc);
1058 drc_chan = dual_channel_active(ddrcsr); 1056 drc_chan = dual_channel_active(ddrcsr);
1059 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ 1057 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
1060 drc_ddim = (drc >> 20) & 0x3; 1058 drc_ddim = (drc >> 20) & 0x3;
1061 1059
1062 /* The dram row boundary (DRB) reg values are boundary address for 1060 /* The dram row boundary (DRB) reg values are boundary address for
1063 * each DRAM row with a granularity of 64 or 128MB (single/dual 1061 * each DRAM row with a granularity of 64 or 128MB (single/dual
1064 * channel operation). DRB regs are cumulative; therefore DRB7 will 1062 * channel operation). DRB regs are cumulative; therefore DRB7 will
1065 * contain the total memory contained in all eight rows. 1063 * contain the total memory contained in all eight rows.
1066 */ 1064 */
1067 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 1065 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
1068 /* mem_dev 0=x8, 1=x4 */ 1066 /* mem_dev 0=x8, 1=x4 */
1069 mem_dev = (dra >> (index * 4 + 2)) & 0x3; 1067 mem_dev = (dra >> (index * 4 + 2)) & 0x3;
1070 csrow = &mci->csrows[remap_csrow_index(mci, index)]; 1068 csrow = &mci->csrows[remap_csrow_index(mci, index)];
1071 1069
1072 mem_dev = (mem_dev == 2); 1070 mem_dev = (mem_dev == 2);
1073 pci_read_config_byte(pdev, E752X_DRB + index, &value); 1071 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1074 /* convert a 128 or 64 MiB DRB to a page size. */ 1072 /* convert a 128 or 64 MiB DRB to a page size. */
1075 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 1073 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
1076 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 1074 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
1077 cumul_size); 1075 cumul_size);
1078 if (cumul_size == last_cumul_size) 1076 if (cumul_size == last_cumul_size)
1079 continue; /* not populated */ 1077 continue; /* not populated */
1080 1078
1081 csrow->first_page = last_cumul_size; 1079 csrow->first_page = last_cumul_size;
1082 csrow->last_page = cumul_size - 1; 1080 csrow->last_page = cumul_size - 1;
1083 csrow->nr_pages = cumul_size - last_cumul_size; 1081 csrow->nr_pages = cumul_size - last_cumul_size;
1084 last_cumul_size = cumul_size; 1082 last_cumul_size = cumul_size;
1085 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 1083 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
1086 csrow->mtype = MEM_RDDR; /* only one type supported */ 1084 csrow->mtype = MEM_RDDR; /* only one type supported */
1087 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 1085 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
1088 1086
1089 /* 1087 /*
1090 * if single channel or x8 devices then SECDED 1088 * if single channel or x8 devices then SECDED
1091 * if dual channel and x4 then S4ECD4ED 1089 * if dual channel and x4 then S4ECD4ED
1092 */ 1090 */
1093 if (drc_ddim) { 1091 if (drc_ddim) {
1094 if (drc_chan && mem_dev) { 1092 if (drc_chan && mem_dev) {
1095 csrow->edac_mode = EDAC_S4ECD4ED; 1093 csrow->edac_mode = EDAC_S4ECD4ED;
1096 mci->edac_cap |= EDAC_FLAG_S4ECD4ED; 1094 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1097 } else { 1095 } else {
1098 csrow->edac_mode = EDAC_SECDED; 1096 csrow->edac_mode = EDAC_SECDED;
1099 mci->edac_cap |= EDAC_FLAG_SECDED; 1097 mci->edac_cap |= EDAC_FLAG_SECDED;
1100 } 1098 }
1101 } else 1099 } else
1102 csrow->edac_mode = EDAC_NONE; 1100 csrow->edac_mode = EDAC_NONE;
1103 } 1101 }
1104 } 1102 }
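
The DRB-to-pages conversion in e752x_init_csrows() can be sanity-checked with a concrete (hypothetical) value: in dual-channel mode drc_chan = 1, so drc_drbg = 2 and the shift is 25 + 2 - 12 = 15 for a 4 KiB PAGE_SHIFT; a DRB byte of 8 (8 x 128 MiB) then gives cumul_size = 8 << 15 = 0x40000 pages, i.e. exactly 1 GiB.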
1105 1103
1106 static void e752x_init_mem_map_table(struct pci_dev *pdev, 1104 static void e752x_init_mem_map_table(struct pci_dev *pdev,
1107 struct e752x_pvt *pvt) 1105 struct e752x_pvt *pvt)
1108 { 1106 {
1109 int index; 1107 int index;
1110 u8 value, last, row; 1108 u8 value, last, row;
1111 1109
1112 last = 0; 1110 last = 0;
1113 row = 0; 1111 row = 0;
1114 1112
1115 for (index = 0; index < 8; index += 2) { 1113 for (index = 0; index < 8; index += 2) {
1116 pci_read_config_byte(pdev, E752X_DRB + index, &value); 1114 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1117 /* test if there is a dimm in this slot */ 1115 /* test if there is a dimm in this slot */
1118 if (value == last) { 1116 if (value == last) {
1119 /* no dimm in the slot, so flag it as empty */ 1117 /* no dimm in the slot, so flag it as empty */
1120 pvt->map[index] = 0xff; 1118 pvt->map[index] = 0xff;
1121 pvt->map[index + 1] = 0xff; 1119 pvt->map[index + 1] = 0xff;
1122 } else { /* there is a dimm in the slot */ 1120 } else { /* there is a dimm in the slot */
1123 pvt->map[index] = row; 1121 pvt->map[index] = row;
1124 row++; 1122 row++;
1125 last = value; 1123 last = value;
1126 /* test the next value to see if the dimm is double 1124 /* test the next value to see if the dimm is double
1127 * sided 1125 * sided
1128 */ 1126 */
1129 pci_read_config_byte(pdev, E752X_DRB + index + 1, 1127 pci_read_config_byte(pdev, E752X_DRB + index + 1,
1130 &value); 1128 &value);
1131 1129
1132 /* if it matches the last value, the dimm is single */ 1130 /* if it matches the last value, the dimm is single */
1133 /* sided, so flag as empty; else save the next row # */ 1131 /* sided, so flag as empty; else save the next row # */
1134 pvt->map[index + 1] = (value == last) ? 0xff : row; 1132 pvt->map[index + 1] = (value == last) ? 0xff : row;
1135 row++; 1133 row++;
1136 last = value; 1134 last = value;
1137 } 1135 }
1138 } 1136 }
1139 } 1137 }
1140 1138
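The test above works because each E752X_DRB register holds the cumulative top-of-memory boundary after its row: a reading equal to the previous one means the row added no memory, so that side of the slot is empty. A standalone sketch of the same comparison, using invented cumulative DRB values:

#include <stdio.h>

int main(void)
{
	/* hypothetical cumulative row boundaries (arbitrary units):
	 * slot 0 holds a single-sided dimm, slot 1 is empty,
	 * slot 2 holds a double-sided dimm, slot 3 is empty
	 */
	unsigned char drb[8] = { 8, 8, 8, 8, 16, 24, 24, 24 };
	unsigned char last = 0;
	int i;

	for (i = 0; i < 8; i++) {
		printf("row %d: %s\n", i,
		       drb[i] == last ? "empty" : "populated");
		last = drb[i];
	}
	return 0;
}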
1141 /* Return 0 on success or 1 on failure. */ 1139 /* Return 0 on success or 1 on failure. */
1142 static int e752x_get_devs(struct pci_dev *pdev, int dev_idx, 1140 static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1143 struct e752x_pvt *pvt) 1141 struct e752x_pvt *pvt)
1144 { 1142 {
1145 struct pci_dev *dev; 1143 struct pci_dev *dev;
1146 1144
1147 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 1145 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
1148 pvt->dev_info->err_dev, pvt->bridge_ck); 1146 pvt->dev_info->err_dev, pvt->bridge_ck);
1149 1147
1150 if (pvt->bridge_ck == NULL) 1148 if (pvt->bridge_ck == NULL)
1151 pvt->bridge_ck = pci_scan_single_device(pdev->bus, 1149 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
1152 PCI_DEVFN(0, 1)); 1150 PCI_DEVFN(0, 1));
1153 1151
1154 if (pvt->bridge_ck == NULL) { 1152 if (pvt->bridge_ck == NULL) {
1155 e752x_printk(KERN_ERR, "error reporting device not found:" 1153 e752x_printk(KERN_ERR, "error reporting device not found:"
1156 "vendor %x device 0x%x (broken BIOS?)\n", 1154 "vendor %x device 0x%x (broken BIOS?)\n",
1157 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); 1155 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
1158 return 1; 1156 return 1;
1159 } 1157 }
1160 1158
1161 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 1159 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
1162 e752x_devs[dev_idx].ctl_dev, 1160 e752x_devs[dev_idx].ctl_dev,
1163 NULL); 1161 NULL);
1164 1162
1165 if (dev == NULL) 1163 if (dev == NULL)
1166 goto fail; 1164 goto fail;
1167 1165
1168 pvt->dev_d0f0 = dev; 1166 pvt->dev_d0f0 = dev;
1169 pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); 1167 pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
1170 1168
1171 return 0; 1169 return 0;
1172 1170
1173 fail: 1171 fail:
1174 pci_dev_put(pvt->bridge_ck); 1172 pci_dev_put(pvt->bridge_ck);
1175 return 1; 1173 return 1;
1176 } 1174 }
1177 1175
1178 /* Setup system bus parity mask register. 1176 /* Setup system bus parity mask register.
1179 * Sysbus parity supported on: 1177 * Sysbus parity supported on:
1180 * e7320/e7520/e7525 + Xeon 1178 * e7320/e7520/e7525 + Xeon
1181 */ 1179 */
1182 static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt) 1180 static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
1183 { 1181 {
1184 char *cpu_id = cpu_data(0).x86_model_id; 1182 char *cpu_id = cpu_data(0).x86_model_id;
1185 struct pci_dev *dev = pvt->dev_d0f1; 1183 struct pci_dev *dev = pvt->dev_d0f1;
1186 int enable = 1; 1184 int enable = 1;
1187 1185
1188 /* Allow module parameter override, else see if CPU supports parity */ 1186 /* Allow module parameter override, else see if CPU supports parity */
1189 if (sysbus_parity != -1) { 1187 if (sysbus_parity != -1) {
1190 enable = sysbus_parity; 1188 enable = sysbus_parity;
1191 } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) { 1189 } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
1192 e752x_printk(KERN_INFO, "System Bus Parity not " 1190 e752x_printk(KERN_INFO, "System Bus Parity not "
1193 "supported by CPU, disabling\n"); 1191 "supported by CPU, disabling\n");
1194 enable = 0; 1192 enable = 0;
1195 } 1193 }
1196 1194
1197 if (enable) 1195 if (enable)
1198 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000); 1196 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
1199 else 1197 else
1200 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309); 1198 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
1201 } 1199 }
1202 1200
1203 static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) 1201 static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
1204 { 1202 {
1205 struct pci_dev *dev; 1203 struct pci_dev *dev;
1206 1204
1207 dev = pvt->dev_d0f1; 1205 dev = pvt->dev_d0f1;
1208 /* Turn off error disable & SMI in case the BIOS turned it on */ 1206 /* Turn off error disable & SMI in case the BIOS turned it on */
1209 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { 1207 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
1210 pci_write_config_dword(dev, I3100_NSI_EMASK, 0); 1208 pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
1211 pci_write_config_dword(dev, I3100_NSI_SMICMD, 0); 1209 pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
1212 } else { 1210 } else {
1213 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); 1211 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
1214 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); 1212 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
1215 } 1213 }
1216 1214
1217 e752x_init_sysbus_parity_mask(pvt); 1215 e752x_init_sysbus_parity_mask(pvt);
1218 1216
1219 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); 1217 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
1220 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); 1218 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
1221 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); 1219 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
1222 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); 1220 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
1223 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); 1221 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
1224 } 1222 }
1225 1223
1226 static int e752x_probe1(struct pci_dev *pdev, int dev_idx) 1224 static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1227 { 1225 {
1228 u16 pci_data; 1226 u16 pci_data;
1229 u8 stat8; 1227 u8 stat8;
1230 struct mem_ctl_info *mci; 1228 struct mem_ctl_info *mci;
1231 struct e752x_pvt *pvt; 1229 struct e752x_pvt *pvt;
1232 u16 ddrcsr; 1230 u16 ddrcsr;
1233 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 1231 int drc_chan; /* Number of channels 0=1chan,1=2chan */
1234 struct e752x_error_info discard; 1232 struct e752x_error_info discard;
1235 1233
1236 debugf0("%s(): mci\n", __func__); 1234 debugf0("%s(): mci\n", __func__);
1237 debugf0("Starting Probe1\n"); 1235 debugf0("Starting Probe1\n");
1238 1236
1239 /* check to see if device 0 function 1 is enabled; if it isn't, we 1237 /* check to see if device 0 function 1 is enabled; if it isn't, we
1240 * assume the BIOS has reserved it for a reason and is expecting 1238 * assume the BIOS has reserved it for a reason and is expecting
1241 * exclusive access, so we take care not to violate that assumption and 1239 * exclusive access, so we take care not to violate that assumption and
1242 * fail the probe. */ 1240 * fail the probe. */
1243 pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8); 1241 pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
1244 if (!force_function_unhide && !(stat8 & (1 << 5))) { 1242 if (!force_function_unhide && !(stat8 & (1 << 5))) {
1245 printk(KERN_INFO "Contact your BIOS vendor to see if the " 1243 printk(KERN_INFO "Contact your BIOS vendor to see if the "
1246 "E752x error registers can be safely un-hidden\n"); 1244 "E752x error registers can be safely un-hidden\n");
1247 return -ENODEV; 1245 return -ENODEV;
1248 } 1246 }
1249 stat8 |= (1 << 5); 1247 stat8 |= (1 << 5);
1250 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); 1248 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
1251 1249
1252 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); 1250 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
1253 /* FIXME: should check >>12 or 0xf, true for all? */ 1251 /* FIXME: should check >>12 or 0xf, true for all? */
1254 /* Dual channel = 1, Single channel = 0 */ 1252 /* Dual channel = 1, Single channel = 0 */
1255 drc_chan = dual_channel_active(ddrcsr); 1253 drc_chan = dual_channel_active(ddrcsr);
1256 1254
1257 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0); 1255 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
1258 1256
1259 if (mci == NULL) { 1257 if (mci == NULL) {
1260 return -ENOMEM; 1258 return -ENOMEM;
1261 } 1259 }
1262 1260
1263 debugf3("%s(): init mci\n", __func__); 1261 debugf3("%s(): init mci\n", __func__);
1264 mci->mtype_cap = MEM_FLAG_RDDR; 1262 mci->mtype_cap = MEM_FLAG_RDDR;
1265 /* 3100 IMCH supports SECDED only */ 1263 /* 3100 IMCH supports SECDED only */
1266 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED : 1264 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
1267 (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED); 1265 (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
1268 /* FIXME - what if different memory types are in different csrows? */ 1266 /* FIXME - what if different memory types are in different csrows? */
1269 mci->mod_name = EDAC_MOD_STR; 1267 mci->mod_name = EDAC_MOD_STR;
1270 mci->mod_ver = E752X_REVISION; 1268 mci->mod_ver = E752X_REVISION;
1271 mci->dev = &pdev->dev; 1269 mci->dev = &pdev->dev;
1272 1270
1273 debugf3("%s(): init pvt\n", __func__); 1271 debugf3("%s(): init pvt\n", __func__);
1274 pvt = (struct e752x_pvt *)mci->pvt_info; 1272 pvt = (struct e752x_pvt *)mci->pvt_info;
1275 pvt->dev_info = &e752x_devs[dev_idx]; 1273 pvt->dev_info = &e752x_devs[dev_idx];
1276 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); 1274 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
1277 1275
1278 if (e752x_get_devs(pdev, dev_idx, pvt)) { 1276 if (e752x_get_devs(pdev, dev_idx, pvt)) {
1279 edac_mc_free(mci); 1277 edac_mc_free(mci);
1280 return -ENODEV; 1278 return -ENODEV;
1281 } 1279 }
1282 1280
1283 debugf3("%s(): more mci init\n", __func__); 1281 debugf3("%s(): more mci init\n", __func__);
1284 mci->ctl_name = pvt->dev_info->ctl_name; 1282 mci->ctl_name = pvt->dev_info->ctl_name;
1285 mci->dev_name = pci_name(pdev); 1283 mci->dev_name = pci_name(pdev);
1286 mci->edac_check = e752x_check; 1284 mci->edac_check = e752x_check;
1287 mci->ctl_page_to_phys = ctl_page_to_phys; 1285 mci->ctl_page_to_phys = ctl_page_to_phys;
1288 mci->set_sdram_scrub_rate = set_sdram_scrub_rate; 1286 mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
1289 mci->get_sdram_scrub_rate = get_sdram_scrub_rate; 1287 mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
1290 1288
1291 /* set the map type. 1 = normal, 0 = reversed 1289 /* set the map type. 1 = normal, 0 = reversed
1292 * Must be set before e752x_init_csrows in case csrow mapping 1290 * Must be set before e752x_init_csrows in case csrow mapping
1293 * is reversed. 1291 * is reversed.
1294 */ 1292 */
1295 pci_read_config_byte(pdev, E752X_DRM, &stat8); 1293 pci_read_config_byte(pdev, E752X_DRM, &stat8);
1296 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); 1294 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
1297 1295
1298 e752x_init_csrows(mci, pdev, ddrcsr); 1296 e752x_init_csrows(mci, pdev, ddrcsr);
1299 e752x_init_mem_map_table(pdev, pvt); 1297 e752x_init_mem_map_table(pdev, pvt);
1300 1298
1301 if (dev_idx == I3100) 1299 if (dev_idx == I3100)
1302 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ 1300 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
1303 else 1301 else
1304 mci->edac_cap |= EDAC_FLAG_NONE; 1302 mci->edac_cap |= EDAC_FLAG_NONE;
1305 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 1303 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
1306 1304
1307 /* load the top of low memory, remap base, and remap limit vars */ 1305 /* load the top of low memory, remap base, and remap limit vars */
1308 pci_read_config_word(pdev, E752X_TOLM, &pci_data); 1306 pci_read_config_word(pdev, E752X_TOLM, &pci_data);
1309 pvt->tolm = ((u32) pci_data) << 4; 1307 pvt->tolm = ((u32) pci_data) << 4;
1310 pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data); 1308 pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
1311 pvt->remapbase = ((u32) pci_data) << 14; 1309 pvt->remapbase = ((u32) pci_data) << 14;
1312 pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data); 1310 pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
1313 pvt->remaplimit = ((u32) pci_data) << 14; 1311 pvt->remaplimit = ((u32) pci_data) << 14;
1314 e752x_printk(KERN_INFO, 1312 e752x_printk(KERN_INFO,
1315 "tolm = %x, remapbase = %x, remaplimit = %x\n", 1313 "tolm = %x, remapbase = %x, remaplimit = %x\n",
1316 pvt->tolm, pvt->remapbase, pvt->remaplimit); 1314 pvt->tolm, pvt->remapbase, pvt->remaplimit);
1317 1315
1318 /* Here we assume that we will never see multiple instances of this 1316 /* Here we assume that we will never see multiple instances of this
1319 * type of memory controller. The ID is therefore hardcoded to 0. 1317 * type of memory controller. The ID is therefore hardcoded to 0.
1320 */ 1318 */
1321 if (edac_mc_add_mc(mci)) { 1319 if (edac_mc_add_mc(mci)) {
1322 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 1320 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
1323 goto fail; 1321 goto fail;
1324 } 1322 }
1325 1323
1326 e752x_init_error_reporting_regs(pvt); 1324 e752x_init_error_reporting_regs(pvt);
1327 e752x_get_error_info(mci, &discard); /* clear other MCH errors */ 1325 e752x_get_error_info(mci, &discard); /* clear other MCH errors */
1328 1326
1329 /* allocating generic PCI control info */ 1327 /* allocating generic PCI control info */
1330 e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 1328 e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1331 if (!e752x_pci) { 1329 if (!e752x_pci) {
1332 printk(KERN_WARNING 1330 printk(KERN_WARNING
1333 "%s(): Unable to create PCI control\n", __func__); 1331 "%s(): Unable to create PCI control\n", __func__);
1334 printk(KERN_WARNING 1332 printk(KERN_WARNING
1335 "%s(): PCI error report via EDAC not setup\n", 1333 "%s(): PCI error report via EDAC not setup\n",
1336 __func__); 1334 __func__);
1337 } 1335 }
1338 1336
1339 /* get this far and it's successful */ 1337 /* get this far and it's successful */
1340 debugf3("%s(): success\n", __func__); 1338 debugf3("%s(): success\n", __func__);
1341 return 0; 1339 return 0;
1342 1340
1343 fail: 1341 fail:
1344 pci_dev_put(pvt->dev_d0f0); 1342 pci_dev_put(pvt->dev_d0f0);
1345 pci_dev_put(pvt->dev_d0f1); 1343 pci_dev_put(pvt->dev_d0f1);
1346 pci_dev_put(pvt->bridge_ck); 1344 pci_dev_put(pvt->bridge_ck);
1347 edac_mc_free(mci); 1345 edac_mc_free(mci);
1348 1346
1349 return -ENODEV; 1347 return -ENODEV;
1350 } 1348 }
1351 1349
1352 /* returns count (>= 0), or negative on error */ 1350 /* returns count (>= 0), or negative on error */
1353 static int __devinit e752x_init_one(struct pci_dev *pdev, 1351 static int __devinit e752x_init_one(struct pci_dev *pdev,
1354 const struct pci_device_id *ent) 1352 const struct pci_device_id *ent)
1355 { 1353 {
1356 debugf0("%s()\n", __func__); 1354 debugf0("%s()\n", __func__);
1357 1355
1358 /* wake up and enable device */ 1356 /* wake up and enable device */
1359 if (pci_enable_device(pdev) < 0) 1357 if (pci_enable_device(pdev) < 0)
1360 return -EIO; 1358 return -EIO;
1361 1359
1362 return e752x_probe1(pdev, ent->driver_data); 1360 return e752x_probe1(pdev, ent->driver_data);
1363 } 1361 }
1364 1362
1365 static void __devexit e752x_remove_one(struct pci_dev *pdev) 1363 static void __devexit e752x_remove_one(struct pci_dev *pdev)
1366 { 1364 {
1367 struct mem_ctl_info *mci; 1365 struct mem_ctl_info *mci;
1368 struct e752x_pvt *pvt; 1366 struct e752x_pvt *pvt;
1369 1367
1370 debugf0("%s()\n", __func__); 1368 debugf0("%s()\n", __func__);
1371 1369
1372 if (e752x_pci) 1370 if (e752x_pci)
1373 edac_pci_release_generic_ctl(e752x_pci); 1371 edac_pci_release_generic_ctl(e752x_pci);
1374 1372
1375 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) 1373 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
1376 return; 1374 return;
1377 1375
1378 pvt = (struct e752x_pvt *)mci->pvt_info; 1376 pvt = (struct e752x_pvt *)mci->pvt_info;
1379 pci_dev_put(pvt->dev_d0f0); 1377 pci_dev_put(pvt->dev_d0f0);
1380 pci_dev_put(pvt->dev_d0f1); 1378 pci_dev_put(pvt->dev_d0f1);
1381 pci_dev_put(pvt->bridge_ck); 1379 pci_dev_put(pvt->bridge_ck);
1382 edac_mc_free(mci); 1380 edac_mc_free(mci);
1383 } 1381 }
1384 1382
1385 static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { 1383 static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
1386 { 1384 {
1387 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1385 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1388 E7520}, 1386 E7520},
1389 { 1387 {
1390 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1388 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1391 E7525}, 1389 E7525},
1392 { 1390 {
1393 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1391 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1394 E7320}, 1392 E7320},
1395 { 1393 {
1396 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1394 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1397 I3100}, 1395 I3100},
1398 { 1396 {
1399 0, 1397 0,
1400 } /* 0 terminated list. */ 1398 } /* 0 terminated list. */
1401 }; 1399 };
1402 1400
1403 MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); 1401 MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1404 1402
1405 static struct pci_driver e752x_driver = { 1403 static struct pci_driver e752x_driver = {
1406 .name = EDAC_MOD_STR, 1404 .name = EDAC_MOD_STR,
1407 .probe = e752x_init_one, 1405 .probe = e752x_init_one,
1408 .remove = __devexit_p(e752x_remove_one), 1406 .remove = __devexit_p(e752x_remove_one),
1409 .id_table = e752x_pci_tbl, 1407 .id_table = e752x_pci_tbl,
1410 }; 1408 };
1411 1409
1412 static int __init e752x_init(void) 1410 static int __init e752x_init(void)
1413 { 1411 {
1414 int pci_rc; 1412 int pci_rc;
1415 1413
1416 debugf3("%s()\n", __func__); 1414 debugf3("%s()\n", __func__);
1417 1415
1418 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1416 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1419 opstate_init(); 1417 opstate_init();
1420 1418
1421 pci_rc = pci_register_driver(&e752x_driver); 1419 pci_rc = pci_register_driver(&e752x_driver);
1422 return (pci_rc < 0) ? pci_rc : 0; 1420 return (pci_rc < 0) ? pci_rc : 0;
1423 } 1421 }
1424 1422
1425 static void __exit e752x_exit(void) 1423 static void __exit e752x_exit(void)
1426 { 1424 {
1427 debugf3("%s()\n", __func__); 1425 debugf3("%s()\n", __func__);
1428 pci_unregister_driver(&e752x_driver); 1426 pci_unregister_driver(&e752x_driver);
1429 } 1427 }
1430 1428
1431 module_init(e752x_init); 1429 module_init(e752x_init);
1432 module_exit(e752x_exit); 1430 module_exit(e752x_exit);
1433 1431
1434 MODULE_LICENSE("GPL"); 1432 MODULE_LICENSE("GPL");
1435 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n"); 1433 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1436 MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers"); 1434 MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
1437 1435
1438 module_param(force_function_unhide, int, 0444); 1436 module_param(force_function_unhide, int, 0444);
1439 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" 1437 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
1440 " 1=force unhide and hope BIOS doesn't fight driver for " 1438 " 1=force unhide and hope BIOS doesn't fight driver for "
1441 "Dev0:Fun1 access"); 1439 "Dev0:Fun1 access");
1442 1440
1443 module_param(edac_op_state, int, 0444); 1441 module_param(edac_op_state, int, 0444);
1444 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1442 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1445 1443
1446 module_param(sysbus_parity, int, 0444); 1444 module_param(sysbus_parity, int, 0444);
1447 MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking," 1445 MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
1448 " 1=enable system bus parity checking, default=auto-detect"); 1446 " 1=enable system bus parity checking, default=auto-detect");
1449 module_param(report_non_memory_errors, int, 0644); 1447 module_param(report_non_memory_errors, int, 0644);
1450 MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error " 1448 MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
1451 "reporting, 1=enable non-memory error reporting"); 1449 "reporting, 1=enable non-memory error reporting");
drivers/edac/edac_core.h
1 /* 1 /*
2 * Defines, structures, APIs for edac_core module 2 * Defines, structures, APIs for edac_core module
3 * 3 *
4 * (C) 2007 Linux Networx (http://lnxi.com) 4 * (C) 2007 Linux Networx (http://lnxi.com)
5 * This file may be distributed under the terms of the 5 * This file may be distributed under the terms of the
6 * GNU General Public License. 6 * GNU General Public License.
7 * 7 *
8 * Written by Thayne Harbaugh 8 * Written by Thayne Harbaugh
9 * Based on work by Dan Hollis <goemon at anime dot net> and others. 9 * Based on work by Dan Hollis <goemon at anime dot net> and others.
10 * http://www.anime.net/~goemon/linux-ecc/ 10 * http://www.anime.net/~goemon/linux-ecc/
11 * 11 *
12 * NMI handling support added by 12 * NMI handling support added by
13 * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com> 13 * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
14 * 14 *
15 * Refactored for multi-source files: 15 * Refactored for multi-source files:
16 * Doug Thompson <norsk5@xmission.com> 16 * Doug Thompson <norsk5@xmission.com>
17 * 17 *
18 */ 18 */
19 19
20 #ifndef _EDAC_CORE_H_ 20 #ifndef _EDAC_CORE_H_
21 #define _EDAC_CORE_H_ 21 #define _EDAC_CORE_H_
22 22
23 #include <linux/kernel.h> 23 #include <linux/kernel.h>
24 #include <linux/types.h> 24 #include <linux/types.h>
25 #include <linux/module.h> 25 #include <linux/module.h>
26 #include <linux/spinlock.h> 26 #include <linux/spinlock.h>
27 #include <linux/smp.h> 27 #include <linux/smp.h>
28 #include <linux/pci.h> 28 #include <linux/pci.h>
29 #include <linux/time.h> 29 #include <linux/time.h>
30 #include <linux/nmi.h> 30 #include <linux/nmi.h>
31 #include <linux/rcupdate.h> 31 #include <linux/rcupdate.h>
32 #include <linux/completion.h> 32 #include <linux/completion.h>
33 #include <linux/kobject.h> 33 #include <linux/kobject.h>
34 #include <linux/platform_device.h> 34 #include <linux/platform_device.h>
35 #include <linux/sysdev.h> 35 #include <linux/sysdev.h>
36 #include <linux/workqueue.h> 36 #include <linux/workqueue.h>
37 37
38 #define EDAC_MC_LABEL_LEN 31 38 #define EDAC_MC_LABEL_LEN 31
39 #define EDAC_DEVICE_NAME_LEN 31 39 #define EDAC_DEVICE_NAME_LEN 31
40 #define EDAC_ATTRIB_VALUE_LEN 15 40 #define EDAC_ATTRIB_VALUE_LEN 15
41 #define MC_PROC_NAME_MAX_LEN 7 41 #define MC_PROC_NAME_MAX_LEN 7
42 42
43 #if PAGE_SHIFT < 20 43 #if PAGE_SHIFT < 20
44 #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT)) 44 #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
45 #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) 45 #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
46 #else /* PAGE_SHIFT > 20 */ 46 #else /* PAGE_SHIFT > 20 */
47 #define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20)) 47 #define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20))
48 #define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20)) 48 #define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
49 #endif 49 #endif
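Both a page (2^PAGE_SHIFT bytes) and a MiB (2^20 bytes) are powers of two, so the conversion above is a single shift by the difference of the exponents. A standalone sanity check, assuming the common 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for this demo */
#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))

int main(void)
{
	printf("256 pages = %d MiB\n", PAGES_TO_MiB(256));	/* prints 1 */
	printf("2 MiB = %d pages\n", MiB_TO_PAGES(2));		/* prints 512 */
	return 0;
}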
50 50
51 #define edac_printk(level, prefix, fmt, arg...) \ 51 #define edac_printk(level, prefix, fmt, arg...) \
52 printk(level "EDAC " prefix ": " fmt, ##arg) 52 printk(level "EDAC " prefix ": " fmt, ##arg)
53 53
54 #define edac_mc_printk(mci, level, fmt, arg...) \ 54 #define edac_mc_printk(mci, level, fmt, arg...) \
55 printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg) 55 printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
56 56
57 #define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \ 57 #define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
58 printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg) 58 printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
59 59
60 #define edac_device_printk(ctl, level, fmt, arg...) \ 60 #define edac_device_printk(ctl, level, fmt, arg...) \
61 printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg) 61 printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
62 62
63 #define edac_pci_printk(ctl, level, fmt, arg...) \ 63 #define edac_pci_printk(ctl, level, fmt, arg...) \
64 printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg) 64 printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
65 65
66 /* prefixes for edac_printk() and edac_mc_printk() */ 66 /* prefixes for edac_printk() and edac_mc_printk() */
67 #define EDAC_MC "MC" 67 #define EDAC_MC "MC"
68 #define EDAC_PCI "PCI" 68 #define EDAC_PCI "PCI"
69 #define EDAC_DEBUG "DEBUG" 69 #define EDAC_DEBUG "DEBUG"
70 70
71 extern const char *edac_mem_types[]; 71 extern const char *edac_mem_types[];
72 72
73 #ifdef CONFIG_EDAC_DEBUG 73 #ifdef CONFIG_EDAC_DEBUG
74 extern int edac_debug_level; 74 extern int edac_debug_level;
75 75
76 #define edac_debug_printk(level, fmt, arg...) \ 76 #define edac_debug_printk(level, fmt, arg...) \
77 do { \ 77 do { \
78 if (level <= edac_debug_level) \ 78 if (level <= edac_debug_level) \
79 edac_printk(KERN_DEBUG, EDAC_DEBUG, \ 79 edac_printk(KERN_DEBUG, EDAC_DEBUG, \
80 "%s: " fmt, __func__, ##arg); \ 80 "%s: " fmt, __func__, ##arg); \
81 } while (0) 81 } while (0)
82 82
83 #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) 83 #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
84 #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) 84 #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
85 #define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ ) 85 #define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
86 #define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) 86 #define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
87 #define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) 87 #define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
88 88
89 #else /* !CONFIG_EDAC_DEBUG */ 89 #else /* !CONFIG_EDAC_DEBUG */
90 90
91 #define debugf0( ... ) 91 #define debugf0( ... )
92 #define debugf1( ... ) 92 #define debugf1( ... )
93 #define debugf2( ... ) 93 #define debugf2( ... )
94 #define debugf3( ... ) 94 #define debugf3( ... )
95 #define debugf4( ... ) 95 #define debugf4( ... )
96 96
97 #endif /* !CONFIG_EDAC_DEBUG */ 97 #endif /* !CONFIG_EDAC_DEBUG */
98 98
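The level argument gates output against the module-wide edac_debug_level, so the higher-numbered debugf macros only print when more verbose debugging was requested. A userspace mock of the same gating, for illustration only:

#include <stdio.h>

static int edac_debug_level = 2;	/* stands in for the module parameter */

#define edac_debug_printk(level, fmt, ...) \
	do { \
		if ((level) <= edac_debug_level) \
			printf("EDAC DEBUG: " fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	edac_debug_printk(1, "printed, since 1 <= %d\n", edac_debug_level);
	edac_debug_printk(3, "suppressed, since 3 > %d\n", edac_debug_level);
	return 0;
}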
99 #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \ 99 #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
100 PCI_DEVICE_ID_ ## vend ## _ ## dev 100 PCI_DEVICE_ID_ ## vend ## _ ## dev
101 101
102 #define edac_dev_name(dev) (dev)->dev_name 102 #define edac_dev_name(dev) (dev)->dev_name
103 103
104 /* memory devices */ 104 /* memory devices */
105 enum dev_type { 105 enum dev_type {
106 DEV_UNKNOWN = 0, 106 DEV_UNKNOWN = 0,
107 DEV_X1, 107 DEV_X1,
108 DEV_X2, 108 DEV_X2,
109 DEV_X4, 109 DEV_X4,
110 DEV_X8, 110 DEV_X8,
111 DEV_X16, 111 DEV_X16,
112 DEV_X32, /* Do these parts exist? */ 112 DEV_X32, /* Do these parts exist? */
113 DEV_X64 /* Do these parts exist? */ 113 DEV_X64 /* Do these parts exist? */
114 }; 114 };
115 115
116 #define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN) 116 #define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
117 #define DEV_FLAG_X1 BIT(DEV_X1) 117 #define DEV_FLAG_X1 BIT(DEV_X1)
118 #define DEV_FLAG_X2 BIT(DEV_X2) 118 #define DEV_FLAG_X2 BIT(DEV_X2)
119 #define DEV_FLAG_X4 BIT(DEV_X4) 119 #define DEV_FLAG_X4 BIT(DEV_X4)
120 #define DEV_FLAG_X8 BIT(DEV_X8) 120 #define DEV_FLAG_X8 BIT(DEV_X8)
121 #define DEV_FLAG_X16 BIT(DEV_X16) 121 #define DEV_FLAG_X16 BIT(DEV_X16)
122 #define DEV_FLAG_X32 BIT(DEV_X32) 122 #define DEV_FLAG_X32 BIT(DEV_X32)
123 #define DEV_FLAG_X64 BIT(DEV_X64) 123 #define DEV_FLAG_X64 BIT(DEV_X64)
124 124
125 /* memory types */ 125 /* memory types */
126 enum mem_type { 126 enum mem_type {
127 MEM_EMPTY = 0, /* Empty csrow */ 127 MEM_EMPTY = 0, /* Empty csrow */
128 MEM_RESERVED, /* Reserved csrow type */ 128 MEM_RESERVED, /* Reserved csrow type */
129 MEM_UNKNOWN, /* Unknown csrow type */ 129 MEM_UNKNOWN, /* Unknown csrow type */
130 MEM_FPM, /* Fast page mode */ 130 MEM_FPM, /* Fast page mode */
131 MEM_EDO, /* Extended data out */ 131 MEM_EDO, /* Extended data out */
132 MEM_BEDO, /* Burst Extended data out */ 132 MEM_BEDO, /* Burst Extended data out */
133 MEM_SDR, /* Single data rate SDRAM */ 133 MEM_SDR, /* Single data rate SDRAM */
134 MEM_RDR, /* Registered single data rate SDRAM */ 134 MEM_RDR, /* Registered single data rate SDRAM */
135 MEM_DDR, /* Double data rate SDRAM */ 135 MEM_DDR, /* Double data rate SDRAM */
136 MEM_RDDR, /* Registered Double data rate SDRAM */ 136 MEM_RDDR, /* Registered Double data rate SDRAM */
137 MEM_RMBS, /* Rambus DRAM */ 137 MEM_RMBS, /* Rambus DRAM */
138 MEM_DDR2, /* DDR2 RAM */ 138 MEM_DDR2, /* DDR2 RAM */
139 MEM_FB_DDR2, /* fully buffered DDR2 */ 139 MEM_FB_DDR2, /* fully buffered DDR2 */
140 MEM_RDDR2, /* Registered DDR2 RAM */ 140 MEM_RDDR2, /* Registered DDR2 RAM */
141 MEM_XDR, /* Rambus XDR */ 141 MEM_XDR, /* Rambus XDR */
142 MEM_DDR3, /* DDR3 RAM */ 142 MEM_DDR3, /* DDR3 RAM */
143 MEM_RDDR3, /* Registered DDR3 RAM */ 143 MEM_RDDR3, /* Registered DDR3 RAM */
144 }; 144 };
145 145
146 #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) 146 #define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
147 #define MEM_FLAG_RESERVED BIT(MEM_RESERVED) 147 #define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
148 #define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN) 148 #define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
149 #define MEM_FLAG_FPM BIT(MEM_FPM) 149 #define MEM_FLAG_FPM BIT(MEM_FPM)
150 #define MEM_FLAG_EDO BIT(MEM_EDO) 150 #define MEM_FLAG_EDO BIT(MEM_EDO)
151 #define MEM_FLAG_BEDO BIT(MEM_BEDO) 151 #define MEM_FLAG_BEDO BIT(MEM_BEDO)
152 #define MEM_FLAG_SDR BIT(MEM_SDR) 152 #define MEM_FLAG_SDR BIT(MEM_SDR)
153 #define MEM_FLAG_RDR BIT(MEM_RDR) 153 #define MEM_FLAG_RDR BIT(MEM_RDR)
154 #define MEM_FLAG_DDR BIT(MEM_DDR) 154 #define MEM_FLAG_DDR BIT(MEM_DDR)
155 #define MEM_FLAG_RDDR BIT(MEM_RDDR) 155 #define MEM_FLAG_RDDR BIT(MEM_RDDR)
156 #define MEM_FLAG_RMBS BIT(MEM_RMBS) 156 #define MEM_FLAG_RMBS BIT(MEM_RMBS)
157 #define MEM_FLAG_DDR2 BIT(MEM_DDR2) 157 #define MEM_FLAG_DDR2 BIT(MEM_DDR2)
158 #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) 158 #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
159 #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) 159 #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
160 #define MEM_FLAG_XDR BIT(MEM_XDR) 160 #define MEM_FLAG_XDR BIT(MEM_XDR)
161 #define MEM_FLAG_DDR3 BIT(MEM_DDR3) 161 #define MEM_FLAG_DDR3 BIT(MEM_DDR3)
162 #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) 162 #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
163 163
164 /* chipset Error Detection and Correction capabilities and mode */ 164 /* chipset Error Detection and Correction capabilities and mode */
165 enum edac_type { 165 enum edac_type {
166 EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ 166 EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
167 EDAC_NONE, /* Doesn't support ECC */ 167 EDAC_NONE, /* Doesn't support ECC */
168 EDAC_RESERVED, /* Reserved ECC type */ 168 EDAC_RESERVED, /* Reserved ECC type */
169 EDAC_PARITY, /* Detects parity errors */ 169 EDAC_PARITY, /* Detects parity errors */
170 EDAC_EC, /* Error Checking - no correction */ 170 EDAC_EC, /* Error Checking - no correction */
171 EDAC_SECDED, /* Single bit error correction, Double detection */ 171 EDAC_SECDED, /* Single bit error correction, Double detection */
172 EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */ 172 EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */
173 EDAC_S4ECD4ED, /* Chipkill x4 devices */ 173 EDAC_S4ECD4ED, /* Chipkill x4 devices */
174 EDAC_S8ECD8ED, /* Chipkill x8 devices */ 174 EDAC_S8ECD8ED, /* Chipkill x8 devices */
175 EDAC_S16ECD16ED, /* Chipkill x16 devices */ 175 EDAC_S16ECD16ED, /* Chipkill x16 devices */
176 }; 176 };
177 177
178 #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) 178 #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
179 #define EDAC_FLAG_NONE BIT(EDAC_NONE) 179 #define EDAC_FLAG_NONE BIT(EDAC_NONE)
180 #define EDAC_FLAG_PARITY BIT(EDAC_PARITY) 180 #define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
181 #define EDAC_FLAG_EC BIT(EDAC_EC) 181 #define EDAC_FLAG_EC BIT(EDAC_EC)
182 #define EDAC_FLAG_SECDED BIT(EDAC_SECDED) 182 #define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
183 #define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED) 183 #define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
184 #define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED) 184 #define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
185 #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) 185 #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
186 #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) 186 #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
187 187
188 /* scrubbing capabilities */ 188 /* scrubbing capabilities */
189 enum scrub_type { 189 enum scrub_type {
190 SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ 190 SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
191 SCRUB_NONE, /* No scrubber */ 191 SCRUB_NONE, /* No scrubber */
192 SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */ 192 SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */
193 SCRUB_SW_SRC, /* Software scrub only errors */ 193 SCRUB_SW_SRC, /* Software scrub only errors */
194 SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */ 194 SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */
195 SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */ 195 SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */
196 SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */ 196 SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */
197 SCRUB_HW_SRC, /* Hardware scrub only errors */ 197 SCRUB_HW_SRC, /* Hardware scrub only errors */
198 SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */ 198 SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */
199 SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */ 199 SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */
200 }; 200 };
201 201
202 #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) 202 #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
203 #define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) 203 #define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC)
204 #define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) 204 #define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC)
205 #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE) 205 #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE)
206 #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) 206 #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG)
207 #define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) 207 #define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC)
208 #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) 208 #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC)
209 #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) 209 #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
210 210
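A driver advertises its scrubber by setting these flags in mci->scrub_cap and recording the active mode in mci->scrub_mode. For a controller with a tunable hardware scrubber, such as the e752x above, the setup would look roughly like this (a sketch, not any driver's verbatim code):

	mci->scrub_cap = SCRUB_FLAG_HW_TUN;
	mci->scrub_mode = SCRUB_HW_TUNABLE;
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;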
211 /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ 211 /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
212 212
213 /* EDAC internal operation states */ 213 /* EDAC internal operation states */
214 #define OP_ALLOC 0x100 214 #define OP_ALLOC 0x100
215 #define OP_RUNNING_POLL 0x201 215 #define OP_RUNNING_POLL 0x201
216 #define OP_RUNNING_INTERRUPT 0x202 216 #define OP_RUNNING_INTERRUPT 0x202
217 #define OP_RUNNING_POLL_INTR 0x203 217 #define OP_RUNNING_POLL_INTR 0x203
218 #define OP_OFFLINE 0x300 218 #define OP_OFFLINE 0x300
219 219
220 /* 220 /*
221 * There are several things to be aware of that aren't at all obvious: 221 * There are several things to be aware of that aren't at all obvious:
222 * 222 *
223 * 223 *
224 * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc.. 224 * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
225 * 225 *
226 * These are some of the many terms that are thrown about that don't always 226 * These are some of the many terms that are thrown about that don't always
227 * mean what people think they mean (Inconceivable!). In the interest of 227 * mean what people think they mean (Inconceivable!). In the interest of
228 * creating a common ground for discussion, terms and their definitions 228 * creating a common ground for discussion, terms and their definitions
229 * will be established. 229 * will be established.
230 * 230 *
231 * Memory devices: The individual chip on a memory stick. These devices 231 * Memory devices: The individual chip on a memory stick. These devices
232 * commonly output 4 and 8 bits each. Grouping several 232 * commonly output 4 and 8 bits each. Grouping several
233 * of these in parallel provides 64 bits which is common 233 * of these in parallel provides 64 bits which is common
234 * for a memory stick. 234 * for a memory stick.
235 * 235 *
236 * Memory Stick: A printed circuit board that aggregates multiple 236 * Memory Stick: A printed circuit board that aggregates multiple
237 * memory devices in parallel. This is the atomic 237 * memory devices in parallel. This is the atomic
238 * memory component that is purchasable by Joe consumer 238 * memory component that is purchasable by Joe consumer
239 * and loaded into a memory socket. 239 * and loaded into a memory socket.
240 * 240 *
241 * Socket: A physical connector on the motherboard that accepts 241 * Socket: A physical connector on the motherboard that accepts
242 * a single memory stick. 242 * a single memory stick.
243 * 243 *
244 * Channel: Set of memory devices on a memory stick that must be 244 * Channel: Set of memory devices on a memory stick that must be
245 * grouped in parallel with one or more additional 245 * grouped in parallel with one or more additional
246 * channels from other memory sticks. This parallel 246 * channels from other memory sticks. This parallel
247 * grouping of the output from multiple channels is 247 * grouping of the output from multiple channels is
248 * necessary for the smallest granularity of memory access. 248 * necessary for the smallest granularity of memory access.
249 * Some memory controllers are capable of single channel - 249 * Some memory controllers are capable of single channel -
250 * which means that memory sticks can be loaded 250 * which means that memory sticks can be loaded
251 * individually. Other memory controllers are only 251 * individually. Other memory controllers are only
252 * capable of dual channel - which means that memory 252 * capable of dual channel - which means that memory
253 * sticks must be loaded as pairs (see "socket set"). 253 * sticks must be loaded as pairs (see "socket set").
254 * 254 *
255 * Chip-select row: All of the memory devices that are selected together 255 * Chip-select row: All of the memory devices that are selected together
256 * for a single, minimum grain of memory access. 256 * for a single, minimum grain of memory access.
257 * This selects all of the parallel memory devices across 257 * This selects all of the parallel memory devices across
258 * all of the parallel channels. Common chip-select rows 258 * all of the parallel channels. Common chip-select rows
259 * for single channel are 64 bits, for dual channel 128 259 * for single channel are 64 bits, for dual channel 128
260 * bits. 260 * bits.
261 * 261 *
262 * Single-Ranked stick: A single-ranked stick has one chip-select row of memory. 262 * Single-Ranked stick: A single-ranked stick has one chip-select row of memory.
263 * Motherboards commonly drive two chip-select pins to 263 * Motherboards commonly drive two chip-select pins to
264 * a memory stick. A single-ranked stick will occupy 264 * a memory stick. A single-ranked stick will occupy
265 * only one of those rows. The other will be unused. 265 * only one of those rows. The other will be unused.
266 * 266 *
267 * Double-Ranked stick: A double-ranked stick has two chip-select rows which 267 * Double-Ranked stick: A double-ranked stick has two chip-select rows which
268 * access different sets of memory devices. The two 268 * access different sets of memory devices. The two
269 * rows cannot be accessed concurrently. 269 * rows cannot be accessed concurrently.
270 * 270 *
271 * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. 271 * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
272 * A double-sided stick has two chip-select rows which 272 * A double-sided stick has two chip-select rows which
273 * access different sets of memory devices. The two 273 * access different sets of memory devices. The two
274 * rows cannot be accessed concurrently. "Double-sided" 274 * rows cannot be accessed concurrently. "Double-sided"
275 * says nothing about whether the memory devices are mounted 275 * says nothing about whether the memory devices are mounted
276 * on both sides of the memory stick. 276 * on both sides of the memory stick.
277 * 277 *
278 * Socket set: All of the memory sticks that are required for 278 * Socket set: All of the memory sticks that are required for
279 * a single memory access or all of the memory sticks 279 * a single memory access or all of the memory sticks
280 * spanned by a chip-select row. A single socket set 280 * spanned by a chip-select row. A single socket set
281 * has two chip-select rows and if double-sided sticks 281 * has two chip-select rows and if double-sided sticks
282 * are used these will occupy those chip-select rows. 282 * are used these will occupy those chip-select rows.
283 * 283 *
284 * Bank: This term is avoided because it is unclear when 284 * Bank: This term is avoided because it is unclear when
285 * one needs to distinguish between chip-select rows and 285 * one needs to distinguish between chip-select rows and
286 * socket sets. 286 * socket sets.
287 * 287 *
288 * Controller pages: 288 * Controller pages:
289 * 289 *
290 * Physical pages: 290 * Physical pages:
291 * 291 *
292 * Virtual pages: 292 * Virtual pages:
293 * 293 *
294 * 294 *
295 * STRUCTURE ORGANIZATION AND CHOICES 295 * STRUCTURE ORGANIZATION AND CHOICES
296 * 296 *
297 * 297 *
298 * 298 *
299 * PS - I enjoyed writing all that about as much as you enjoyed reading it. 299 * PS - I enjoyed writing all that about as much as you enjoyed reading it.
300 */ 300 */
301 301
302 struct channel_info { 302 struct channel_info {
303 int chan_idx; /* channel index */ 303 int chan_idx; /* channel index */
304 u32 ce_count; /* Correctable Errors for this CHANNEL */ 304 u32 ce_count; /* Correctable Errors for this CHANNEL */
305 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ 305 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
306 struct csrow_info *csrow; /* the parent */ 306 struct csrow_info *csrow; /* the parent */
307 }; 307 };
308 308
309 struct csrow_info { 309 struct csrow_info {
310 unsigned long first_page; /* first page number in dimm */ 310 unsigned long first_page; /* first page number in dimm */
311 unsigned long last_page; /* last page number in dimm */ 311 unsigned long last_page; /* last page number in dimm */
312 unsigned long page_mask; /* used for interleaving - 312 unsigned long page_mask; /* used for interleaving -
313 * 0UL for non intlv 313 * 0UL for non intlv
314 */ 314 */
315 u32 nr_pages; /* number of pages in csrow */ 315 u32 nr_pages; /* number of pages in csrow */
316 u32 grain; /* granularity of reported error in bytes */ 316 u32 grain; /* granularity of reported error in bytes */
317 int csrow_idx; /* the chip-select row */ 317 int csrow_idx; /* the chip-select row */
318 enum dev_type dtype; /* memory device type */ 318 enum dev_type dtype; /* memory device type */
319 u32 ue_count; /* Uncorrectable Errors for this csrow */ 319 u32 ue_count; /* Uncorrectable Errors for this csrow */
320 u32 ce_count; /* Correctable Errors for this csrow */ 320 u32 ce_count; /* Correctable Errors for this csrow */
321 enum mem_type mtype; /* memory csrow type */ 321 enum mem_type mtype; /* memory csrow type */
322 enum edac_type edac_mode; /* EDAC mode for this csrow */ 322 enum edac_type edac_mode; /* EDAC mode for this csrow */
323 struct mem_ctl_info *mci; /* the parent */ 323 struct mem_ctl_info *mci; /* the parent */
324 324
325 struct kobject kobj; /* sysfs kobject for this csrow */ 325 struct kobject kobj; /* sysfs kobject for this csrow */
326 326
327 /* channel information for this csrow */ 327 /* channel information for this csrow */
328 u32 nr_channels; 328 u32 nr_channels;
329 struct channel_info *channels; 329 struct channel_info *channels;
330 }; 330 };
331 331
332 struct mcidev_sysfs_group { 332 struct mcidev_sysfs_group {
333 const char *name; /* group name */ 333 const char *name; /* group name */
334 const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */ 334 const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
335 }; 335 };
336 336
337 struct mcidev_sysfs_group_kobj { 337 struct mcidev_sysfs_group_kobj {
338 struct list_head list; /* list for all instances within a mc */ 338 struct list_head list; /* list for all instances within a mc */
339 339
340 struct kobject kobj; /* kobj for the group */ 340 struct kobject kobj; /* kobj for the group */
341 341
342 const struct mcidev_sysfs_group *grp; /* group description table */ 342 const struct mcidev_sysfs_group *grp; /* group description table */
343 struct mem_ctl_info *mci; /* the parent */ 343 struct mem_ctl_info *mci; /* the parent */
344 }; 344 };
345 345
346 /* mcidev_sysfs_attribute structure 346 /* mcidev_sysfs_attribute structure
347 * used for driver sysfs attributes and in mem_ctl_info 347 * used for driver sysfs attributes and in mem_ctl_info
348 * sysfs top level entries 348 * sysfs top level entries
349 */ 349 */
350 struct mcidev_sysfs_attribute { 350 struct mcidev_sysfs_attribute {
351 /* It should use either attr or grp */ 351 /* It should use either attr or grp */
352 struct attribute attr; 352 struct attribute attr;
353 const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */ 353 const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
354 354
355 /* Ops for show/store values at the attribute - not used on group */ 355 /* Ops for show/store values at the attribute - not used on group */
356 ssize_t (*show)(struct mem_ctl_info *,char *); 356 ssize_t (*show)(struct mem_ctl_info *,char *);
357 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); 357 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
358 }; 358 };
359 359
360 /* MEMORY controller information structure 360 /* MEMORY controller information structure
361 */ 361 */
362 struct mem_ctl_info { 362 struct mem_ctl_info {
363 struct list_head link; /* for global list of mem_ctl_info structs */ 363 struct list_head link; /* for global list of mem_ctl_info structs */
364 364
365 struct module *owner; /* Module owner of this control struct */ 365 struct module *owner; /* Module owner of this control struct */
366 366
367 unsigned long mtype_cap; /* memory types supported by mc */ 367 unsigned long mtype_cap; /* memory types supported by mc */
368 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ 368 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
369 unsigned long edac_cap; /* configuration capabilities - this is 369 unsigned long edac_cap; /* configuration capabilities - this is
370 * closely related to edac_ctl_cap. The 370 * closely related to edac_ctl_cap. The
371 * difference is that the controller may be 371 * difference is that the controller may be
372 * capable of s4ecd4ed which would be listed 372 * capable of s4ecd4ed which would be listed
373 * in edac_ctl_cap, but if channels aren't 373 * in edac_ctl_cap, but if channels aren't
374 * capable of s4ecd4ed then the edac_cap would 374 * capable of s4ecd4ed then the edac_cap would
375 * not have that capability. 375 * not have that capability.
376 */ 376 */
377 unsigned long scrub_cap; /* chipset scrub capabilities */ 377 unsigned long scrub_cap; /* chipset scrub capabilities */
378 enum scrub_type scrub_mode; /* current scrub mode */ 378 enum scrub_type scrub_mode; /* current scrub mode */
379 379
380 /* Translates sdram memory scrub rate given in bytes/sec to the 380 /* Translates sdram memory scrub rate given in bytes/sec to the
381 internal representation and configures whatever else needs 381 internal representation and configures whatever else needs
382 to be configured. 382 to be configured.
383 */ 383 */
384 int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw); 384 int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);
385 385
386 /* Get the current sdram memory scrub rate from the internal 386 /* Get the current sdram memory scrub rate from the internal
387 representation and convert it to the closest matching 387 representation and convert it to the closest matching
388 bandwidth in bytes/sec. 388 bandwidth in bytes/sec.
389 */ 389 */
390 int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw); 390 int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
391 391
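This is the interface change the commit is about: ->get_sdram_scrub_rate() loses its u32 *bw output argument, and both callbacks now return the bandwidth actually configured in bytes/sec, with a negative value still meaning the operation failed. A sketch of a driver-side pair under the new convention (FOO_SCRUB_CTL and the 16x encoding are hypothetical, not from any real chipset):

static int foo_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
	struct pci_dev *pdev = to_pci_dev(mci->dev);
	u32 val = new_bw / 16;			/* hypothetical encoding */

	if (val > 0xffff)
		val = 0xffff;

	pci_write_config_word(pdev, FOO_SCRUB_CTL, (u16)val);

	/* report the rate actually programmed, not the one requested */
	return val * 16;
}

static int foo_get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
	struct pci_dev *pdev = to_pci_dev(mci->dev);
	u16 val;

	if (pci_read_config_word(pdev, FOO_SCRUB_CTL, &val))
		return -ENODEV;			/* negative value == error */

	return val * 16;			/* decode back to bytes/sec */
}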
392 392
393 /* pointer to edac checking routine */ 393 /* pointer to edac checking routine */
394 void (*edac_check) (struct mem_ctl_info * mci); 394 void (*edac_check) (struct mem_ctl_info * mci);
395 395
396 /* 396 /*
397 * Remaps memory pages: controller pages to physical pages. 397 * Remaps memory pages: controller pages to physical pages.
398 * For most MC's, this will be NULL. 398 * For most MC's, this will be NULL.
399 */ 399 */
400 /* FIXME - why not send the phys page to begin with? */ 400 /* FIXME - why not send the phys page to begin with? */
401 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, 401 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
402 unsigned long page); 402 unsigned long page);
403 int mc_idx; 403 int mc_idx;
404 int nr_csrows; 404 int nr_csrows;
405 struct csrow_info *csrows; 405 struct csrow_info *csrows;
406 /* 406 /*
407 * FIXME - what about controllers on other busses? - IDs must be 407 * FIXME - what about controllers on other busses? - IDs must be
408 * unique. dev pointer should be sufficiently unique, but 408 * unique. dev pointer should be sufficiently unique, but
409 * BUS:SLOT.FUNC numbers may not be unique. 409 * BUS:SLOT.FUNC numbers may not be unique.
410 */ 410 */
411 struct device *dev; 411 struct device *dev;
412 const char *mod_name; 412 const char *mod_name;
413 const char *mod_ver; 413 const char *mod_ver;
414 const char *ctl_name; 414 const char *ctl_name;
415 const char *dev_name; 415 const char *dev_name;
416 char proc_name[MC_PROC_NAME_MAX_LEN + 1]; 416 char proc_name[MC_PROC_NAME_MAX_LEN + 1];
417 void *pvt_info; 417 void *pvt_info;
418 u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */ 418 u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
419 u32 ce_noinfo_count; /* Correctable Errors w/o info */ 419 u32 ce_noinfo_count; /* Correctable Errors w/o info */
420 u32 ue_count; /* Total Uncorrectable Errors for this MC */ 420 u32 ue_count; /* Total Uncorrectable Errors for this MC */
421 u32 ce_count; /* Total Correctable Errors for this MC */ 421 u32 ce_count; /* Total Correctable Errors for this MC */
422 unsigned long start_time; /* mci load start time (in jiffies) */ 422 unsigned long start_time; /* mci load start time (in jiffies) */
423 423
424 /* this stuff is for safe removal of mc devices from global list while 424 /* this stuff is for safe removal of mc devices from global list while
425 * NMI handlers may be traversing list 425 * NMI handlers may be traversing list
426 */ 426 */
427 struct rcu_head rcu; 427 struct rcu_head rcu;
428 struct completion complete; 428 struct completion complete;
429 429
430 /* edac sysfs device control */ 430 /* edac sysfs device control */
431 struct kobject edac_mci_kobj; 431 struct kobject edac_mci_kobj;
432 432
433 /* list for all grp instances within a mc */ 433 /* list for all grp instances within a mc */
434 struct list_head grp_kobj_list; 434 struct list_head grp_kobj_list;
435 435
436 /* Additional top controller level attributes, but specified 436 /* Additional top controller level attributes, but specified
437 * by the low level driver. 437 * by the low level driver.
438 * 438 *
439 * Set by the low level driver to provide attributes at the 439 * Set by the low level driver to provide attributes at the
440 * controller level, same level as 'ue_count' and 'ce_count' above. 440 * controller level, same level as 'ue_count' and 'ce_count' above.
441 * An array of structures, NULL terminated 441 * An array of structures, NULL terminated
442 * 442 *
443 * If attributes are desired, then set to array of attributes 443 * If attributes are desired, then set to array of attributes
444 * If no attributes are desired, leave NULL 444 * If no attributes are desired, leave NULL
445 */ 445 */
446 const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; 446 const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
447 447
448 /* work struct for this MC */ 448 /* work struct for this MC */
449 struct delayed_work work; 449 struct delayed_work work;
450 450
451 /* the internal state of this controller instance */ 451 /* the internal state of this controller instance */
452 int op_state; 452 int op_state;
453 }; 453 };
454 454
455 /* 455 /*
456 * The following are the structures to provide for a generic 456 * The following are the structures to provide for a generic
457 * or abstract 'edac_device'. This set of structures and the 457 * or abstract 'edac_device'. This set of structures and the
458 * code that implements the APIs for the same, provide for 458 * code that implements the APIs for the same, provide for
459 * registering EDAC type devices which are NOT standard memory. 459 * registering EDAC type devices which are NOT standard memory.
460 * 460 *
461 * CPU caches (L1 and L2) 461 * CPU caches (L1 and L2)
462 * DMA engines 462 * DMA engines
463 * Core CPU switches 463 * Core CPU switches
464 * Fabric switch units 464 * Fabric switch units
465 * PCIe interface controllers 465 * PCIe interface controllers
466 * other EDAC/ECC type devices that can be monitored for 466 * other EDAC/ECC type devices that can be monitored for
467 * errors, etc. 467 * errors, etc.
468 * 468 *
469 * It allows for a two-level hierarchy. For example: 469 * It allows for a two-level hierarchy. For example:
470 * 470 *
471 * cache could be composed of L1, L2 and L3 levels of cache. 471 * cache could be composed of L1, L2 and L3 levels of cache.
472 * Each CPU core would have its own L1 cache, while sharing 472 * Each CPU core would have its own L1 cache, while sharing
473 * L2 and maybe L3 caches. 473 * L2 and maybe L3 caches.
474 * 474 *
475 * View them arranged, via the sysfs presentation: 475 * View them arranged, via the sysfs presentation:
476 * /sys/devices/system/edac/.. 476 * /sys/devices/system/edac/..
477 * 477 *
478 * mc/ <existing memory device directory> 478 * mc/ <existing memory device directory>
479 * cpu/cpu0/.. <L1 and L2 block directory> 479 * cpu/cpu0/.. <L1 and L2 block directory>
480 * /L1-cache/ce_count 480 * /L1-cache/ce_count
481 * /ue_count 481 * /ue_count
482 * /L2-cache/ce_count 482 * /L2-cache/ce_count
483 * /ue_count 483 * /ue_count
484 * cpu/cpu1/.. <L1 and L2 block directory> 484 * cpu/cpu1/.. <L1 and L2 block directory>
485 * /L1-cache/ce_count 485 * /L1-cache/ce_count
486 * /ue_count 486 * /ue_count
487 * /L2-cache/ce_count 487 * /L2-cache/ce_count
488 * /ue_count 488 * /ue_count
489 * ... 489 * ...
490 * 490 *
491 * the L1 and L2 directories would be "edac_device_block's" 491 * the L1 and L2 directories would be "edac_device_block's"
492 */ 492 */
493 493
494 struct edac_device_counter { 494 struct edac_device_counter {
495 u32 ue_count; 495 u32 ue_count;
496 u32 ce_count; 496 u32 ce_count;
497 }; 497 };
498 498
499 /* forward reference */ 499 /* forward reference */
500 struct edac_device_ctl_info; 500 struct edac_device_ctl_info;
501 struct edac_device_block; 501 struct edac_device_block;
502 502
503 /* edac_dev_sysfs_attribute structure 503 /* edac_dev_sysfs_attribute structure
504 * used for driver sysfs attributes in mem_ctl_info 504 * used for driver sysfs attributes in mem_ctl_info
505 * for extra controls and attributes: 505 * for extra controls and attributes:
506 * like high level error Injection controls 506 * like high level error Injection controls
507 */ 507 */
508 struct edac_dev_sysfs_attribute { 508 struct edac_dev_sysfs_attribute {
509 struct attribute attr; 509 struct attribute attr;
510 ssize_t (*show)(struct edac_device_ctl_info *, char *); 510 ssize_t (*show)(struct edac_device_ctl_info *, char *);
511 ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); 511 ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
512 }; 512 };
513 513
514 /* edac_dev_sysfs_block_attribute structure 514 /* edac_dev_sysfs_block_attribute structure
515 * 515 *
516 * used in leaf 'block' nodes for adding controls/attributes 516 * used in leaf 'block' nodes for adding controls/attributes
517 * 517 *
518 * each block in each instance of the containing control structure 518 * each block in each instance of the containing control structure
519 * can have an array of the following. The show and store functions 519 * can have an array of the following. The show and store functions
520 * will be filled in with the show/store function in the 520 * will be filled in with the show/store function in the
521 * low level driver. 521 * low level driver.
522 * 522 *
523 * The 'value' field holds the actual value used for 523 * The 'value' field holds the actual value used for
524 * counting 524 * counting
525 */ 525 */
526 struct edac_dev_sysfs_block_attribute { 526 struct edac_dev_sysfs_block_attribute {
527 struct attribute attr; 527 struct attribute attr;
528 ssize_t (*show)(struct kobject *, struct attribute *, char *); 528 ssize_t (*show)(struct kobject *, struct attribute *, char *);
529 ssize_t (*store)(struct kobject *, struct attribute *, 529 ssize_t (*store)(struct kobject *, struct attribute *,
530 const char *, size_t); 530 const char *, size_t);
531 struct edac_device_block *block; 531 struct edac_device_block *block;
532 532
533 unsigned int value; 533 unsigned int value;
534 }; 534 };
535 535
536 /* device block control structure */ 536 /* device block control structure */
537 struct edac_device_block { 537 struct edac_device_block {
538 struct edac_device_instance *instance; /* Up Pointer */ 538 struct edac_device_instance *instance; /* Up Pointer */
539 char name[EDAC_DEVICE_NAME_LEN + 1]; 539 char name[EDAC_DEVICE_NAME_LEN + 1];
540 540
541 struct edac_device_counter counters; /* basic UE and CE counters */ 541 struct edac_device_counter counters; /* basic UE and CE counters */
542 542
543 int nr_attribs; /* how many attributes */ 543 int nr_attribs; /* how many attributes */
544 544
545 /* this block's attributes, could be NULL */ 545 /* this block's attributes, could be NULL */
546 struct edac_dev_sysfs_block_attribute *block_attributes; 546 struct edac_dev_sysfs_block_attribute *block_attributes;
547 547
548 /* edac sysfs device control */ 548 /* edac sysfs device control */
549 struct kobject kobj; 549 struct kobject kobj;
550 }; 550 };
551 551
552 /* device instance control structure */ 552 /* device instance control structure */
553 struct edac_device_instance { 553 struct edac_device_instance {
554 struct edac_device_ctl_info *ctl; /* Up pointer */ 554 struct edac_device_ctl_info *ctl; /* Up pointer */
555 char name[EDAC_DEVICE_NAME_LEN + 4]; 555 char name[EDAC_DEVICE_NAME_LEN + 4];
556 556
557 struct edac_device_counter counters; /* instance counters */ 557 struct edac_device_counter counters; /* instance counters */
558 558
559 u32 nr_blocks; /* how many blocks */ 559 u32 nr_blocks; /* how many blocks */
560 struct edac_device_block *blocks; /* block array */ 560 struct edac_device_block *blocks; /* block array */
561 561
562 /* edac sysfs device control */ 562 /* edac sysfs device control */
563 struct kobject kobj; 563 struct kobject kobj;
564 }; 564 };
565 565
566 566
567 /* 567 /*
568 * Abstract edac_device control info structure 568 * Abstract edac_device control info structure
569 * 569 *
570 */ 570 */
571 struct edac_device_ctl_info { 571 struct edac_device_ctl_info {
572 /* for global list of edac_device_ctl_info structs */ 572 /* for global list of edac_device_ctl_info structs */
573 struct list_head link; 573 struct list_head link;
574 574
575 struct module *owner; /* Module owner of this control struct */ 575 struct module *owner; /* Module owner of this control struct */
576 576
577 int dev_idx; 577 int dev_idx;
578 578
579 /* Per instance controls for this edac_device */ 579 /* Per instance controls for this edac_device */
580 int log_ue; /* boolean for logging UEs */ 580 int log_ue; /* boolean for logging UEs */
581 int log_ce; /* boolean for logging CEs */ 581 int log_ce; /* boolean for logging CEs */
582 int panic_on_ue; /* boolean for panic'ing on an UE */ 582 int panic_on_ue; /* boolean for panic'ing on an UE */
583 unsigned poll_msec; /* polling interval in milliseconds */ 583 unsigned poll_msec; /* polling interval in milliseconds */
584 unsigned long delay; /* number of jiffies for poll_msec */ 584 unsigned long delay; /* number of jiffies for poll_msec */
585 585
586 /* Additional top controller level attributes, specified 586 /* Additional top controller level attributes, specified
587 * by the low level driver. 587 * by the low level driver.
588 * 588 *
589 * Set by the low level driver to provide attributes at the 589 * Set by the low level driver to provide attributes at the
590 * controller level, same level as 'ue_count' and 'ce_count' above. 590 * controller level, same level as 'ue_count' and 'ce_count' above.
591 * An array of structures, NULL terminated 591 * An array of structures, NULL terminated
592 * 592 *
593 * If attributes are desired, then set to array of attributes 593 * If attributes are desired, then set to array of attributes
594 * If no attributes are desired, leave NULL 594 * If no attributes are desired, leave NULL
595 */ 595 */
596 struct edac_dev_sysfs_attribute *sysfs_attributes; 596 struct edac_dev_sysfs_attribute *sysfs_attributes;
597 597
598 /* pointer to main 'edac' class in sysfs */ 598 /* pointer to main 'edac' class in sysfs */
599 struct sysdev_class *edac_class; 599 struct sysdev_class *edac_class;
600 600
601 /* the internal state of this controller instance */ 601 /* the internal state of this controller instance */
602 int op_state; 602 int op_state;
603 /* work struct for this instance */ 603 /* work struct for this instance */
604 struct delayed_work work; 604 struct delayed_work work;
605 605
606 /* pointer to edac polling check routine: 606 /* pointer to edac polling check routine:
607 * If NOT NULL: points to polling check routine 607 * If NOT NULL: points to polling check routine
608 * If NULL: assumes INTERRUPT operation, where 608 * If NULL: assumes INTERRUPT operation, where
609 * the MC driver will receive events 609 * the MC driver will receive events
610 */ 610 */
611 void (*edac_check) (struct edac_device_ctl_info * edac_dev); 611 void (*edac_check) (struct edac_device_ctl_info * edac_dev);
612 612
613 struct device *dev; /* pointer to device structure */ 613 struct device *dev; /* pointer to device structure */
614 614
615 const char *mod_name; /* module name */ 615 const char *mod_name; /* module name */
616 const char *ctl_name; /* edac controller name */ 616 const char *ctl_name; /* edac controller name */
617 const char *dev_name; /* pci/platform/etc... name */ 617 const char *dev_name; /* pci/platform/etc... name */
618 618
619 void *pvt_info; /* pointer to 'private driver' info */ 619 void *pvt_info; /* pointer to 'private driver' info */
620 620
621 unsigned long start_time; /* edac_device load start time (jiffies) */ 621 unsigned long start_time; /* edac_device load start time (jiffies) */
622 622
623 /* these are for safe removal of mc devices from global list while 623 /* these are for safe removal of mc devices from global list while
624 * NMI handlers may be traversing list 624 * NMI handlers may be traversing list
625 */ 625 */
626 struct rcu_head rcu; 626 struct rcu_head rcu;
627 struct completion removal_complete; 627 struct completion removal_complete;
628 628
629 /* sysfs top name under 'edac' directory 629 /* sysfs top name under 'edac' directory
630 * and instance name: 630 * and instance name:
631 * cpu/cpu0/... 631 * cpu/cpu0/...
632 * cpu/cpu1/... 632 * cpu/cpu1/...
633 * cpu/cpu2/... 633 * cpu/cpu2/...
634 * ... 634 * ...
635 */ 635 */
636 char name[EDAC_DEVICE_NAME_LEN + 1]; 636 char name[EDAC_DEVICE_NAME_LEN + 1];
637 637
638 /* Number of instances supported on this control structure 638 /* Number of instances supported on this control structure
639 * and the array of those instances 639 * and the array of those instances
640 */ 640 */
641 u32 nr_instances; 641 u32 nr_instances;
642 struct edac_device_instance *instances; 642 struct edac_device_instance *instances;
643 643
644 /* Event counters for this whole EDAC Device */ 644 /* Event counters for this whole EDAC Device */
645 struct edac_device_counter counters; 645 struct edac_device_counter counters;
646 646
647 /* edac sysfs device control for the 'name' 647 /* edac sysfs device control for the 'name'
648 * device this structure controls 648 * device this structure controls
649 */ 649 */
650 struct kobject kobj; 650 struct kobject kobj;
651 }; 651 };
652 652
653 /* To get from the instance's work struct to the beginning of the ctl structure */ 653 /* To get from the instance's work struct to the beginning of the ctl structure */
654 #define to_edac_mem_ctl_work(w) \ 654 #define to_edac_mem_ctl_work(w) \
655 container_of(w, struct mem_ctl_info, work) 655 container_of(w, struct mem_ctl_info, work)
656 656
657 #define to_edac_device_ctl_work(w) \ 657 #define to_edac_device_ctl_work(w) \
658 container_of(w,struct edac_device_ctl_info,work) 658 container_of(w,struct edac_device_ctl_info,work)
659 659
660 /* 660 /*
661 * The alloc() and free() functions for the 'edac_device' control info 661 * The alloc() and free() functions for the 'edac_device' control info
662 * structure. An MC driver will allocate one of these for each edac_device 662 * structure. An MC driver will allocate one of these for each edac_device
663 * it is going to control/register with the EDAC CORE. 663 * it is going to control/register with the EDAC CORE.
664 */ 664 */
665 extern struct edac_device_ctl_info *edac_device_alloc_ctl_info( 665 extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
666 unsigned sizeof_private, 666 unsigned sizeof_private,
667 char *edac_device_name, unsigned nr_instances, 667 char *edac_device_name, unsigned nr_instances,
668 char *edac_block_name, unsigned nr_blocks, 668 char *edac_block_name, unsigned nr_blocks,
669 unsigned offset_value, 669 unsigned offset_value,
670 struct edac_dev_sysfs_block_attribute *block_attributes, 670 struct edac_dev_sysfs_block_attribute *block_attributes,
671 unsigned nr_attribs, 671 unsigned nr_attribs,
672 int device_index); 672 int device_index);
673 673
674 /* The offset value can be: 674 /* The offset value can be:
675 * -1 indicating no offset value 675 * -1 indicating no offset value
676 * 0 for zero-based block numbers 676 * 0 for zero-based block numbers
677 * 1 for 1-based block numbers 677 * 1 for 1-based block numbers
678 * any other value as the base for block numbers 678 * any other value as the base for block numbers
679 */ 679 */
680 #define BLOCK_OFFSET_VALUE_OFF ((unsigned) -1) 680 #define BLOCK_OFFSET_VALUE_OFF ((unsigned) -1)
681 681
682 extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info); 682 extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
683 683
684 #ifdef CONFIG_PCI 684 #ifdef CONFIG_PCI
685 685
686 struct edac_pci_counter { 686 struct edac_pci_counter {
687 atomic_t pe_count; 687 atomic_t pe_count;
688 atomic_t npe_count; 688 atomic_t npe_count;
689 }; 689 };
690 690
691 /* 691 /*
692 * Abstract edac_pci control info structure 692 * Abstract edac_pci control info structure
693 * 693 *
694 */ 694 */
695 struct edac_pci_ctl_info { 695 struct edac_pci_ctl_info {
696 /* for global list of edac_pci_ctl_info structs */ 696 /* for global list of edac_pci_ctl_info structs */
697 struct list_head link; 697 struct list_head link;
698 698
699 int pci_idx; 699 int pci_idx;
700 700
701 struct sysdev_class *edac_class; /* pointer to class */ 701 struct sysdev_class *edac_class; /* pointer to class */
702 702
703 /* the internal state of this controller instance */ 703 /* the internal state of this controller instance */
704 int op_state; 704 int op_state;
705 /* work struct for this instance */ 705 /* work struct for this instance */
706 struct delayed_work work; 706 struct delayed_work work;
707 707
708 /* pointer to edac polling check routine: 708 /* pointer to edac polling check routine:
709 * If NOT NULL: points to polling check routine 709 * If NOT NULL: points to polling check routine
710 * If NULL: assumes INTERRUPT operation, where 710 * If NULL: assumes INTERRUPT operation, where
711 * the MC driver will receive events 711 * the MC driver will receive events
712 */ 712 */
713 void (*edac_check) (struct edac_pci_ctl_info * edac_dev); 713 void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
714 714
715 struct device *dev; /* pointer to device structure */ 715 struct device *dev; /* pointer to device structure */
716 716
717 const char *mod_name; /* module name */ 717 const char *mod_name; /* module name */
718 const char *ctl_name; /* edac controller name */ 718 const char *ctl_name; /* edac controller name */
719 const char *dev_name; /* pci/platform/etc... name */ 719 const char *dev_name; /* pci/platform/etc... name */
720 720
721 void *pvt_info; /* pointer to 'private driver' info */ 721 void *pvt_info; /* pointer to 'private driver' info */
722 722
723 unsigned long start_time; /* edac_pci load start time (jiffies) */ 723 unsigned long start_time; /* edac_pci load start time (jiffies) */
724 724
725 /* these are for safe removal of devices from global list while 725 /* these are for safe removal of devices from global list while
726 * NMI handlers may be traversing list 726 * NMI handlers may be traversing list
727 */ 727 */
728 struct rcu_head rcu; 728 struct rcu_head rcu;
729 struct completion complete; 729 struct completion complete;
730 730
731 /* sysfs top name under 'edac' directory 731 /* sysfs top name under 'edac' directory
732 * and instance name: 732 * and instance name:
733 * cpu/cpu0/... 733 * cpu/cpu0/...
734 * cpu/cpu1/... 734 * cpu/cpu1/...
735 * cpu/cpu2/... 735 * cpu/cpu2/...
736 * ... 736 * ...
737 */ 737 */
738 char name[EDAC_DEVICE_NAME_LEN + 1]; 738 char name[EDAC_DEVICE_NAME_LEN + 1];
739 739
740 /* Event counters for this whole EDAC Device */ 740 /* Event counters for this whole EDAC Device */
741 struct edac_pci_counter counters; 741 struct edac_pci_counter counters;
742 742
743 /* edac sysfs device control for the 'name' 743 /* edac sysfs device control for the 'name'
744 * device this structure controls 744 * device this structure controls
745 */ 745 */
746 struct kobject kobj; 746 struct kobject kobj;
747 struct completion kobj_complete; 747 struct completion kobj_complete;
748 }; 748 };
749 749
750 #define to_edac_pci_ctl_work(w) \ 750 #define to_edac_pci_ctl_work(w) \
751 container_of(w, struct edac_pci_ctl_info,work) 751 container_of(w, struct edac_pci_ctl_info,work)
752 752
753 /* write all or some bits in a byte-register */ 753 /* write all or some bits in a byte-register */
754 static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value, 754 static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
755 u8 mask) 755 u8 mask)
756 { 756 {
757 if (mask != 0xff) { 757 if (mask != 0xff) {
758 u8 buf; 758 u8 buf;
759 759
760 pci_read_config_byte(pdev, offset, &buf); 760 pci_read_config_byte(pdev, offset, &buf);
761 value &= mask; 761 value &= mask;
762 buf &= ~mask; 762 buf &= ~mask;
763 value |= buf; 763 value |= buf;
764 } 764 }
765 765
766 pci_write_config_byte(pdev, offset, value); 766 pci_write_config_byte(pdev, offset, value);
767 } 767 }
768 768
769 /* write all or some bits in a word-register */ 769 /* write all or some bits in a word-register */
770 static inline void pci_write_bits16(struct pci_dev *pdev, int offset, 770 static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
771 u16 value, u16 mask) 771 u16 value, u16 mask)
772 { 772 {
773 if (mask != 0xffff) { 773 if (mask != 0xffff) {
774 u16 buf; 774 u16 buf;
775 775
776 pci_read_config_word(pdev, offset, &buf); 776 pci_read_config_word(pdev, offset, &buf);
777 value &= mask; 777 value &= mask;
778 buf &= ~mask; 778 buf &= ~mask;
779 value |= buf; 779 value |= buf;
780 } 780 }
781 781
782 pci_write_config_word(pdev, offset, value); 782 pci_write_config_word(pdev, offset, value);
783 } 783 }
784 784
785 /* 785 /*
786 * pci_write_bits32 786 * pci_write_bits32
787 * 787 *
788 * edac local routine to do pci_write_config_dword, but adds 788 * edac local routine to do pci_write_config_dword, but adds
789 * a mask parameter. If mask is all ones, ignore the mask. 789 * a mask parameter. If mask is all ones, ignore the mask.
790 * Otherwise utilize the mask to isolate specified bits 790 * Otherwise utilize the mask to isolate specified bits
791 * 791 *
792 * write all or some bits in a dword-register 792 * write all or some bits in a dword-register
793 */ 793 */
794 static inline void pci_write_bits32(struct pci_dev *pdev, int offset, 794 static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
795 u32 value, u32 mask) 795 u32 value, u32 mask)
796 { 796 {
797 if (mask != 0xffffffff) { 797 if (mask != 0xffffffff) {
798 u32 buf; 798 u32 buf;
799 799
800 pci_read_config_dword(pdev, offset, &buf); 800 pci_read_config_dword(pdev, offset, &buf);
801 value &= mask; 801 value &= mask;
802 buf &= ~mask; 802 buf &= ~mask;
803 value |= buf; 803 value |= buf;
804 } 804 }
805 805
806 pci_write_config_dword(pdev, offset, value); 806 pci_write_config_dword(pdev, offset, value);
807 } 807 }
808 808
809 #endif /* CONFIG_PCI */ 809 #endif /* CONFIG_PCI */
810 810
811 extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, 811 extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
812 unsigned nr_chans, int edac_index); 812 unsigned nr_chans, int edac_index);
813 extern int edac_mc_add_mc(struct mem_ctl_info *mci); 813 extern int edac_mc_add_mc(struct mem_ctl_info *mci);
814 extern void edac_mc_free(struct mem_ctl_info *mci); 814 extern void edac_mc_free(struct mem_ctl_info *mci);
815 extern struct mem_ctl_info *edac_mc_find(int idx); 815 extern struct mem_ctl_info *edac_mc_find(int idx);
816 extern struct mem_ctl_info *find_mci_by_dev(struct device *dev); 816 extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
817 extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev); 817 extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
818 extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, 818 extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
819 unsigned long page); 819 unsigned long page);
820 820
821 /* 821 /*
822 * The 'no info' errors are used when error overflows are reported. 822 * The 'no info' errors are used when error overflows are reported.
823 * There are a limited number of error logging registers that can 823 * There are a limited number of error logging registers that can
824 * be exhausted. When all registers are exhausted and an additional 824 * be exhausted. When all registers are exhausted and an additional
825 * error occurs then an error overflow register records that an 825 * error occurs then an error overflow register records that an
826 * error occurred and the type of error, but doesn't have any 826 * error occurred and the type of error, but doesn't have any
827 * further information. The ce/ue versions make for cleaner 827 * further information. The ce/ue versions make for cleaner
828 * reporting logic and function interface - reduces conditional 828 * reporting logic and function interface - reduces conditional
829 * statement clutter and extra function arguments. 829 * statement clutter and extra function arguments.
830 */ 830 */
831 extern void edac_mc_handle_ce(struct mem_ctl_info *mci, 831 extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
832 unsigned long page_frame_number, 832 unsigned long page_frame_number,
833 unsigned long offset_in_page, 833 unsigned long offset_in_page,
834 unsigned long syndrome, int row, int channel, 834 unsigned long syndrome, int row, int channel,
835 const char *msg); 835 const char *msg);
836 extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, 836 extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
837 const char *msg); 837 const char *msg);
838 extern void edac_mc_handle_ue(struct mem_ctl_info *mci, 838 extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
839 unsigned long page_frame_number, 839 unsigned long page_frame_number,
840 unsigned long offset_in_page, int row, 840 unsigned long offset_in_page, int row,
841 const char *msg); 841 const char *msg);
842 extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, 842 extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
843 const char *msg); 843 const char *msg);
844 extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow, 844 extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
845 unsigned int channel0, unsigned int channel1, 845 unsigned int channel0, unsigned int channel1,
846 char *msg); 846 char *msg);
847 extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow, 847 extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
848 unsigned int channel, char *msg); 848 unsigned int channel, char *msg);
849 849
850 /* 850 /*
851 * edac_device APIs 851 * edac_device APIs
852 */ 852 */
853 extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev); 853 extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
854 extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev); 854 extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
855 extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, 855 extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
856 int inst_nr, int block_nr, const char *msg); 856 int inst_nr, int block_nr, const char *msg);
857 extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, 857 extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
858 int inst_nr, int block_nr, const char *msg); 858 int inst_nr, int block_nr, const char *msg);
859 extern int edac_device_alloc_index(void); 859 extern int edac_device_alloc_index(void);
860 860
861 /* 861 /*
862 * edac_pci APIs 862 * edac_pci APIs
863 */ 863 */
864 extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt, 864 extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
865 const char *edac_pci_name); 865 const char *edac_pci_name);
866 866
867 extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci); 867 extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
868 868
869 extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, 869 extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
870 unsigned long value); 870 unsigned long value);
871 871
872 extern int edac_pci_alloc_index(void); 872 extern int edac_pci_alloc_index(void);
873 extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx); 873 extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
874 extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev); 874 extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
875 875
876 extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl( 876 extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
877 struct device *dev, 877 struct device *dev,
878 const char *mod_name); 878 const char *mod_name);
879 879
880 extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci); 880 extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
881 extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci); 881 extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
882 extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci); 882 extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
883 883
884 /* 884 /*
885 * edac misc APIs 885 * edac misc APIs
886 */ 886 */
887 extern char *edac_op_state_to_string(int op_state); 887 extern char *edac_op_state_to_string(int op_state);
888 888
889 #endif /* _EDAC_CORE_H_ */ 889 #endif /* _EDAC_CORE_H_ */
890 890
drivers/edac/edac_mc_sysfs.c
1 /* 1 /*
2 * edac_mc kernel module 2 * edac_mc kernel module
3 * (C) 2005-2007 Linux Networx (http://lnxi.com) 3 * (C) 2005-2007 Linux Networx (http://lnxi.com)
4 * 4 *
5 * This file may be distributed under the terms of the 5 * This file may be distributed under the terms of the
6 * GNU General Public License. 6 * GNU General Public License.
7 * 7 *
8 * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com 8 * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
9 * 9 *
10 */ 10 */
11 11
12 #include <linux/ctype.h> 12 #include <linux/ctype.h>
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/edac.h> 14 #include <linux/edac.h>
15 #include <linux/bug.h> 15 #include <linux/bug.h>
16 16
17 #include "edac_core.h" 17 #include "edac_core.h"
18 #include "edac_module.h" 18 #include "edac_module.h"
19 19
20 20
21 /* MC EDAC Controls, settable by module parameter and sysfs */ 21 /* MC EDAC Controls, settable by module parameter and sysfs */
22 static int edac_mc_log_ue = 1; 22 static int edac_mc_log_ue = 1;
23 static int edac_mc_log_ce = 1; 23 static int edac_mc_log_ce = 1;
24 static int edac_mc_panic_on_ue; 24 static int edac_mc_panic_on_ue;
25 static int edac_mc_poll_msec = 1000; 25 static int edac_mc_poll_msec = 1000;
26 26
27 /* Getter functions for above */ 27 /* Getter functions for above */
28 int edac_mc_get_log_ue(void) 28 int edac_mc_get_log_ue(void)
29 { 29 {
30 return edac_mc_log_ue; 30 return edac_mc_log_ue;
31 } 31 }
32 32
33 int edac_mc_get_log_ce(void) 33 int edac_mc_get_log_ce(void)
34 { 34 {
35 return edac_mc_log_ce; 35 return edac_mc_log_ce;
36 } 36 }
37 37
38 int edac_mc_get_panic_on_ue(void) 38 int edac_mc_get_panic_on_ue(void)
39 { 39 {
40 return edac_mc_panic_on_ue; 40 return edac_mc_panic_on_ue;
41 } 41 }
42 42
43 /* this is temporary */ 43 /* this is temporary */
44 int edac_mc_get_poll_msec(void) 44 int edac_mc_get_poll_msec(void)
45 { 45 {
46 return edac_mc_poll_msec; 46 return edac_mc_poll_msec;
47 } 47 }
48 48
49 static int edac_set_poll_msec(const char *val, struct kernel_param *kp) 49 static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
50 { 50 {
51 long l; 51 long l;
52 int ret; 52 int ret;
53 53
54 if (!val) 54 if (!val)
55 return -EINVAL; 55 return -EINVAL;
56 56
57 ret = strict_strtol(val, 0, &l); 57 ret = strict_strtol(val, 0, &l);
58 if (ret == -EINVAL || ((int)l != l)) 58 if (ret == -EINVAL || ((int)l != l))
59 return -EINVAL; 59 return -EINVAL;
60 *((int *)kp->arg) = l; 60 *((int *)kp->arg) = l;
61 61
62 /* notify edac_mc engine to reset the poll period */ 62 /* notify edac_mc engine to reset the poll period */
63 edac_mc_reset_delay_period(l); 63 edac_mc_reset_delay_period(l);
64 64
65 return 0; 65 return 0;
66 } 66 }
67 67
68 /* Parameter declarations for above */ 68 /* Parameter declarations for above */
69 module_param(edac_mc_panic_on_ue, int, 0644); 69 module_param(edac_mc_panic_on_ue, int, 0644);
70 MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); 70 MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
71 module_param(edac_mc_log_ue, int, 0644); 71 module_param(edac_mc_log_ue, int, 0644);
72 MODULE_PARM_DESC(edac_mc_log_ue, 72 MODULE_PARM_DESC(edac_mc_log_ue,
73 "Log uncorrectable error to console: 0=off 1=on"); 73 "Log uncorrectable error to console: 0=off 1=on");
74 module_param(edac_mc_log_ce, int, 0644); 74 module_param(edac_mc_log_ce, int, 0644);
75 MODULE_PARM_DESC(edac_mc_log_ce, 75 MODULE_PARM_DESC(edac_mc_log_ce,
76 "Log correctable error to console: 0=off 1=on"); 76 "Log correctable error to console: 0=off 1=on");
77 module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int, 77 module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
78 &edac_mc_poll_msec, 0644); 78 &edac_mc_poll_msec, 0644);
79 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); 79 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
80 80
81 /* 81 /*
82 * various constants for Memory Controllers 82 * various constants for Memory Controllers
83 */ 83 */
84 static const char *mem_types[] = { 84 static const char *mem_types[] = {
85 [MEM_EMPTY] = "Empty", 85 [MEM_EMPTY] = "Empty",
86 [MEM_RESERVED] = "Reserved", 86 [MEM_RESERVED] = "Reserved",
87 [MEM_UNKNOWN] = "Unknown", 87 [MEM_UNKNOWN] = "Unknown",
88 [MEM_FPM] = "FPM", 88 [MEM_FPM] = "FPM",
89 [MEM_EDO] = "EDO", 89 [MEM_EDO] = "EDO",
90 [MEM_BEDO] = "BEDO", 90 [MEM_BEDO] = "BEDO",
91 [MEM_SDR] = "Unbuffered-SDR", 91 [MEM_SDR] = "Unbuffered-SDR",
92 [MEM_RDR] = "Registered-SDR", 92 [MEM_RDR] = "Registered-SDR",
93 [MEM_DDR] = "Unbuffered-DDR", 93 [MEM_DDR] = "Unbuffered-DDR",
94 [MEM_RDDR] = "Registered-DDR", 94 [MEM_RDDR] = "Registered-DDR",
95 [MEM_RMBS] = "RMBS", 95 [MEM_RMBS] = "RMBS",
96 [MEM_DDR2] = "Unbuffered-DDR2", 96 [MEM_DDR2] = "Unbuffered-DDR2",
97 [MEM_FB_DDR2] = "FullyBuffered-DDR2", 97 [MEM_FB_DDR2] = "FullyBuffered-DDR2",
98 [MEM_RDDR2] = "Registered-DDR2", 98 [MEM_RDDR2] = "Registered-DDR2",
99 [MEM_XDR] = "XDR", 99 [MEM_XDR] = "XDR",
100 [MEM_DDR3] = "Unbuffered-DDR3", 100 [MEM_DDR3] = "Unbuffered-DDR3",
101 [MEM_RDDR3] = "Registered-DDR3" 101 [MEM_RDDR3] = "Registered-DDR3"
102 }; 102 };
103 103
104 static const char *dev_types[] = { 104 static const char *dev_types[] = {
105 [DEV_UNKNOWN] = "Unknown", 105 [DEV_UNKNOWN] = "Unknown",
106 [DEV_X1] = "x1", 106 [DEV_X1] = "x1",
107 [DEV_X2] = "x2", 107 [DEV_X2] = "x2",
108 [DEV_X4] = "x4", 108 [DEV_X4] = "x4",
109 [DEV_X8] = "x8", 109 [DEV_X8] = "x8",
110 [DEV_X16] = "x16", 110 [DEV_X16] = "x16",
111 [DEV_X32] = "x32", 111 [DEV_X32] = "x32",
112 [DEV_X64] = "x64" 112 [DEV_X64] = "x64"
113 }; 113 };
114 114
115 static const char *edac_caps[] = { 115 static const char *edac_caps[] = {
116 [EDAC_UNKNOWN] = "Unknown", 116 [EDAC_UNKNOWN] = "Unknown",
117 [EDAC_NONE] = "None", 117 [EDAC_NONE] = "None",
118 [EDAC_RESERVED] = "Reserved", 118 [EDAC_RESERVED] = "Reserved",
119 [EDAC_PARITY] = "PARITY", 119 [EDAC_PARITY] = "PARITY",
120 [EDAC_EC] = "EC", 120 [EDAC_EC] = "EC",
121 [EDAC_SECDED] = "SECDED", 121 [EDAC_SECDED] = "SECDED",
122 [EDAC_S2ECD2ED] = "S2ECD2ED", 122 [EDAC_S2ECD2ED] = "S2ECD2ED",
123 [EDAC_S4ECD4ED] = "S4ECD4ED", 123 [EDAC_S4ECD4ED] = "S4ECD4ED",
124 [EDAC_S8ECD8ED] = "S8ECD8ED", 124 [EDAC_S8ECD8ED] = "S8ECD8ED",
125 [EDAC_S16ECD16ED] = "S16ECD16ED" 125 [EDAC_S16ECD16ED] = "S16ECD16ED"
126 }; 126 };
127 127
128 /* EDAC sysfs CSROW data structures and methods 128 /* EDAC sysfs CSROW data structures and methods
129 */ 129 */
130 130
131 /* Set of default csrow<id> attribute show/store functions */ 131 /* Set of default csrow<id> attribute show/store functions */
132 static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, 132 static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
133 int private) 133 int private)
134 { 134 {
135 return sprintf(data, "%u\n", csrow->ue_count); 135 return sprintf(data, "%u\n", csrow->ue_count);
136 } 136 }
137 137
138 static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, 138 static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
139 int private) 139 int private)
140 { 140 {
141 return sprintf(data, "%u\n", csrow->ce_count); 141 return sprintf(data, "%u\n", csrow->ce_count);
142 } 142 }
143 143
144 static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, 144 static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
145 int private) 145 int private)
146 { 146 {
147 return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); 147 return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
148 } 148 }
149 149
150 static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, 150 static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
151 int private) 151 int private)
152 { 152 {
153 return sprintf(data, "%s\n", mem_types[csrow->mtype]); 153 return sprintf(data, "%s\n", mem_types[csrow->mtype]);
154 } 154 }
155 155
156 static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, 156 static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
157 int private) 157 int private)
158 { 158 {
159 return sprintf(data, "%s\n", dev_types[csrow->dtype]); 159 return sprintf(data, "%s\n", dev_types[csrow->dtype]);
160 } 160 }
161 161
162 static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, 162 static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
163 int private) 163 int private)
164 { 164 {
165 return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]); 165 return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
166 } 166 }
167 167
168 /* show/store functions for DIMM Label attributes */ 168 /* show/store functions for DIMM Label attributes */
169 static ssize_t channel_dimm_label_show(struct csrow_info *csrow, 169 static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
170 char *data, int channel) 170 char *data, int channel)
171 { 171 {
172 /* if field has not been initialized, there is nothing to send */ 172 /* if field has not been initialized, there is nothing to send */
173 if (!csrow->channels[channel].label[0]) 173 if (!csrow->channels[channel].label[0])
174 return 0; 174 return 0;
175 175
176 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 176 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
177 csrow->channels[channel].label); 177 csrow->channels[channel].label);
178 } 178 }
179 179
180 static ssize_t channel_dimm_label_store(struct csrow_info *csrow, 180 static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
181 const char *data, 181 const char *data,
182 size_t count, int channel) 182 size_t count, int channel)
183 { 183 {
184 ssize_t max_size = 0; 184 ssize_t max_size = 0;
185 185
186 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 186 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
187 strncpy(csrow->channels[channel].label, data, max_size); 187 strncpy(csrow->channels[channel].label, data, max_size);
188 csrow->channels[channel].label[max_size] = '\0'; 188 csrow->channels[channel].label[max_size] = '\0';
189 189
190 return max_size; 190 return max_size;
191 } 191 }
192 192
193 /* show function for dynamic chX_ce_count attribute */ 193 /* show function for dynamic chX_ce_count attribute */
194 static ssize_t channel_ce_count_show(struct csrow_info *csrow, 194 static ssize_t channel_ce_count_show(struct csrow_info *csrow,
195 char *data, int channel) 195 char *data, int channel)
196 { 196 {
197 return sprintf(data, "%u\n", csrow->channels[channel].ce_count); 197 return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
198 } 198 }
199 199
200 /* csrow specific attribute structure */ 200 /* csrow specific attribute structure */
201 struct csrowdev_attribute { 201 struct csrowdev_attribute {
202 struct attribute attr; 202 struct attribute attr;
203 ssize_t(*show) (struct csrow_info *, char *, int); 203 ssize_t(*show) (struct csrow_info *, char *, int);
204 ssize_t(*store) (struct csrow_info *, const char *, size_t, int); 204 ssize_t(*store) (struct csrow_info *, const char *, size_t, int);
205 int private; 205 int private;
206 }; 206 };
207 207
208 #define to_csrow(k) container_of(k, struct csrow_info, kobj) 208 #define to_csrow(k) container_of(k, struct csrow_info, kobj)
209 #define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr) 209 #define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
210 210
211 /* Set of show/store higher level functions for default csrow attributes */ 211 /* Set of show/store higher level functions for default csrow attributes */
212 static ssize_t csrowdev_show(struct kobject *kobj, 212 static ssize_t csrowdev_show(struct kobject *kobj,
213 struct attribute *attr, char *buffer) 213 struct attribute *attr, char *buffer)
214 { 214 {
215 struct csrow_info *csrow = to_csrow(kobj); 215 struct csrow_info *csrow = to_csrow(kobj);
216 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); 216 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
217 217
218 if (csrowdev_attr->show) 218 if (csrowdev_attr->show)
219 return csrowdev_attr->show(csrow, 219 return csrowdev_attr->show(csrow,
220 buffer, csrowdev_attr->private); 220 buffer, csrowdev_attr->private);
221 return -EIO; 221 return -EIO;
222 } 222 }
223 223
224 static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, 224 static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
225 const char *buffer, size_t count) 225 const char *buffer, size_t count)
226 { 226 {
227 struct csrow_info *csrow = to_csrow(kobj); 227 struct csrow_info *csrow = to_csrow(kobj);
228 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); 228 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
229 229
230 if (csrowdev_attr->store) 230 if (csrowdev_attr->store)
231 return csrowdev_attr->store(csrow, 231 return csrowdev_attr->store(csrow,
232 buffer, 232 buffer,
233 count, csrowdev_attr->private); 233 count, csrowdev_attr->private);
234 return -EIO; 234 return -EIO;
235 } 235 }
236 236
237 static const struct sysfs_ops csrowfs_ops = { 237 static const struct sysfs_ops csrowfs_ops = {
238 .show = csrowdev_show, 238 .show = csrowdev_show,
239 .store = csrowdev_store 239 .store = csrowdev_store
240 }; 240 };
241 241
242 #define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \ 242 #define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \
243 static struct csrowdev_attribute attr_##_name = { \ 243 static struct csrowdev_attribute attr_##_name = { \
244 .attr = {.name = __stringify(_name), .mode = _mode }, \ 244 .attr = {.name = __stringify(_name), .mode = _mode }, \
245 .show = _show, \ 245 .show = _show, \
246 .store = _store, \ 246 .store = _store, \
247 .private = _private, \ 247 .private = _private, \
248 }; 248 };
249 249
250 /* default csrow<id> attribute files */ 250 /* default csrow<id> attribute files */
251 CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0); 251 CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
252 CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0); 252 CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0);
253 CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0); 253 CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0);
254 CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0); 254 CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
255 CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0); 255 CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
256 CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0); 256 CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
257 257
258 /* default attributes of the CSROW<id> object */ 258 /* default attributes of the CSROW<id> object */
259 static struct csrowdev_attribute *default_csrow_attr[] = { 259 static struct csrowdev_attribute *default_csrow_attr[] = {
260 &attr_dev_type, 260 &attr_dev_type,
261 &attr_mem_type, 261 &attr_mem_type,
262 &attr_edac_mode, 262 &attr_edac_mode,
263 &attr_size_mb, 263 &attr_size_mb,
264 &attr_ue_count, 264 &attr_ue_count,
265 &attr_ce_count, 265 &attr_ce_count,
266 NULL, 266 NULL,
267 }; 267 };
268 268
269 /* possible dynamic channel DIMM Label attribute files */ 269 /* possible dynamic channel DIMM Label attribute files */
270 CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR, 270 CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
271 channel_dimm_label_show, channel_dimm_label_store, 0); 271 channel_dimm_label_show, channel_dimm_label_store, 0);
272 CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR, 272 CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
273 channel_dimm_label_show, channel_dimm_label_store, 1); 273 channel_dimm_label_show, channel_dimm_label_store, 1);
274 CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR, 274 CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
275 channel_dimm_label_show, channel_dimm_label_store, 2); 275 channel_dimm_label_show, channel_dimm_label_store, 2);
276 CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR, 276 CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
277 channel_dimm_label_show, channel_dimm_label_store, 3); 277 channel_dimm_label_show, channel_dimm_label_store, 3);
278 CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR, 278 CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
279 channel_dimm_label_show, channel_dimm_label_store, 4); 279 channel_dimm_label_show, channel_dimm_label_store, 4);
280 CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR, 280 CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
281 channel_dimm_label_show, channel_dimm_label_store, 5); 281 channel_dimm_label_show, channel_dimm_label_store, 5);
282 282
283 /* Total possible dynamic DIMM Label attribute file table */ 283 /* Total possible dynamic DIMM Label attribute file table */
284 static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = { 284 static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
285 &attr_ch0_dimm_label, 285 &attr_ch0_dimm_label,
286 &attr_ch1_dimm_label, 286 &attr_ch1_dimm_label,
287 &attr_ch2_dimm_label, 287 &attr_ch2_dimm_label,
288 &attr_ch3_dimm_label, 288 &attr_ch3_dimm_label,
289 &attr_ch4_dimm_label, 289 &attr_ch4_dimm_label,
290 &attr_ch5_dimm_label 290 &attr_ch5_dimm_label
291 }; 291 };
292 292
293 /* possible dynamic channel ce_count attribute files */ 293 /* possible dynamic channel ce_count attribute files */
294 CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0); 294 CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0);
295 CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1); 295 CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1);
296 CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2); 296 CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2);
297 CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3); 297 CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3);
298 CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4); 298 CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4);
299 CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5); 299 CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5);
300 300
301 /* Total possible dynamic ce_count attribute file table */ 301 /* Total possible dynamic ce_count attribute file table */
302 static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = { 302 static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
303 &attr_ch0_ce_count, 303 &attr_ch0_ce_count,
304 &attr_ch1_ce_count, 304 &attr_ch1_ce_count,
305 &attr_ch2_ce_count, 305 &attr_ch2_ce_count,
306 &attr_ch3_ce_count, 306 &attr_ch3_ce_count,
307 &attr_ch4_ce_count, 307 &attr_ch4_ce_count,
308 &attr_ch5_ce_count 308 &attr_ch5_ce_count
309 }; 309 };
310 310
311 #define EDAC_NR_CHANNELS 6 311 #define EDAC_NR_CHANNELS 6
312 312
313 /* Create dynamic CHANNEL files, indexed by 'chan', under specified CSROW */ 313 /* Create dynamic CHANNEL files, indexed by 'chan', under specified CSROW */
314 static int edac_create_channel_files(struct kobject *kobj, int chan) 314 static int edac_create_channel_files(struct kobject *kobj, int chan)
315 { 315 {
316 int err = -ENODEV; 316 int err = -ENODEV;
317 317
318 if (chan >= EDAC_NR_CHANNELS) 318 if (chan >= EDAC_NR_CHANNELS)
319 return err; 319 return err;
320 320
321 /* create the DIMM label attribute file */ 321 /* create the DIMM label attribute file */
322 err = sysfs_create_file(kobj, 322 err = sysfs_create_file(kobj,
323 (struct attribute *) 323 (struct attribute *)
324 dynamic_csrow_dimm_attr[chan]); 324 dynamic_csrow_dimm_attr[chan]);
325 325
326 if (!err) { 326 if (!err) {
327 /* create the CE Count attribute file */ 327 /* create the CE Count attribute file */
328 err = sysfs_create_file(kobj, 328 err = sysfs_create_file(kobj,
329 (struct attribute *) 329 (struct attribute *)
330 dynamic_csrow_ce_count_attr[chan]); 330 dynamic_csrow_ce_count_attr[chan]);
331 } else { 331 } else {
332 debugf1("%s() error creating dimm label file", 332 debugf1("%s() error creating dimm label file",
333 __func__); 333 __func__);
334 } 334 }
335 335
336 return err; 336 return err;
337 } 337 }
338 338
339 /* No memory to release for this kobj */ 339 /* No memory to release for this kobj */
340 static void edac_csrow_instance_release(struct kobject *kobj) 340 static void edac_csrow_instance_release(struct kobject *kobj)
341 { 341 {
342 struct mem_ctl_info *mci; 342 struct mem_ctl_info *mci;
343 struct csrow_info *cs; 343 struct csrow_info *cs;
344 344
345 debugf1("%s()\n", __func__); 345 debugf1("%s()\n", __func__);
346 346
347 cs = container_of(kobj, struct csrow_info, kobj); 347 cs = container_of(kobj, struct csrow_info, kobj);
348 mci = cs->mci; 348 mci = cs->mci;
349 349
350 kobject_put(&mci->edac_mci_kobj); 350 kobject_put(&mci->edac_mci_kobj);
351 } 351 }
352 352
353 /* the kobj_type instance for a CSROW */ 353 /* the kobj_type instance for a CSROW */
354 static struct kobj_type ktype_csrow = { 354 static struct kobj_type ktype_csrow = {
355 .release = edac_csrow_instance_release, 355 .release = edac_csrow_instance_release,
356 .sysfs_ops = &csrowfs_ops, 356 .sysfs_ops = &csrowfs_ops,
357 .default_attrs = (struct attribute **)default_csrow_attr, 357 .default_attrs = (struct attribute **)default_csrow_attr,
358 }; 358 };
359 359
360 /* Create a CSROW object under specified edac_mc_device */ 360 /* Create a CSROW object under specified edac_mc_device */
361 static int edac_create_csrow_object(struct mem_ctl_info *mci, 361 static int edac_create_csrow_object(struct mem_ctl_info *mci,
362 struct csrow_info *csrow, int index) 362 struct csrow_info *csrow, int index)
363 { 363 {
364 struct kobject *kobj_mci = &mci->edac_mci_kobj; 364 struct kobject *kobj_mci = &mci->edac_mci_kobj;
365 struct kobject *kobj; 365 struct kobject *kobj;
366 int chan; 366 int chan;
367 int err; 367 int err;
368 368
369 /* generate ..../edac/mc/mc<id>/csrow<index> */ 369 /* generate ..../edac/mc/mc<id>/csrow<index> */
370 memset(&csrow->kobj, 0, sizeof(csrow->kobj)); 370 memset(&csrow->kobj, 0, sizeof(csrow->kobj));
371 csrow->mci = mci; /* include container up link */ 371 csrow->mci = mci; /* include container up link */
372 372
373 /* bump the mci instance's kobject's ref count */ 373 /* bump the mci instance's kobject's ref count */
374 kobj = kobject_get(&mci->edac_mci_kobj); 374 kobj = kobject_get(&mci->edac_mci_kobj);
375 if (!kobj) { 375 if (!kobj) {
376 err = -ENODEV; 376 err = -ENODEV;
377 goto err_out; 377 goto err_out;
378 } 378 }
379 379
380 /* Instantiate the csrow object */ 380 /* Instantiate the csrow object */
381 err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci, 381 err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci,
382 "csrow%d", index); 382 "csrow%d", index);
383 if (err) 383 if (err)
384 goto err_release_top_kobj; 384 goto err_release_top_kobj;
385 385
386 /* At this point, to release a csrow kobj, one must 386 /* At this point, to release a csrow kobj, one must
387 * call kobject_put and allow that teardown 387 * call kobject_put and allow that teardown
388 * to do the releasing 388 * to do the releasing
389 */ 389 */
390 390
391 /* Create the dynamic attribute files on this csrow, 391 /* Create the dynamic attribute files on this csrow,
392 * namely, the DIMM labels and the channel ce_count 392 * namely, the DIMM labels and the channel ce_count
393 */ 393 */
394 for (chan = 0; chan < csrow->nr_channels; chan++) { 394 for (chan = 0; chan < csrow->nr_channels; chan++) {
395 err = edac_create_channel_files(&csrow->kobj, chan); 395 err = edac_create_channel_files(&csrow->kobj, chan);
396 if (err) { 396 if (err) {
397 /* special case the unregister here */ 397 /* special case the unregister here */
398 kobject_put(&csrow->kobj); 398 kobject_put(&csrow->kobj);
399 goto err_out; 399 goto err_out;
400 } 400 }
401 } 401 }
402 kobject_uevent(&csrow->kobj, KOBJ_ADD); 402 kobject_uevent(&csrow->kobj, KOBJ_ADD);
403 return 0; 403 return 0;
404 404
405 /* error unwind stack */ 405 /* error unwind stack */
406 err_release_top_kobj: 406 err_release_top_kobj:
407 kobject_put(&mci->edac_mci_kobj); 407 kobject_put(&mci->edac_mci_kobj);
408 408
409 err_out: 409 err_out:
410 return err; 410 return err;
411 } 411 }
412 412
413 /* default sysfs methods and data structures for the main MCI kobject */ 413 /* default sysfs methods and data structures for the main MCI kobject */
414 414
415 static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, 415 static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
416 const char *data, size_t count) 416 const char *data, size_t count)
417 { 417 {
418 int row, chan; 418 int row, chan;
419 419
420 mci->ue_noinfo_count = 0; 420 mci->ue_noinfo_count = 0;
421 mci->ce_noinfo_count = 0; 421 mci->ce_noinfo_count = 0;
422 mci->ue_count = 0; 422 mci->ue_count = 0;
423 mci->ce_count = 0; 423 mci->ce_count = 0;
424 424
425 for (row = 0; row < mci->nr_csrows; row++) { 425 for (row = 0; row < mci->nr_csrows; row++) {
426 struct csrow_info *ri = &mci->csrows[row]; 426 struct csrow_info *ri = &mci->csrows[row];
427 427
428 ri->ue_count = 0; 428 ri->ue_count = 0;
429 ri->ce_count = 0; 429 ri->ce_count = 0;
430 430
431 for (chan = 0; chan < ri->nr_channels; chan++) 431 for (chan = 0; chan < ri->nr_channels; chan++)
432 ri->channels[chan].ce_count = 0; 432 ri->channels[chan].ce_count = 0;
433 } 433 }
434 434
435 mci->start_time = jiffies; 435 mci->start_time = jiffies;
436 return count; 436 return count;
437 } 437 }
438 438
439 /* memory scrubbing */ 439 /* Memory scrubbing interface:
440 *
441 * An MC driver can limit the scrubbing bandwidth based on the CPU type.
442 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
443 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
444 *
445 * A negative value still means that an error occurred while setting
446 * the scrub rate.
447 */
440 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, 448 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
441 const char *data, size_t count) 449 const char *data, size_t count)
442 { 450 {
443 unsigned long bandwidth = 0; 451 unsigned long bandwidth = 0;
444 int err; 452 int new_bw = 0;
445 453
446 if (!mci->set_sdram_scrub_rate) { 454 if (!mci->set_sdram_scrub_rate)
447 edac_printk(KERN_WARNING, EDAC_MC,
448 "Memory scrub rate setting not implemented!\n");
449 return -EINVAL; 455 return -EINVAL;
450 }
451 456
452 if (strict_strtoul(data, 10, &bandwidth) < 0) 457 if (strict_strtoul(data, 10, &bandwidth) < 0)
453 return -EINVAL; 458 return -EINVAL;
454 459
455 err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth); 460 new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
456 if (err) { 461 if (new_bw >= 0) {
457 edac_printk(KERN_DEBUG, EDAC_MC, 462 edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw);
458 "Failed setting scrub rate to %lu\n", bandwidth);
459 return -EINVAL;
460 }
461 else {
462 edac_printk(KERN_DEBUG, EDAC_MC,
463 "Scrub rate set to: %lu\n", bandwidth);
464 return count; 463 return count;
465 } 464 }
465
466 edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth);
467 return -EINVAL;
466 } 468 }
467 469
470 /*
471 * ->get_sdram_scrub_rate() return value semantics same as above.
472 */
468 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) 473 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
469 { 474 {
470 u32 bandwidth = 0; 475 int bandwidth = 0;
471 int err;
472 476
473 if (!mci->get_sdram_scrub_rate) { 477 if (!mci->get_sdram_scrub_rate)
474 edac_printk(KERN_WARNING, EDAC_MC,
475 "Memory scrub rate reading not implemented\n");
476 return -EINVAL; 478 return -EINVAL;
477 }
478 479
479 err = mci->get_sdram_scrub_rate(mci, &bandwidth); 480 bandwidth = mci->get_sdram_scrub_rate(mci);
480 if (err) { 481 if (bandwidth < 0) {
481 edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); 482 edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
482 return err; 483 return bandwidth;
483 } 484 }
484 else { 485
485 edac_printk(KERN_DEBUG, EDAC_MC, 486 edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth);
486 "Read scrub rate: %d\n", bandwidth); 487 return sprintf(data, "%d\n", bandwidth);
487 return sprintf(data, "%d\n", bandwidth);
488 }
489 } 488 }
490 489
491 /* default attribute files for the MCI object */ 490 /* default attribute files for the MCI object */
492 static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) 491 static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
493 { 492 {
494 return sprintf(data, "%d\n", mci->ue_count); 493 return sprintf(data, "%d\n", mci->ue_count);
495 } 494 }
496 495
497 static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) 496 static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
498 { 497 {
499 return sprintf(data, "%d\n", mci->ce_count); 498 return sprintf(data, "%d\n", mci->ce_count);
500 } 499 }
501 500
502 static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) 501 static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
503 { 502 {
504 return sprintf(data, "%d\n", mci->ce_noinfo_count); 503 return sprintf(data, "%d\n", mci->ce_noinfo_count);
505 } 504 }
506 505
507 static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data) 506 static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
508 { 507 {
509 return sprintf(data, "%d\n", mci->ue_noinfo_count); 508 return sprintf(data, "%d\n", mci->ue_noinfo_count);
510 } 509 }
511 510
512 static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data) 511 static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
513 { 512 {
514 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); 513 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
515 } 514 }
516 515
517 static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data) 516 static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
518 { 517 {
519 return sprintf(data, "%s\n", mci->ctl_name); 518 return sprintf(data, "%s\n", mci->ctl_name);
520 } 519 }
521 520
522 static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) 521 static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
523 { 522 {
524 int total_pages, csrow_idx; 523 int total_pages, csrow_idx;
525 524
526 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows; 525 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
527 csrow_idx++) { 526 csrow_idx++) {
528 struct csrow_info *csrow = &mci->csrows[csrow_idx]; 527 struct csrow_info *csrow = &mci->csrows[csrow_idx];
529 528
530 if (!csrow->nr_pages) 529 if (!csrow->nr_pages)
531 continue; 530 continue;
532 531
533 total_pages += csrow->nr_pages; 532 total_pages += csrow->nr_pages;
534 } 533 }
535 534
536 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); 535 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
537 } 536 }
538 537
539 #define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj) 538 #define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
540 #define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr) 539 #define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr)
541 540
542 /* MCI show/store functions for top most object */ 541 /* MCI show/store functions for top most object */
543 static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, 542 static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
544 char *buffer) 543 char *buffer)
545 { 544 {
546 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 545 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
547 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); 546 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
548 547
549 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); 548 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
550 549
551 if (mcidev_attr->show) 550 if (mcidev_attr->show)
552 return mcidev_attr->show(mem_ctl_info, buffer); 551 return mcidev_attr->show(mem_ctl_info, buffer);
553 552
554 return -EIO; 553 return -EIO;
555 } 554 }
556 555
557 static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, 556 static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
558 const char *buffer, size_t count) 557 const char *buffer, size_t count)
559 { 558 {
560 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 559 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
561 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); 560 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
562 561
563 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); 562 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
564 563
565 if (mcidev_attr->store) 564 if (mcidev_attr->store)
566 return mcidev_attr->store(mem_ctl_info, buffer, count); 565 return mcidev_attr->store(mem_ctl_info, buffer, count);
567 566
568 return -EIO; 567 return -EIO;
569 } 568 }
570 569
571 /* Intermediate show/store table */ 570 /* Intermediate show/store table */
572 static const struct sysfs_ops mci_ops = { 571 static const struct sysfs_ops mci_ops = {
573 .show = mcidev_show, 572 .show = mcidev_show,
574 .store = mcidev_store 573 .store = mcidev_store
575 }; 574 };
576 575
577 #define MCIDEV_ATTR(_name,_mode,_show,_store) \ 576 #define MCIDEV_ATTR(_name,_mode,_show,_store) \
578 static struct mcidev_sysfs_attribute mci_attr_##_name = { \ 577 static struct mcidev_sysfs_attribute mci_attr_##_name = { \
579 .attr = {.name = __stringify(_name), .mode = _mode }, \ 578 .attr = {.name = __stringify(_name), .mode = _mode }, \
580 .show = _show, \ 579 .show = _show, \
581 .store = _store, \ 580 .store = _store, \
582 }; 581 };
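For illustration, the first invocation below expands to roughly the following (a sketch of the macro expansion only, not new code in the patch):

    static struct mcidev_sysfs_attribute mci_attr_reset_counters = {
        .attr = { .name = "reset_counters", .mode = S_IWUSR },
        .show = NULL,
        .store = mci_reset_counters_store,
    };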
583 582
584 /* default Control file */ 583 /* default Control file */
585 MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); 584 MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
586 585
587 /* default Attribute files */ 586 /* default Attribute files */
588 MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); 587 MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
589 MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); 588 MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
590 MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); 589 MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
591 MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); 590 MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
592 MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); 591 MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
593 MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); 592 MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
594 MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); 593 MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
595 594
596 /* memory scrubber attribute file */ 595 /* memory scrubber attribute file */
597 MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, 596 MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
598 mci_sdram_scrub_rate_store); 597 mci_sdram_scrub_rate_store);
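User space drives the scrubber through this file; for example (a hypothetical session, bandwidth value illustrative):

    # echo 5242880 > /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
    # cat /sys/devices/system/edac/mc/mc0/sdram_scrub_rate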
599 598
600 static struct mcidev_sysfs_attribute *mci_attr[] = { 599 static struct mcidev_sysfs_attribute *mci_attr[] = {
601 &mci_attr_reset_counters, 600 &mci_attr_reset_counters,
602 &mci_attr_mc_name, 601 &mci_attr_mc_name,
603 &mci_attr_size_mb, 602 &mci_attr_size_mb,
604 &mci_attr_seconds_since_reset, 603 &mci_attr_seconds_since_reset,
605 &mci_attr_ue_noinfo_count, 604 &mci_attr_ue_noinfo_count,
606 &mci_attr_ce_noinfo_count, 605 &mci_attr_ce_noinfo_count,
607 &mci_attr_ue_count, 606 &mci_attr_ue_count,
608 &mci_attr_ce_count, 607 &mci_attr_ce_count,
609 &mci_attr_sdram_scrub_rate, 608 &mci_attr_sdram_scrub_rate,
610 NULL 609 NULL
611 }; 610 };
612 611
613 612
614 /* 613 /*
615 * Release of a MC controlling instance 614 * Release of a MC controlling instance
616 * 615 *
617 * Each MC control instance has the following resources upon entry: 616 * Each MC control instance has the following resources upon entry:
618 * a) a ref count on the top memctl kobj 617 * a) a ref count on the top memctl kobj
619 * b) a ref count on this module 618 * b) a ref count on this module
620 * 619 *
621 * This function must decrement those ref counts and then 620 * This function must decrement those ref counts and then
622 * free the instance's memory. 621 * free the instance's memory.
623 */ 622 */
624 static void edac_mci_control_release(struct kobject *kobj) 623 static void edac_mci_control_release(struct kobject *kobj)
625 { 624 {
626 struct mem_ctl_info *mci; 625 struct mem_ctl_info *mci;
627 626
628 mci = to_mci(kobj); 627 mci = to_mci(kobj);
629 628
630 debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx); 629 debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx);
631 630
632 /* decrement the module ref count */ 631 /* decrement the module ref count */
633 module_put(mci->owner); 632 module_put(mci->owner);
634 } 633 }
635 634
636 static struct kobj_type ktype_mci = { 635 static struct kobj_type ktype_mci = {
637 .release = edac_mci_control_release, 636 .release = edac_mci_control_release,
638 .sysfs_ops = &mci_ops, 637 .sysfs_ops = &mci_ops,
639 .default_attrs = (struct attribute **)mci_attr, 638 .default_attrs = (struct attribute **)mci_attr,
640 }; 639 };
641 640
642 /* EDAC memory controller sysfs kset: 641 /* EDAC memory controller sysfs kset:
643 * /sys/devices/system/edac/mc 642 * /sys/devices/system/edac/mc
644 */ 643 */
645 static struct kset *mc_kset; 644 static struct kset *mc_kset;
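Once the 'mc' kset below is created and an mci instance registers its main kobject, the resulting sysfs layout looks roughly like this (a sketch; attribute names come from the mci_attr[] table above):

    /sys/devices/system/edac/mc/
        mc0/
            mc_name  size_mb  seconds_since_reset
            ue_count  ce_count  ue_noinfo_count  ce_noinfo_count
            reset_counters  sdram_scrub_rate
            device -> (symlink to the underlying device)
            csrow<N>/ ...   (one directory per populated chip-select row)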
646 645
647 /* 646 /*
648 * edac_mc_register_sysfs_main_kobj 647 * edac_mc_register_sysfs_main_kobj
649 * 648 *
650 * sets up and registers the main kobject for each mci 649 * sets up and registers the main kobject for each mci
651 */ 650 */
652 int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci) 651 int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
653 { 652 {
654 struct kobject *kobj_mci; 653 struct kobject *kobj_mci;
655 int err; 654 int err;
656 655
657 debugf1("%s()\n", __func__); 656 debugf1("%s()\n", __func__);
658 657
659 kobj_mci = &mci->edac_mci_kobj; 658 kobj_mci = &mci->edac_mci_kobj;
660 659
661 /* Init the mci's kobject */ 660 /* Init the mci's kobject */
662 memset(kobj_mci, 0, sizeof(*kobj_mci)); 661 memset(kobj_mci, 0, sizeof(*kobj_mci));
663 662
664 /* Record which module 'owns' this control structure 663 /* Record which module 'owns' this control structure
665 * and bump the ref count of the module 664 * and bump the ref count of the module
666 */ 665 */
667 mci->owner = THIS_MODULE; 666 mci->owner = THIS_MODULE;
668 667
669 /* bump ref count on this module */ 668 /* bump ref count on this module */
670 if (!try_module_get(mci->owner)) { 669 if (!try_module_get(mci->owner)) {
671 err = -ENODEV; 670 err = -ENODEV;
672 goto fail_out; 671 goto fail_out;
673 } 672 }
674 673
675 /* this instance becomes part of the mc_kset */ 674 /* this instance becomes part of the mc_kset */
676 kobj_mci->kset = mc_kset; 675 kobj_mci->kset = mc_kset;
677 676
678 /* register the mc<id> kobject to the mc_kset */ 677 /* register the mc<id> kobject to the mc_kset */
679 err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL, 678 err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
680 "mc%d", mci->mc_idx); 679 "mc%d", mci->mc_idx);
681 if (err) { 680 if (err) {
682 debugf1("%s()Failed to register '.../edac/mc%d'\n", 681 debugf1("%s()Failed to register '.../edac/mc%d'\n",
683 __func__, mci->mc_idx); 682 __func__, mci->mc_idx);
684 goto kobj_reg_fail; 683 goto kobj_reg_fail;
685 } 684 }
686 kobject_uevent(kobj_mci, KOBJ_ADD); 685 kobject_uevent(kobj_mci, KOBJ_ADD);
687 686
688 /* At this point, to 'free' the control struct, 687 /* At this point, to 'free' the control struct,
689 * edac_mc_unregister_sysfs_main_kobj() must be used 688 * edac_mc_unregister_sysfs_main_kobj() must be used
690 */ 689 */
691 690
692 debugf1("%s() Registered '.../edac/mc%d' kobject\n", 691 debugf1("%s() Registered '.../edac/mc%d' kobject\n",
693 __func__, mci->mc_idx); 692 __func__, mci->mc_idx);
694 693
695 return 0; 694 return 0;
696 695
697 /* Error exit stack */ 696 /* Error exit stack */
698 697
699 kobj_reg_fail: 698 kobj_reg_fail:
700 module_put(mci->owner); 699 module_put(mci->owner);
701 700
702 fail_out: 701 fail_out:
703 return err; 702 return err;
704 } 703 }
705 704
706 /* 705 /*
707 * edac_mc_unregister_sysfs_main_kobj 706 * edac_mc_unregister_sysfs_main_kobj
708 * 707 *
709 * tears down the main mci kobject from the mc_kset 708 * tears down the main mci kobject from the mc_kset
710 */ 709 */
711 void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci) 710 void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
712 { 711 {
713 debugf1("%s()\n", __func__); 712 debugf1("%s()\n", __func__);
714 713
715 /* delete the kobj from the mc_kset */ 714 /* delete the kobj from the mc_kset */
716 kobject_put(&mci->edac_mci_kobj); 715 kobject_put(&mci->edac_mci_kobj);
717 } 716 }
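A minimal sketch of how a caller might pair these two entry points over an mci instance's lifetime (the example_* names are hypothetical; this is not the EDAC core's actual call site):

    /* Hypothetical pairing of the register/unregister entry points. */
    static int example_bind_mci(struct mem_ctl_info *mci)
    {
        int err = edac_mc_register_sysfs_main_kobj(mci);
        if (err)
            return err;
        /* ... create csrow objects and driver attributes ... */
        return 0;
    }

    static void example_unbind_mci(struct mem_ctl_info *mci)
    {
        /* Drops the last kobject reference; ktype_mci's release
         * method then does the module_put(). */
        edac_mc_unregister_sysfs_main_kobj(mci);
    }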
718 717
719 #define EDAC_DEVICE_SYMLINK "device" 718 #define EDAC_DEVICE_SYMLINK "device"
720 719
721 #define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci) 720 #define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
722 721
723 /* show/store functions for instance-group attributes */ 722 /* show/store functions for instance-group attributes */
724 static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr, 723 static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
725 char *buffer) 724 char *buffer)
726 { 725 {
727 struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj); 726 struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
728 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); 727 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
729 728
730 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); 729 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
731 730
732 if (mcidev_attr->show) 731 if (mcidev_attr->show)
733 return mcidev_attr->show(mem_ctl_info, buffer); 732 return mcidev_attr->show(mem_ctl_info, buffer);
734 733
735 return -EIO; 734 return -EIO;
736 } 735 }
737 736
738 static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr, 737 static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr,
739 const char *buffer, size_t count) 738 const char *buffer, size_t count)
740 { 739 {
741 struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj); 740 struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
742 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); 741 struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
743 742
744 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); 743 debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
745 744
746 if (mcidev_attr->store) 745 if (mcidev_attr->store)
747 return mcidev_attr->store(mem_ctl_info, buffer, count); 746 return mcidev_attr->store(mem_ctl_info, buffer, count);
748 747
749 return -EIO; 748 return -EIO;
750 } 749 }
751 750
752 /* No memory to release for this kobj */ 751 /* No memory to release for this kobj */
753 static void edac_inst_grp_release(struct kobject *kobj) 752 static void edac_inst_grp_release(struct kobject *kobj)
754 { 753 {
755 struct mcidev_sysfs_group_kobj *grp; 754 struct mcidev_sysfs_group_kobj *grp;
756 struct mem_ctl_info *mci; 755 struct mem_ctl_info *mci;
757 756
758 debugf1("%s()\n", __func__); 757 debugf1("%s()\n", __func__);
759 758
760 grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj); 759 grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
761 mci = grp->mci; 760 mci = grp->mci;
762 } 761 }
763 762
764 /* Intermediate show/store table */ 763 /* Intermediate show/store table */
765 static struct sysfs_ops inst_grp_ops = { 764 static struct sysfs_ops inst_grp_ops = {
766 .show = inst_grp_show, 765 .show = inst_grp_show,
767 .store = inst_grp_store 766 .store = inst_grp_store
768 }; 767 };
769 768
770 /* the kobj_type instance for an instance group */ 769 /* the kobj_type instance for an instance group */
771 static struct kobj_type ktype_inst_grp = { 770 static struct kobj_type ktype_inst_grp = {
772 .release = edac_inst_grp_release, 771 .release = edac_inst_grp_release,
773 .sysfs_ops = &inst_grp_ops, 772 .sysfs_ops = &inst_grp_ops,
774 }; 773 };
775 774
776 775
777 /* 776 /*
778 * edac_create_mci_instance_attributes 777 * edac_create_mci_instance_attributes
779 * create MC driver specific attributes below a specified kobj 778 * create MC driver specific attributes below a specified kobj
780 * This routine calls itself recursively, in order to create an entire 779 * This routine calls itself recursively, in order to create an entire
781 * object tree. 780 * object tree.
782 */ 781 */
783 static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, 782 static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
784 const struct mcidev_sysfs_attribute *sysfs_attrib, 783 const struct mcidev_sysfs_attribute *sysfs_attrib,
785 struct kobject *kobj) 784 struct kobject *kobj)
786 { 785 {
787 int err; 786 int err;
788 787
789 debugf1("%s()\n", __func__); 788 debugf1("%s()\n", __func__);
790 789
791 while (sysfs_attrib) { 790 while (sysfs_attrib) {
792 debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); 791 debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
793 if (sysfs_attrib->grp) { 792 if (sysfs_attrib->grp) {
794 struct mcidev_sysfs_group_kobj *grp_kobj; 793 struct mcidev_sysfs_group_kobj *grp_kobj;
795 794
796 grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL); 795 grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL);
797 if (!grp_kobj) 796 if (!grp_kobj)
798 return -ENOMEM; 797 return -ENOMEM;
799 798
800 grp_kobj->grp = sysfs_attrib->grp; 799 grp_kobj->grp = sysfs_attrib->grp;
801 grp_kobj->mci = mci; 800 grp_kobj->mci = mci;
802 list_add_tail(&grp_kobj->list, &mci->grp_kobj_list); 801 list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
803 802
804 debugf0("%s() grp %s, mci %p\n", __func__, 803 debugf0("%s() grp %s, mci %p\n", __func__,
805 sysfs_attrib->grp->name, mci); 804 sysfs_attrib->grp->name, mci);
806 805
807 err = kobject_init_and_add(&grp_kobj->kobj, 806 err = kobject_init_and_add(&grp_kobj->kobj,
808 &ktype_inst_grp, 807 &ktype_inst_grp,
809 &mci->edac_mci_kobj, 808 &mci->edac_mci_kobj,
810 sysfs_attrib->grp->name); 809 sysfs_attrib->grp->name);
811 if (err < 0) { 810 if (err < 0) {
812 printk(KERN_ERR "kobject_init_and_add failed: %d\n", err); 811 printk(KERN_ERR "kobject_init_and_add failed: %d\n", err);
813 return err; 812 return err;
814 } 813 }
815 err = edac_create_mci_instance_attributes(mci, 814 err = edac_create_mci_instance_attributes(mci,
816 grp_kobj->grp->mcidev_attr, 815 grp_kobj->grp->mcidev_attr,
817 &grp_kobj->kobj); 816 &grp_kobj->kobj);
818 817
819 if (err < 0) 818 if (err < 0)
820 return err; 819 return err;
821 } else if (sysfs_attrib->attr.name) { 820 } else if (sysfs_attrib->attr.name) {
822 debugf0("%s() file %s\n", __func__, 821 debugf0("%s() file %s\n", __func__,
823 sysfs_attrib->attr.name); 822 sysfs_attrib->attr.name);
824 823
825 err = sysfs_create_file(kobj, &sysfs_attrib->attr); 824 err = sysfs_create_file(kobj, &sysfs_attrib->attr);
826 if (err < 0) { 825 if (err < 0) {
827 printk(KERN_ERR "sysfs_create_file failed: %d\n", err); 826 printk(KERN_ERR "sysfs_create_file failed: %d\n", err);
828 return err; 827 return err;
829 } 828 }
830 } else 829 } else
831 break; 830 break;
832 831
833 sysfs_attrib++; 832 sysfs_attrib++;
834 } 833 }
835 834
836 return 0; 835 return 0;
837 } 836 }
838 837
839 /* 838 /*
840 * edac_remove_mci_instance_attributes 839 * edac_remove_mci_instance_attributes
841 * remove MC driver specific attributes at the topmost level 840 * remove MC driver specific attributes at the topmost level
842 * directory of this mci instance. 841 * directory of this mci instance.
843 */ 842 */
844 static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, 843 static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
845 const struct mcidev_sysfs_attribute *sysfs_attrib, 844 const struct mcidev_sysfs_attribute *sysfs_attrib,
846 struct kobject *kobj, int count) 845 struct kobject *kobj, int count)
847 { 846 {
848 struct mcidev_sysfs_group_kobj *grp_kobj, *tmp; 847 struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
849 848
850 debugf1("%s()\n", __func__); 849 debugf1("%s()\n", __func__);
851 850
852 /* 851 /*
853 * Loop while there are attributes, until we hit a NULL entry. 852 * Loop while there are attributes, until we hit a NULL entry.
854 * First remove all the attributes. 853 * First remove all the attributes.
855 */ 854 */
856 while (sysfs_attrib) { 855 while (sysfs_attrib) {
857 debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); 856 debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
858 if (sysfs_attrib->grp) { 857 if (sysfs_attrib->grp) {
859 debugf1("%s() seeking for group %s\n", 858 debugf1("%s() seeking for group %s\n",
860 __func__, sysfs_attrib->grp->name); 859 __func__, sysfs_attrib->grp->name);
861 list_for_each_entry(grp_kobj, 860 list_for_each_entry(grp_kobj,
862 &mci->grp_kobj_list, list) { 861 &mci->grp_kobj_list, list) {
863 debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp); 862 debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
864 if (grp_kobj->grp == sysfs_attrib->grp) { 863 if (grp_kobj->grp == sysfs_attrib->grp) {
865 edac_remove_mci_instance_attributes(mci, 864 edac_remove_mci_instance_attributes(mci,
866 grp_kobj->grp->mcidev_attr, 865 grp_kobj->grp->mcidev_attr,
867 &grp_kobj->kobj, count + 1); 866 &grp_kobj->kobj, count + 1);
868 debugf0("%s() group %s\n", __func__, 867 debugf0("%s() group %s\n", __func__,
869 sysfs_attrib->grp->name); 868 sysfs_attrib->grp->name);
870 kobject_put(&grp_kobj->kobj); 869 kobject_put(&grp_kobj->kobj);
871 } 870 }
872 } 871 }
873 debugf1("%s() end of seeking for group %s\n", 872 debugf1("%s() end of seeking for group %s\n",
874 __func__, sysfs_attrib->grp->name); 873 __func__, sysfs_attrib->grp->name);
875 } else if (sysfs_attrib->attr.name) { 874 } else if (sysfs_attrib->attr.name) {
876 debugf0("%s() file %s\n", __func__, 875 debugf0("%s() file %s\n", __func__,
877 sysfs_attrib->attr.name); 876 sysfs_attrib->attr.name);
878 sysfs_remove_file(kobj, &sysfs_attrib->attr); 877 sysfs_remove_file(kobj, &sysfs_attrib->attr);
879 } else 878 } else
880 break; 879 break;
881 sysfs_attrib++; 880 sysfs_attrib++;
882 } 881 }
883 882
884 /* Remove the group objects */ 883 /* Remove the group objects */
885 if (count) 884 if (count)
886 return; 885 return;
887 list_for_each_entry_safe(grp_kobj, tmp, 886 list_for_each_entry_safe(grp_kobj, tmp,
888 &mci->grp_kobj_list, list) { 887 &mci->grp_kobj_list, list) {
889 list_del(&grp_kobj->list); 888 list_del(&grp_kobj->list);
890 kfree(grp_kobj); 889 kfree(grp_kobj);
891 } 890 }
892 } 891 }
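Note that the count argument tracks recursion depth: nested calls (count > 0) only remove files and put the group kobjects, while the bookkeeping structures on mci->grp_kobj_list are freed exactly once, when the outermost call (count == 0) falls through to the list teardown at the end.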
893 892
894 893
895 /* 894 /*
896 * Create a new Memory Controller kobject instance, 895 * Create a new Memory Controller kobject instance,
897 * mc<id> under the 'mc' directory 896 * mc<id> under the 'mc' directory
898 * 897 *
899 * Return: 898 * Return:
900 * 0 Success 899 * 0 Success
901 * !0 Failure 900 * !0 Failure
902 */ 901 */
903 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) 902 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
904 { 903 {
905 int i; 904 int i;
906 int err; 905 int err;
907 struct csrow_info *csrow; 906 struct csrow_info *csrow;
908 struct kobject *kobj_mci = &mci->edac_mci_kobj; 907 struct kobject *kobj_mci = &mci->edac_mci_kobj;
909 908
910 debugf0("%s() idx=%d\n", __func__, mci->mc_idx); 909 debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
911 910
912 INIT_LIST_HEAD(&mci->grp_kobj_list); 911 INIT_LIST_HEAD(&mci->grp_kobj_list);
913 912
914 /* create a symlink for the device */ 913 /* create a symlink for the device */
915 err = sysfs_create_link(kobj_mci, &mci->dev->kobj, 914 err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
916 EDAC_DEVICE_SYMLINK); 915 EDAC_DEVICE_SYMLINK);
917 if (err) { 916 if (err) {
918 debugf1("%s() failure to create symlink\n", __func__); 917 debugf1("%s() failure to create symlink\n", __func__);
919 goto fail0; 918 goto fail0;
920 } 919 }
921 920
922 /* If the low level driver desires some attributes, 921 /* If the low level driver desires some attributes,
923 * then create them now for the driver. 922 * then create them now for the driver.
924 */ 923 */
925 if (mci->mc_driver_sysfs_attributes) { 924 if (mci->mc_driver_sysfs_attributes) {
926 err = edac_create_mci_instance_attributes(mci, 925 err = edac_create_mci_instance_attributes(mci,
927 mci->mc_driver_sysfs_attributes, 926 mci->mc_driver_sysfs_attributes,
928 &mci->edac_mci_kobj); 927 &mci->edac_mci_kobj);
929 if (err) { 928 if (err) {
930 debugf1("%s() failure to create mci attributes\n", 929 debugf1("%s() failure to create mci attributes\n",
931 __func__); 930 __func__);
932 goto fail0; 931 goto fail0;
933 } 932 }
934 } 933 }
935 934
936 /* Make directories for each CSROW object under the mc<id> kobject 935 /* Make directories for each CSROW object under the mc<id> kobject
937 */ 936 */
938 for (i = 0; i < mci->nr_csrows; i++) { 937 for (i = 0; i < mci->nr_csrows; i++) {
939 csrow = &mci->csrows[i]; 938 csrow = &mci->csrows[i];
940 939
941 /* Only expose populated CSROWs */ 940 /* Only expose populated CSROWs */
942 if (csrow->nr_pages > 0) { 941 if (csrow->nr_pages > 0) {
943 err = edac_create_csrow_object(mci, csrow, i); 942 err = edac_create_csrow_object(mci, csrow, i);
944 if (err) { 943 if (err) {
945 debugf1("%s() failure: create csrow %d obj\n", 944 debugf1("%s() failure: create csrow %d obj\n",
946 __func__, i); 945 __func__, i);
947 goto fail1; 946 goto fail1;
948 } 947 }
949 } 948 }
950 } 949 }
951 950
952 return 0; 951 return 0;
953 952
954 /* CSROW error: back out what has already been registered. */ 953 /* CSROW error: back out what has already been registered. */
955 fail1: 954 fail1:
956 for (i--; i >= 0; i--) { 955 for (i--; i >= 0; i--) {
957 if (mci->csrows[i].nr_pages > 0) { 956 if (mci->csrows[i].nr_pages > 0) {
958 kobject_put(&mci->csrows[i].kobj); 957 kobject_put(&mci->csrows[i].kobj);
959 } 958 }
960 } 959 }
961 960
962 /* remove the mci instance's attributes, if any */ 961 /* remove the mci instance's attributes, if any */
963 edac_remove_mci_instance_attributes(mci, 962 edac_remove_mci_instance_attributes(mci,
964 mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0); 963 mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0);
965 964
966 /* remove the symlink */ 965 /* remove the symlink */
967 sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK); 966 sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
968 967
969 fail0: 968 fail0:
970 return err; 969 return err;
971 } 970 }
972 971
973 /* 972 /*
974 * remove a Memory Controller instance 973 * remove a Memory Controller instance
975 */ 974 */
976 void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) 975 void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
977 { 976 {
978 int i; 977 int i;
979 978
980 debugf0("%s()\n", __func__); 979 debugf0("%s()\n", __func__);
981 980
982 /* remove all csrow kobjects */ 981 /* remove all csrow kobjects */
983 debugf0("%s() unregister this mci kobj\n", __func__); 982 debugf0("%s() unregister this mci kobj\n", __func__);
984 for (i = 0; i < mci->nr_csrows; i++) { 983 for (i = 0; i < mci->nr_csrows; i++) {
985 if (mci->csrows[i].nr_pages > 0) { 984 if (mci->csrows[i].nr_pages > 0) {
986 debugf0("%s() unreg csrow-%d\n", __func__, i); 985 debugf0("%s() unreg csrow-%d\n", __func__, i);
987 kobject_put(&mci->csrows[i].kobj); 986 kobject_put(&mci->csrows[i].kobj);
988 } 987 }
989 } 988 }
990 989
991 /* remove this mci instance's attributes */ 990 /* remove this mci instance's attributes */
992 if (mci->mc_driver_sysfs_attributes) { 991 if (mci->mc_driver_sysfs_attributes) {
993 debugf0("%s() unregister mci private attributes\n", __func__); 992 debugf0("%s() unregister mci private attributes\n", __func__);
994 edac_remove_mci_instance_attributes(mci, 993 edac_remove_mci_instance_attributes(mci,
995 mci->mc_driver_sysfs_attributes, 994 mci->mc_driver_sysfs_attributes,
996 &mci->edac_mci_kobj, 0); 995 &mci->edac_mci_kobj, 0);
997 } 996 }
998 997
999 /* remove the symlink */ 998 /* remove the symlink */
1000 debugf0("%s() remove_link\n", __func__); 999 debugf0("%s() remove_link\n", __func__);
1001 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); 1000 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1002 1001
1003 /* unregister this instance's kobject */ 1002 /* unregister this instance's kobject */
1004 debugf0("%s() remove_mci_instance\n", __func__); 1003 debugf0("%s() remove_mci_instance\n", __func__);
1005 kobject_put(&mci->edac_mci_kobj); 1004 kobject_put(&mci->edac_mci_kobj);
1006 } 1005 }
1007 1006
1008 1007
1009 1008
1010 1009
1011 /* 1010 /*
1012 * edac_sysfs_setup_mc_kset(void) 1011 * edac_sysfs_setup_mc_kset(void)
1013 * 1012 *
1014 * Initialize the mc_kset for the 'mc' entry 1013 * Initialize the mc_kset for the 'mc' entry
1015 * This requires creating the top 'mc' directory with a kset 1014 * This requires creating the top 'mc' directory with a kset
1016 * and its controls/attributes. 1015 * and its controls/attributes.
1017 * 1016 *
1018 * The 'mci' instances will be grouped as children of this 'mc' kset. 1017 * The 'mci' instances will be grouped as children of this 'mc' kset.
1019 * 1018 *
1020 * Return: 0 SUCCESS 1019 * Return: 0 SUCCESS
1021 * !0 FAILURE error code 1020 * !0 FAILURE error code
1022 */ 1021 */
1023 int edac_sysfs_setup_mc_kset(void) 1022 int edac_sysfs_setup_mc_kset(void)
1024 { 1023 {
1025 int err = -EINVAL; 1024 int err = -EINVAL;
1026 struct sysdev_class *edac_class; 1025 struct sysdev_class *edac_class;
1027 1026
1028 debugf1("%s()\n", __func__); 1027 debugf1("%s()\n", __func__);
1029 1028
1030 /* get the /sys/devices/system/edac class reference */ 1029 /* get the /sys/devices/system/edac class reference */
1031 edac_class = edac_get_sysfs_class(); 1030 edac_class = edac_get_sysfs_class();
1032 if (edac_class == NULL) { 1031 if (edac_class == NULL) {
1033 debugf1("%s() no edac_class error=%d\n", __func__, err); 1032 debugf1("%s() no edac_class error=%d\n", __func__, err);
1034 goto fail_out; 1033 goto fail_out;
1035 } 1034 }
1036 1035
1037 /* Init the MC's kobject */ 1036 /* Init the MC's kobject */
1038 mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj); 1037 mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj);
1039 if (!mc_kset) { 1038 if (!mc_kset) {
1040 err = -ENOMEM; 1039 err = -ENOMEM;
1041 debugf1("%s() Failed to register '.../edac/mc'\n", __func__); 1040 debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
1042 goto fail_kset; 1041 goto fail_kset;
1043 } 1042 }
1044 1043
1045 debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); 1044 debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
1046 1045
1047 return 0; 1046 return 0;
1048 1047
1049 fail_kset: 1048 fail_kset:
1050 edac_put_sysfs_class(); 1049 edac_put_sysfs_class();
1051 1050
1052 fail_out: 1051 fail_out:
1053 return err; 1052 return err;
drivers/edac/i5100_edac.c
1 /* 1 /*
2 * Intel 5100 Memory Controllers kernel module 2 * Intel 5100 Memory Controllers kernel module
3 * 3 *
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * This module is based on the following document: 7 * This module is based on the following document:
8 * 8 *
9 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet 9 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
10 * http://download.intel.com/design/chipsets/datashts/318378.pdf 10 * http://download.intel.com/design/chipsets/datashts/318378.pdf
11 * 11 *
12 * The Intel 5100 has two independent channels. The EDAC core currently 12 * The Intel 5100 has two independent channels. The EDAC core currently
13 * cannot reflect this configuration, so instead the chip-select 13 * cannot reflect this configuration, so instead the chip-select
14 * rows for each respective channel are laid out one after another, 14 * rows for each respective channel are laid out one after another,
15 * the first half belonging to channel 0, the second half belonging 15 * the first half belonging to channel 0, the second half belonging
16 * to channel 1. 16 * to channel 1.
17 */ 17 */
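As a worked example of that layout (a sketch assuming the full six ranks per channel, mirroring the i5100_csrow_to_chan()/i5100_csrow_to_rank() helpers further down):

    /*
     * csrow  0..5  -> channel 0, ranks 0..5
     * csrow  6..11 -> channel 1, ranks 0..5
     * e.g. csrow 7: chan = 7 / 6 = 1, rank = 7 % 6 = 1
     */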
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/pci.h> 20 #include <linux/pci.h>
21 #include <linux/pci_ids.h> 21 #include <linux/pci_ids.h>
22 #include <linux/edac.h> 22 #include <linux/edac.h>
23 #include <linux/delay.h> 23 #include <linux/delay.h>
24 #include <linux/mmzone.h> 24 #include <linux/mmzone.h>
25 25
26 #include "edac_core.h" 26 #include "edac_core.h"
27 27
28 /* register addresses */ 28 /* register addresses */
29 29
30 /* device 16, func 1 */ 30 /* device 16, func 1 */
31 #define I5100_MC 0x40 /* Memory Control Register */ 31 #define I5100_MC 0x40 /* Memory Control Register */
32 #define I5100_MC_SCRBEN_MASK (1 << 7) 32 #define I5100_MC_SCRBEN_MASK (1 << 7)
33 #define I5100_MC_SCRBDONE_MASK (1 << 4) 33 #define I5100_MC_SCRBDONE_MASK (1 << 4)
34 #define I5100_MS 0x44 /* Memory Status Register */ 34 #define I5100_MS 0x44 /* Memory Status Register */
35 #define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */ 35 #define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
36 #define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */ 36 #define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
37 #define I5100_TOLM 0x6c /* Top of Low Memory */ 37 #define I5100_TOLM 0x6c /* Top of Low Memory */
38 #define I5100_MIR0 0x80 /* Memory Interleave Range 0 */ 38 #define I5100_MIR0 0x80 /* Memory Interleave Range 0 */
39 #define I5100_MIR1 0x84 /* Memory Interleave Range 1 */ 39 #define I5100_MIR1 0x84 /* Memory Interleave Range 1 */
40 #define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */ 40 #define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */
41 #define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */ 41 #define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */
42 #define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */ 42 #define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */
43 #define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16) 43 #define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16)
44 #define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15) 44 #define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15)
45 #define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14) 45 #define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14)
46 #define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12) 46 #define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12)
47 #define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11) 47 #define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11)
48 #define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10) 48 #define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10)
49 #define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6) 49 #define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6)
50 #define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5) 50 #define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5)
51 #define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4) 51 #define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4)
52 #define I5100_FERR_NF_MEM_M1ERR_MASK 1 52 #define I5100_FERR_NF_MEM_M1ERR_MASK 1
53 #define I5100_FERR_NF_MEM_ANY_MASK \ 53 #define I5100_FERR_NF_MEM_ANY_MASK \
54 (I5100_FERR_NF_MEM_M16ERR_MASK | \ 54 (I5100_FERR_NF_MEM_M16ERR_MASK | \
55 I5100_FERR_NF_MEM_M15ERR_MASK | \ 55 I5100_FERR_NF_MEM_M15ERR_MASK | \
56 I5100_FERR_NF_MEM_M14ERR_MASK | \ 56 I5100_FERR_NF_MEM_M14ERR_MASK | \
57 I5100_FERR_NF_MEM_M12ERR_MASK | \ 57 I5100_FERR_NF_MEM_M12ERR_MASK | \
58 I5100_FERR_NF_MEM_M11ERR_MASK | \ 58 I5100_FERR_NF_MEM_M11ERR_MASK | \
59 I5100_FERR_NF_MEM_M10ERR_MASK | \ 59 I5100_FERR_NF_MEM_M10ERR_MASK | \
60 I5100_FERR_NF_MEM_M6ERR_MASK | \ 60 I5100_FERR_NF_MEM_M6ERR_MASK | \
61 I5100_FERR_NF_MEM_M5ERR_MASK | \ 61 I5100_FERR_NF_MEM_M5ERR_MASK | \
62 I5100_FERR_NF_MEM_M4ERR_MASK | \ 62 I5100_FERR_NF_MEM_M4ERR_MASK | \
63 I5100_FERR_NF_MEM_M1ERR_MASK) 63 I5100_FERR_NF_MEM_M1ERR_MASK)
64 #define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */ 64 #define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */
65 #define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */ 65 #define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */
66 66
67 /* device 21 and 22, func 0 */ 67 /* device 21 and 22, func 0 */
68 #define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */ 68 #define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */
69 #define I5100_DMIR 0x15c /* DIMM Interleave Range */ 69 #define I5100_DMIR 0x15c /* DIMM Interleave Range */
70 #define I5100_VALIDLOG 0x18c /* Valid Log Markers */ 70 #define I5100_VALIDLOG 0x18c /* Valid Log Markers */
71 #define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */ 71 #define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */
72 #define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */ 72 #define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */
73 #define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */ 73 #define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */
74 #define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */ 74 #define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */
75 #define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */ 75 #define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */
76 #define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */ 76 #define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */
77 #define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */ 77 #define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */
78 78
79 /* bit field accessors */ 79 /* bit field accessors */
80 80
81 static inline u32 i5100_mc_scrben(u32 mc) 81 static inline u32 i5100_mc_scrben(u32 mc)
82 { 82 {
83 return mc >> 7 & 1; 83 return mc >> 7 & 1;
84 } 84 }
85 85
86 static inline u32 i5100_mc_errdeten(u32 mc) 86 static inline u32 i5100_mc_errdeten(u32 mc)
87 { 87 {
88 return mc >> 5 & 1; 88 return mc >> 5 & 1;
89 } 89 }
90 90
91 static inline u32 i5100_mc_scrbdone(u32 mc) 91 static inline u32 i5100_mc_scrbdone(u32 mc)
92 { 92 {
93 return mc >> 4 & 1; 93 return mc >> 4 & 1;
94 } 94 }
95 95
96 static inline u16 i5100_spddata_rdo(u16 a) 96 static inline u16 i5100_spddata_rdo(u16 a)
97 { 97 {
98 return a >> 15 & 1; 98 return a >> 15 & 1;
99 } 99 }
100 100
101 static inline u16 i5100_spddata_sbe(u16 a) 101 static inline u16 i5100_spddata_sbe(u16 a)
102 { 102 {
103 return a >> 13 & 1; 103 return a >> 13 & 1;
104 } 104 }
105 105
106 static inline u16 i5100_spddata_busy(u16 a) 106 static inline u16 i5100_spddata_busy(u16 a)
107 { 107 {
108 return a >> 12 & 1; 108 return a >> 12 & 1;
109 } 109 }
110 110
111 static inline u16 i5100_spddata_data(u16 a) 111 static inline u16 i5100_spddata_data(u16 a)
112 { 112 {
113 return a & ((1 << 8) - 1); 113 return a & ((1 << 8) - 1);
114 } 114 }
115 115
116 static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba, 116 static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
117 u32 data, u32 cmd) 117 u32 data, u32 cmd)
118 { 118 {
119 return ((dti & ((1 << 4) - 1)) << 28) | 119 return ((dti & ((1 << 4) - 1)) << 28) |
120 ((ckovrd & 1) << 27) | 120 ((ckovrd & 1) << 27) |
121 ((sa & ((1 << 3) - 1)) << 24) | 121 ((sa & ((1 << 3) - 1)) << 24) |
122 ((ba & ((1 << 8) - 1)) << 16) | 122 ((ba & ((1 << 8) - 1)) << 16) |
123 ((data & ((1 << 8) - 1)) << 8) | 123 ((data & ((1 << 8) - 1)) << 8) |
124 (cmd & 1); 124 (cmd & 1);
125 } 125 }
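For instance, a single SPD read might be packed as follows (a hedged sketch; the argument values and the register write are hypothetical, with 0xa being the conventional SPD EEPROM device-type identifier):

    /* Hypothetical: read SPD byte 2 from SPD address 1 with the clock
     * override bit set, no write data, cmd = 0 (read). */
    u32 cmd = i5100_spdcmd_create(0xa, 1, 1, 2, 0, 0);
    pci_write_config_dword(priv->mc, I5100_SPDCMD, cmd);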
126 126
127 static inline u16 i5100_tolm_tolm(u16 a) 127 static inline u16 i5100_tolm_tolm(u16 a)
128 { 128 {
129 return a >> 12 & ((1 << 4) - 1); 129 return a >> 12 & ((1 << 4) - 1);
130 } 130 }
131 131
132 static inline u16 i5100_mir_limit(u16 a) 132 static inline u16 i5100_mir_limit(u16 a)
133 { 133 {
134 return a >> 4 & ((1 << 12) - 1); 134 return a >> 4 & ((1 << 12) - 1);
135 } 135 }
136 136
137 static inline u16 i5100_mir_way1(u16 a) 137 static inline u16 i5100_mir_way1(u16 a)
138 { 138 {
139 return a >> 1 & 1; 139 return a >> 1 & 1;
140 } 140 }
141 141
142 static inline u16 i5100_mir_way0(u16 a) 142 static inline u16 i5100_mir_way0(u16 a)
143 { 143 {
144 return a & 1; 144 return a & 1;
145 } 145 }
146 146
147 static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a) 147 static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
148 { 148 {
149 return a >> 28 & 1; 149 return a >> 28 & 1;
150 } 150 }
151 151
152 static inline u32 i5100_ferr_nf_mem_any(u32 a) 152 static inline u32 i5100_ferr_nf_mem_any(u32 a)
153 { 153 {
154 return a & I5100_FERR_NF_MEM_ANY_MASK; 154 return a & I5100_FERR_NF_MEM_ANY_MASK;
155 } 155 }
156 156
157 static inline u32 i5100_nerr_nf_mem_any(u32 a) 157 static inline u32 i5100_nerr_nf_mem_any(u32 a)
158 { 158 {
159 return i5100_ferr_nf_mem_any(a); 159 return i5100_ferr_nf_mem_any(a);
160 } 160 }
161 161
162 static inline u32 i5100_dmir_limit(u32 a) 162 static inline u32 i5100_dmir_limit(u32 a)
163 { 163 {
164 return a >> 16 & ((1 << 11) - 1); 164 return a >> 16 & ((1 << 11) - 1);
165 } 165 }
166 166
167 static inline u32 i5100_dmir_rank(u32 a, u32 i) 167 static inline u32 i5100_dmir_rank(u32 a, u32 i)
168 { 168 {
169 return a >> (4 * i) & ((1 << 2) - 1); 169 return a >> (4 * i) & ((1 << 2) - 1);
170 } 170 }
171 171
172 static inline u16 i5100_mtr_present(u16 a) 172 static inline u16 i5100_mtr_present(u16 a)
173 { 173 {
174 return a >> 10 & 1; 174 return a >> 10 & 1;
175 } 175 }
176 176
177 static inline u16 i5100_mtr_ethrottle(u16 a) 177 static inline u16 i5100_mtr_ethrottle(u16 a)
178 { 178 {
179 return a >> 9 & 1; 179 return a >> 9 & 1;
180 } 180 }
181 181
182 static inline u16 i5100_mtr_width(u16 a) 182 static inline u16 i5100_mtr_width(u16 a)
183 { 183 {
184 return a >> 8 & 1; 184 return a >> 8 & 1;
185 } 185 }
186 186
187 static inline u16 i5100_mtr_numbank(u16 a) 187 static inline u16 i5100_mtr_numbank(u16 a)
188 { 188 {
189 return a >> 6 & 1; 189 return a >> 6 & 1;
190 } 190 }
191 191
192 static inline u16 i5100_mtr_numrow(u16 a) 192 static inline u16 i5100_mtr_numrow(u16 a)
193 { 193 {
194 return a >> 2 & ((1 << 2) - 1); 194 return a >> 2 & ((1 << 2) - 1);
195 } 195 }
196 196
197 static inline u16 i5100_mtr_numcol(u16 a) 197 static inline u16 i5100_mtr_numcol(u16 a)
198 { 198 {
199 return a & ((1 << 2) - 1); 199 return a & ((1 << 2) - 1);
200 } 200 }
201 201
202 202
203 static inline u32 i5100_validlog_redmemvalid(u32 a) 203 static inline u32 i5100_validlog_redmemvalid(u32 a)
204 { 204 {
205 return a >> 2 & 1; 205 return a >> 2 & 1;
206 } 206 }
207 207
208 static inline u32 i5100_validlog_recmemvalid(u32 a) 208 static inline u32 i5100_validlog_recmemvalid(u32 a)
209 { 209 {
210 return a >> 1 & 1; 210 return a >> 1 & 1;
211 } 211 }
212 212
213 static inline u32 i5100_validlog_nrecmemvalid(u32 a) 213 static inline u32 i5100_validlog_nrecmemvalid(u32 a)
214 { 214 {
215 return a & 1; 215 return a & 1;
216 } 216 }
217 217
218 static inline u32 i5100_nrecmema_merr(u32 a) 218 static inline u32 i5100_nrecmema_merr(u32 a)
219 { 219 {
220 return a >> 15 & ((1 << 5) - 1); 220 return a >> 15 & ((1 << 5) - 1);
221 } 221 }
222 222
223 static inline u32 i5100_nrecmema_bank(u32 a) 223 static inline u32 i5100_nrecmema_bank(u32 a)
224 { 224 {
225 return a >> 12 & ((1 << 3) - 1); 225 return a >> 12 & ((1 << 3) - 1);
226 } 226 }
227 227
228 static inline u32 i5100_nrecmema_rank(u32 a) 228 static inline u32 i5100_nrecmema_rank(u32 a)
229 { 229 {
230 return a >> 8 & ((1 << 3) - 1); 230 return a >> 8 & ((1 << 3) - 1);
231 } 231 }
232 232
233 static inline u32 i5100_nrecmema_dm_buf_id(u32 a) 233 static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
234 { 234 {
235 return a & ((1 << 8) - 1); 235 return a & ((1 << 8) - 1);
236 } 236 }
237 237
238 static inline u32 i5100_nrecmemb_cas(u32 a) 238 static inline u32 i5100_nrecmemb_cas(u32 a)
239 { 239 {
240 return a >> 16 & ((1 << 13) - 1); 240 return a >> 16 & ((1 << 13) - 1);
241 } 241 }
242 242
243 static inline u32 i5100_nrecmemb_ras(u32 a) 243 static inline u32 i5100_nrecmemb_ras(u32 a)
244 { 244 {
245 return a & ((1 << 16) - 1); 245 return a & ((1 << 16) - 1);
246 } 246 }
247 247
248 static inline u32 i5100_redmemb_ecc_locator(u32 a) 248 static inline u32 i5100_redmemb_ecc_locator(u32 a)
249 { 249 {
250 return a & ((1 << 18) - 1); 250 return a & ((1 << 18) - 1);
251 } 251 }
252 252
253 static inline u32 i5100_recmema_merr(u32 a) 253 static inline u32 i5100_recmema_merr(u32 a)
254 { 254 {
255 return i5100_nrecmema_merr(a); 255 return i5100_nrecmema_merr(a);
256 } 256 }
257 257
258 static inline u32 i5100_recmema_bank(u32 a) 258 static inline u32 i5100_recmema_bank(u32 a)
259 { 259 {
260 return i5100_nrecmema_bank(a); 260 return i5100_nrecmema_bank(a);
261 } 261 }
262 262
263 static inline u32 i5100_recmema_rank(u32 a) 263 static inline u32 i5100_recmema_rank(u32 a)
264 { 264 {
265 return i5100_nrecmema_rank(a); 265 return i5100_nrecmema_rank(a);
266 } 266 }
267 267
268 static inline u32 i5100_recmema_dm_buf_id(u32 a) 268 static inline u32 i5100_recmema_dm_buf_id(u32 a)
269 { 269 {
270 return i5100_nrecmema_dm_buf_id(a); 270 return i5100_nrecmema_dm_buf_id(a);
271 } 271 }
272 272
273 static inline u32 i5100_recmemb_cas(u32 a) 273 static inline u32 i5100_recmemb_cas(u32 a)
274 { 274 {
275 return i5100_nrecmemb_cas(a); 275 return i5100_nrecmemb_cas(a);
276 } 276 }
277 277
278 static inline u32 i5100_recmemb_ras(u32 a) 278 static inline u32 i5100_recmemb_ras(u32 a)
279 { 279 {
280 return i5100_nrecmemb_ras(a); 280 return i5100_nrecmemb_ras(a);
281 } 281 }
282 282
283 /* some generic limits */ 283 /* some generic limits */
284 #define I5100_MAX_RANKS_PER_CHAN 6 284 #define I5100_MAX_RANKS_PER_CHAN 6
285 #define I5100_CHANNELS 2 285 #define I5100_CHANNELS 2
286 #define I5100_MAX_RANKS_PER_DIMM 4 286 #define I5100_MAX_RANKS_PER_DIMM 4
287 #define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */ 287 #define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
288 #define I5100_MAX_DIMM_SLOTS_PER_CHAN 4 288 #define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
289 #define I5100_MAX_RANK_INTERLEAVE 4 289 #define I5100_MAX_RANK_INTERLEAVE 4
290 #define I5100_MAX_DMIRS 5 290 #define I5100_MAX_DMIRS 5
291 #define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ) 291 #define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
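(I5100_SCRUB_REFRESH_RATE works out to 5 * 60 * HZ jiffies, i.e. five minutes; this is the "every few minutes" re-check interval used by the scrubbing worker below.)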
292 292
293 struct i5100_priv { 293 struct i5100_priv {
294 /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */ 294 /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
295 int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN]; 295 int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
296 296
297 /* 297 /*
298 * mainboard chip select map -- maps i5100 chip selects to 298 * mainboard chip select map -- maps i5100 chip selects to
299 * DIMM slot chip selects. In the case of only 4 ranks per 299 * DIMM slot chip selects. In the case of only 4 ranks per
300 * channel, the mapping is fairly obvious but not unique. 300 * channel, the mapping is fairly obvious but not unique.
301 * We map -1 -> NC and assume both channels use the same 300 * We map -1 -> NC and assume both channels use the same
302 * map... 302 * map...
303 * 303 *
304 */ 304 */
305 int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM]; 305 int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
306 306
307 /* memory interleave range */ 307 /* memory interleave range */
308 struct { 308 struct {
309 u64 limit; 309 u64 limit;
310 unsigned way[2]; 310 unsigned way[2];
311 } mir[I5100_CHANNELS]; 311 } mir[I5100_CHANNELS];
312 312
313 /* adjusted memory interleave range register */ 313 /* adjusted memory interleave range register */
314 unsigned amir[I5100_CHANNELS]; 314 unsigned amir[I5100_CHANNELS];
315 315
316 /* dimm interleave range */ 316 /* dimm interleave range */
317 struct { 317 struct {
318 unsigned rank[I5100_MAX_RANK_INTERLEAVE]; 318 unsigned rank[I5100_MAX_RANK_INTERLEAVE];
319 u64 limit; 319 u64 limit;
320 } dmir[I5100_CHANNELS][I5100_MAX_DMIRS]; 320 } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
321 321
322 /* memory technology registers... */ 322 /* memory technology registers... */
323 struct { 323 struct {
324 unsigned present; /* 0 or 1 */ 324 unsigned present; /* 0 or 1 */
325 unsigned ethrottle; /* 0 or 1 */ 325 unsigned ethrottle; /* 0 or 1 */
326 unsigned width; /* 4 or 8 bits */ 326 unsigned width; /* 4 or 8 bits */
327 unsigned numbank; /* 2 or 3 lines */ 327 unsigned numbank; /* 2 or 3 lines */
328 unsigned numrow; /* 13 .. 16 lines */ 328 unsigned numrow; /* 13 .. 16 lines */
329 unsigned numcol; /* 11 .. 12 lines */ 329 unsigned numcol; /* 11 .. 12 lines */
330 } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN]; 330 } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
331 331
332 u64 tolm; /* top of low memory in bytes */ 332 u64 tolm; /* top of low memory in bytes */
333 unsigned ranksperchan; /* number of ranks per channel */ 333 unsigned ranksperchan; /* number of ranks per channel */
334 334
335 struct pci_dev *mc; /* device 16 func 1 */ 335 struct pci_dev *mc; /* device 16 func 1 */
336 struct pci_dev *ch0mm; /* device 21 func 0 */ 336 struct pci_dev *ch0mm; /* device 21 func 0 */
337 struct pci_dev *ch1mm; /* device 22 func 0 */ 337 struct pci_dev *ch1mm; /* device 22 func 0 */
338 338
339 struct delayed_work i5100_scrubbing; 339 struct delayed_work i5100_scrubbing;
340 int scrub_enable; 340 int scrub_enable;
341 }; 341 };
342 342
343 /* map a rank/chan to a slot number on the mainboard */ 343 /* map a rank/chan to a slot number on the mainboard */
344 static int i5100_rank_to_slot(const struct mem_ctl_info *mci, 344 static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
345 int chan, int rank) 345 int chan, int rank)
346 { 346 {
347 const struct i5100_priv *priv = mci->pvt_info; 347 const struct i5100_priv *priv = mci->pvt_info;
348 int i; 348 int i;
349 349
350 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) { 350 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
351 int j; 351 int j;
352 const int numrank = priv->dimm_numrank[chan][i]; 352 const int numrank = priv->dimm_numrank[chan][i];
353 353
354 for (j = 0; j < numrank; j++) 354 for (j = 0; j < numrank; j++)
355 if (priv->dimm_csmap[i][j] == rank) 355 if (priv->dimm_csmap[i][j] == rank)
356 return i * 2 + chan; 356 return i * 2 + chan;
357 } 357 }
358 358
359 return -1; 359 return -1;
360 } 360 }
361 361
362 static const char *i5100_err_msg(unsigned err) 362 static const char *i5100_err_msg(unsigned err)
363 { 363 {
364 static const char *merrs[] = { 364 static const char *merrs[] = {
365 "unknown", /* 0 */ 365 "unknown", /* 0 */
366 "uncorrectable data ECC on replay", /* 1 */ 366 "uncorrectable data ECC on replay", /* 1 */
367 "unknown", /* 2 */ 367 "unknown", /* 2 */
368 "unknown", /* 3 */ 368 "unknown", /* 3 */
369 "aliased uncorrectable demand data ECC", /* 4 */ 369 "aliased uncorrectable demand data ECC", /* 4 */
370 "aliased uncorrectable spare-copy data ECC", /* 5 */ 370 "aliased uncorrectable spare-copy data ECC", /* 5 */
371 "aliased uncorrectable patrol data ECC", /* 6 */ 371 "aliased uncorrectable patrol data ECC", /* 6 */
372 "unknown", /* 7 */ 372 "unknown", /* 7 */
373 "unknown", /* 8 */ 373 "unknown", /* 8 */
374 "unknown", /* 9 */ 374 "unknown", /* 9 */
375 "non-aliased uncorrectable demand data ECC", /* 10 */ 375 "non-aliased uncorrectable demand data ECC", /* 10 */
376 "non-aliased uncorrectable spare-copy data ECC", /* 11 */ 376 "non-aliased uncorrectable spare-copy data ECC", /* 11 */
377 "non-aliased uncorrectable patrol data ECC", /* 12 */ 377 "non-aliased uncorrectable patrol data ECC", /* 12 */
378 "unknown", /* 13 */ 378 "unknown", /* 13 */
379 "correctable demand data ECC", /* 14 */ 379 "correctable demand data ECC", /* 14 */
380 "correctable spare-copy data ECC", /* 15 */ 380 "correctable spare-copy data ECC", /* 15 */
381 "correctable patrol data ECC", /* 16 */ 381 "correctable patrol data ECC", /* 16 */
382 "unknown", /* 17 */ 382 "unknown", /* 17 */
383 "SPD protocol error", /* 18 */ 383 "SPD protocol error", /* 18 */
384 "unknown", /* 19 */ 384 "unknown", /* 19 */
385 "spare copy initiated", /* 20 */ 385 "spare copy initiated", /* 20 */
386 "spare copy completed", /* 21 */ 386 "spare copy completed", /* 21 */
387 }; 387 };
388 unsigned i; 388 unsigned i;
389 389
390 for (i = 0; i < ARRAY_SIZE(merrs); i++) 390 for (i = 0; i < ARRAY_SIZE(merrs); i++)
391 if (1 << i & err) 391 if (1 << i & err)
392 return merrs[i]; 392 return merrs[i];
393 393
394 return "none"; 394 return "none";
395 } 395 }
396 396
397 /* convert csrow index into a rank (per channel -- 0..5) */ 397 /* convert csrow index into a rank (per channel -- 0..5) */
398 static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow) 398 static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
399 { 399 {
400 const struct i5100_priv *priv = mci->pvt_info; 400 const struct i5100_priv *priv = mci->pvt_info;
401 401
402 return csrow % priv->ranksperchan; 402 return csrow % priv->ranksperchan;
403 } 403 }
404 404
405 /* convert csrow index into a channel (0..1) */ 405 /* convert csrow index into a channel (0..1) */
406 static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow) 406 static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
407 { 407 {
408 const struct i5100_priv *priv = mci->pvt_info; 408 const struct i5100_priv *priv = mci->pvt_info;
409 409
410 return csrow / priv->ranksperchan; 410 return csrow / priv->ranksperchan;
411 } 411 }
412 412
413 static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci, 413 static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
414 int chan, int rank) 414 int chan, int rank)
415 { 415 {
416 const struct i5100_priv *priv = mci->pvt_info; 416 const struct i5100_priv *priv = mci->pvt_info;
417 417
418 return chan * priv->ranksperchan + rank; 418 return chan * priv->ranksperchan + rank;
419 } 419 }
420 420
421 static void i5100_handle_ce(struct mem_ctl_info *mci, 421 static void i5100_handle_ce(struct mem_ctl_info *mci,
422 int chan, 422 int chan,
423 unsigned bank, 423 unsigned bank,
424 unsigned rank, 424 unsigned rank,
425 unsigned long syndrome, 425 unsigned long syndrome,
426 unsigned cas, 426 unsigned cas,
427 unsigned ras, 427 unsigned ras,
428 const char *msg) 428 const char *msg)
429 { 429 {
430 const int csrow = i5100_rank_to_csrow(mci, chan, rank); 430 const int csrow = i5100_rank_to_csrow(mci, chan, rank);
431 431
432 printk(KERN_ERR 432 printk(KERN_ERR
433 "CE chan %d, bank %u, rank %u, syndrome 0x%lx, " 433 "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
434 "cas %u, ras %u, csrow %u, label \"%s\": %s\n", 434 "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
435 chan, bank, rank, syndrome, cas, ras, 435 chan, bank, rank, syndrome, cas, ras,
436 csrow, mci->csrows[csrow].channels[0].label, msg); 436 csrow, mci->csrows[csrow].channels[0].label, msg);
437 437
438 mci->ce_count++; 438 mci->ce_count++;
439 mci->csrows[csrow].ce_count++; 439 mci->csrows[csrow].ce_count++;
440 mci->csrows[csrow].channels[0].ce_count++; 440 mci->csrows[csrow].channels[0].ce_count++;
441 } 441 }
442 442
443 static void i5100_handle_ue(struct mem_ctl_info *mci, 443 static void i5100_handle_ue(struct mem_ctl_info *mci,
444 int chan, 444 int chan,
445 unsigned bank, 445 unsigned bank,
446 unsigned rank, 446 unsigned rank,
447 unsigned long syndrome, 447 unsigned long syndrome,
448 unsigned cas, 448 unsigned cas,
449 unsigned ras, 449 unsigned ras,
450 const char *msg) 450 const char *msg)
451 { 451 {
452 const int csrow = i5100_rank_to_csrow(mci, chan, rank); 452 const int csrow = i5100_rank_to_csrow(mci, chan, rank);
453 453
454 printk(KERN_ERR 454 printk(KERN_ERR
455 "UE chan %d, bank %u, rank %u, syndrome 0x%lx, " 455 "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
456 "cas %u, ras %u, csrow %u, label \"%s\": %s\n", 456 "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
457 chan, bank, rank, syndrome, cas, ras, 457 chan, bank, rank, syndrome, cas, ras,
458 csrow, mci->csrows[csrow].channels[0].label, msg); 458 csrow, mci->csrows[csrow].channels[0].label, msg);
459 459
460 mci->ue_count++; 460 mci->ue_count++;
461 mci->csrows[csrow].ue_count++; 461 mci->csrows[csrow].ue_count++;
462 } 462 }
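Both handlers bump the same counters that the mc&lt;N&gt; sysfs attributes defined earlier in this patch report: mci->ce_count backs the ce_count file, mci->ue_count backs ue_count, and the per-csrow counts feed the csrow directories.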
463 463
464 static void i5100_read_log(struct mem_ctl_info *mci, int chan, 464 static void i5100_read_log(struct mem_ctl_info *mci, int chan,
465 u32 ferr, u32 nerr) 465 u32 ferr, u32 nerr)
466 { 466 {
467 struct i5100_priv *priv = mci->pvt_info; 467 struct i5100_priv *priv = mci->pvt_info;
468 struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm; 468 struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
469 u32 dw; 469 u32 dw;
470 u32 dw2; 470 u32 dw2;
471 unsigned syndrome = 0; 471 unsigned syndrome = 0;
472 unsigned ecc_loc = 0; 472 unsigned ecc_loc = 0;
473 unsigned merr; 473 unsigned merr;
474 unsigned bank; 474 unsigned bank;
475 unsigned rank; 475 unsigned rank;
476 unsigned cas; 476 unsigned cas;
477 unsigned ras; 477 unsigned ras;
478 478
479 pci_read_config_dword(pdev, I5100_VALIDLOG, &dw); 479 pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);
480 480
481 if (i5100_validlog_redmemvalid(dw)) { 481 if (i5100_validlog_redmemvalid(dw)) {
482 pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); 482 pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
483 syndrome = dw2; 483 syndrome = dw2;
484 pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); 484 pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
485 ecc_loc = i5100_redmemb_ecc_locator(dw2); 485 ecc_loc = i5100_redmemb_ecc_locator(dw2);
486 } 486 }
487 487
488 if (i5100_validlog_recmemvalid(dw)) { 488 if (i5100_validlog_recmemvalid(dw)) {
489 const char *msg; 489 const char *msg;
490 490
491 pci_read_config_dword(pdev, I5100_RECMEMA, &dw2); 491 pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
492 merr = i5100_recmema_merr(dw2); 492 merr = i5100_recmema_merr(dw2);
493 bank = i5100_recmema_bank(dw2); 493 bank = i5100_recmema_bank(dw2);
494 rank = i5100_recmema_rank(dw2); 494 rank = i5100_recmema_rank(dw2);
495 495
496 pci_read_config_dword(pdev, I5100_RECMEMB, &dw2); 496 pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
497 cas = i5100_recmemb_cas(dw2); 497 cas = i5100_recmemb_cas(dw2);
498 ras = i5100_recmemb_ras(dw2); 498 ras = i5100_recmemb_ras(dw2);
499 499
500 /* FIXME: not really sure if this is what merr is... 500 /* FIXME: not really sure if this is what merr is...
501 */ 501 */
502 if (!merr) 502 if (!merr)
503 msg = i5100_err_msg(ferr); 503 msg = i5100_err_msg(ferr);
504 else 504 else
505 msg = i5100_err_msg(nerr); 505 msg = i5100_err_msg(nerr);
506 506
507 i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg); 507 i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
508 } 508 }
509 509
510 if (i5100_validlog_nrecmemvalid(dw)) { 510 if (i5100_validlog_nrecmemvalid(dw)) {
511 const char *msg; 511 const char *msg;
512 512
513 pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2); 513 pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
514 merr = i5100_nrecmema_merr(dw2); 514 merr = i5100_nrecmema_merr(dw2);
515 bank = i5100_nrecmema_bank(dw2); 515 bank = i5100_nrecmema_bank(dw2);
516 rank = i5100_nrecmema_rank(dw2); 516 rank = i5100_nrecmema_rank(dw2);
517 517
518 pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2); 518 pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
519 cas = i5100_nrecmemb_cas(dw2); 519 cas = i5100_nrecmemb_cas(dw2);
520 ras = i5100_nrecmemb_ras(dw2); 520 ras = i5100_nrecmemb_ras(dw2);
521 521
522 /* FIXME: not really sure if this is what merr is... 522 /* FIXME: not really sure if this is what merr is...
523 */ 523 */
524 if (!merr) 524 if (!merr)
525 msg = i5100_err_msg(ferr); 525 msg = i5100_err_msg(ferr);
526 else 526 else
527 msg = i5100_err_msg(nerr); 527 msg = i5100_err_msg(nerr);
528 528
529 i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg); 529 i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
530 } 530 }
531 531
532 pci_write_config_dword(pdev, I5100_VALIDLOG, dw); 532 pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
533 } 533 }
534 534
535 static void i5100_check_error(struct mem_ctl_info *mci) 535 static void i5100_check_error(struct mem_ctl_info *mci)
536 { 536 {
537 struct i5100_priv *priv = mci->pvt_info; 537 struct i5100_priv *priv = mci->pvt_info;
538 u32 dw; 538 u32 dw;
539 539
540 540
541 pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw); 541 pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
542 if (i5100_ferr_nf_mem_any(dw)) { 542 if (i5100_ferr_nf_mem_any(dw)) {
543 u32 dw2; 543 u32 dw2;
544 544
545 pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2); 545 pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
546 if (dw2) 546 if (dw2)
547 pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, 547 pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
548 dw2); 548 dw2);
549 pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw); 549 pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
550 550
551 i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw), 551 i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
552 i5100_ferr_nf_mem_any(dw), 552 i5100_ferr_nf_mem_any(dw),
553 i5100_nerr_nf_mem_any(dw2)); 553 i5100_nerr_nf_mem_any(dw2));
554 } 554 }
555 } 555 }
556 556
557 /* The i5100 chipset will scrub the entire memory once, then 557 /* The i5100 chipset will scrub the entire memory once, then
558 * set a done bit. Continuous scrubbing is achieved by enqueueing 558 * set a done bit. Continuous scrubbing is achieved by enqueueing
559 * delayed work to a workqueue, checking every few minutes if 559 * delayed work to a workqueue, checking every few minutes if
560 * the scrubbing has completed and, if so, reinitiating it. 560 * the scrubbing has completed and, if so, reinitiating it.
561 */ 561 */
562 562
563 static void i5100_refresh_scrubbing(struct work_struct *work) 563 static void i5100_refresh_scrubbing(struct work_struct *work)
564 { 564 {
565 struct delayed_work *i5100_scrubbing = container_of(work, 565 struct delayed_work *i5100_scrubbing = container_of(work,
566 struct delayed_work, 566 struct delayed_work,
567 work); 567 work);
568 struct i5100_priv *priv = container_of(i5100_scrubbing, 568 struct i5100_priv *priv = container_of(i5100_scrubbing,
569 struct i5100_priv, 569 struct i5100_priv,
570 i5100_scrubbing); 570 i5100_scrubbing);
571 u32 dw; 571 u32 dw;
572 572
573 pci_read_config_dword(priv->mc, I5100_MC, &dw); 573 pci_read_config_dword(priv->mc, I5100_MC, &dw);
574 574
575 if (priv->scrub_enable) { 575 if (priv->scrub_enable) {
576 576
577 pci_read_config_dword(priv->mc, I5100_MC, &dw); 577 pci_read_config_dword(priv->mc, I5100_MC, &dw);
578 578
579 if (i5100_mc_scrbdone(dw)) { 579 if (i5100_mc_scrbdone(dw)) {
580 dw |= I5100_MC_SCRBEN_MASK; 580 dw |= I5100_MC_SCRBEN_MASK;
581 pci_write_config_dword(priv->mc, I5100_MC, dw); 581 pci_write_config_dword(priv->mc, I5100_MC, dw);
582 pci_read_config_dword(priv->mc, I5100_MC, &dw); 582 pci_read_config_dword(priv->mc, I5100_MC, &dw);
583 } 583 }
584 584
585 schedule_delayed_work(&(priv->i5100_scrubbing), 585 schedule_delayed_work(&(priv->i5100_scrubbing),
586 I5100_SCRUB_REFRESH_RATE); 586 I5100_SCRUB_REFRESH_RATE);
587 } 587 }
588 } 588 }
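
The double container_of() above first recovers the struct delayed_work from its embedded work_struct, then the struct i5100_priv from its embedded delayed_work. Since <linux/workqueue.h> already provides to_delayed_work() for the first hop, the lookup could be written more compactly; a minimal sketch of the same function head (an equivalent spelling, not part of this commit):

    static void i5100_refresh_scrubbing(struct work_struct *work)
    {
            /* to_delayed_work() is container_of(work, struct delayed_work, work) */
            struct i5100_priv *priv = container_of(to_delayed_work(work),
                                                   struct i5100_priv,
                                                   i5100_scrubbing);
            /* body unchanged from the version above */
    }
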
589 /* 589 /*
590 * The bandwidth is based on experimentation; feel free to refine it. 590 * The bandwidth is based on experimentation; feel free to refine it.
591 */ 591 */
592 static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth) 592 static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
593 { 593 {
594 struct i5100_priv *priv = mci->pvt_info; 594 struct i5100_priv *priv = mci->pvt_info;
595 u32 dw; 595 u32 dw;
596 596
597 pci_read_config_dword(priv->mc, I5100_MC, &dw); 597 pci_read_config_dword(priv->mc, I5100_MC, &dw);
598 if (bandwidth) { 598 if (bandwidth) {
599 priv->scrub_enable = 1; 599 priv->scrub_enable = 1;
600 dw |= I5100_MC_SCRBEN_MASK; 600 dw |= I5100_MC_SCRBEN_MASK;
601 schedule_delayed_work(&(priv->i5100_scrubbing), 601 schedule_delayed_work(&(priv->i5100_scrubbing),
602 I5100_SCRUB_REFRESH_RATE); 602 I5100_SCRUB_REFRESH_RATE);
603 } else { 603 } else {
604 priv->scrub_enable = 0; 604 priv->scrub_enable = 0;
605 dw &= ~I5100_MC_SCRBEN_MASK; 605 dw &= ~I5100_MC_SCRBEN_MASK;
606 cancel_delayed_work(&(priv->i5100_scrubbing)); 606 cancel_delayed_work(&(priv->i5100_scrubbing));
607 } 607 }
608 pci_write_config_dword(priv->mc, I5100_MC, dw); 608 pci_write_config_dword(priv->mc, I5100_MC, dw);
609 609
610 pci_read_config_dword(priv->mc, I5100_MC, &dw); 610 pci_read_config_dword(priv->mc, I5100_MC, &dw);
611 611
612 bandwidth = 5900000 * i5100_mc_scrben(dw); 612 bandwidth = 5900000 * i5100_mc_scrben(dw);
613 613
614 return 0; 614 return bandwidth;
615 } 615 }
616 616
617 static int i5100_get_scrub_rate(struct mem_ctl_info *mci, 617 static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
618 u32 *bandwidth)
619 { 618 {
620 struct i5100_priv *priv = mci->pvt_info; 619 struct i5100_priv *priv = mci->pvt_info;
621 u32 dw; 620 u32 dw;
622 621
623 pci_read_config_dword(priv->mc, I5100_MC, &dw); 622 pci_read_config_dword(priv->mc, I5100_MC, &dw);
624 623
625 *bandwidth = 5900000 * i5100_mc_scrben(dw); 624 return 5900000 * i5100_mc_scrben(dw);
626
627 return 0;
628 } 625 }
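
Together with i5100_set_scrub_rate() above, this implements the convention the commit message documents: each callback returns the scrub rate bandwidth actually in effect as its int return value, and a negative value still means the operation failed, which is what made the old out-pointer argument superfluous. A hypothetical caller sketch (names invented for illustration; this is not the EDAC core's actual sysfs handler):

    static int sketch_update_scrub_rate(struct mem_ctl_info *mci, u32 requested)
    {
            int bw;

            if (!mci->set_sdram_scrub_rate)
                    return -ENODEV;

            bw = mci->set_sdram_scrub_rate(mci, requested);
            if (bw < 0)
                    return bw;      /* setting the scrub rate failed */

            /* bw is the bandwidth actually programmed; for the i5100
             * that is 5900000 while scrubbing is enabled, 0 otherwise */
            return bw;
    }
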
629 626
630 static struct pci_dev *pci_get_device_func(unsigned vendor, 627 static struct pci_dev *pci_get_device_func(unsigned vendor,
631 unsigned device, 628 unsigned device,
632 unsigned func) 629 unsigned func)
633 { 630 {
634 struct pci_dev *ret = NULL; 631 struct pci_dev *ret = NULL;
635 632
636 while (1) { 633 while (1) {
637 ret = pci_get_device(vendor, device, ret); 634 ret = pci_get_device(vendor, device, ret);
638 635
639 if (!ret) 636 if (!ret)
640 break; 637 break;
641 638
642 if (PCI_FUNC(ret->devfn) == func) 639 if (PCI_FUNC(ret->devfn) == func)
643 break; 640 break;
644 } 641 }
645 642
646 return ret; 643 return ret;
647 } 644 }
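
Note that pci_get_device() puts the reference on the device passed in as the starting point and takes a reference on the device it returns, so the loop above walks the device list without leaking; whatever is finally returned is still referenced and the caller must release it. A short usage sketch mirroring how the probe path below consumes this helper:

    /* device 21, function 0: Channel 0 Memory Map registers */
    struct pci_dev *dev = pci_get_device_func(PCI_VENDOR_ID_INTEL,
                                              PCI_DEVICE_ID_INTEL_5100_21, 0);
    if (dev) {
            /* ... use the device ... */
            pci_dev_put(dev);       /* drop the reference pci_get_device() took */
    }
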
648 645
649 static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci, 646 static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
650 int csrow) 647 int csrow)
651 { 648 {
652 struct i5100_priv *priv = mci->pvt_info; 649 struct i5100_priv *priv = mci->pvt_info;
653 const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow); 650 const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
654 const unsigned chan = i5100_csrow_to_chan(mci, csrow); 651 const unsigned chan = i5100_csrow_to_chan(mci, csrow);
655 unsigned addr_lines; 652 unsigned addr_lines;
656 653
657 /* dimm present? */ 654 /* dimm present? */
658 if (!priv->mtr[chan][chan_rank].present) 655 if (!priv->mtr[chan][chan_rank].present)
659 return 0ULL; 656 return 0ULL;
660 657
661 addr_lines = 658 addr_lines =
662 I5100_DIMM_ADDR_LINES + 659 I5100_DIMM_ADDR_LINES +
663 priv->mtr[chan][chan_rank].numcol + 660 priv->mtr[chan][chan_rank].numcol +
664 priv->mtr[chan][chan_rank].numrow + 661 priv->mtr[chan][chan_rank].numrow +
665 priv->mtr[chan][chan_rank].numbank; 662 priv->mtr[chan][chan_rank].numbank;
666 663
667 return (unsigned long) 664 return (unsigned long)
668 ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE); 665 ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
669 } 666 }
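
As a worked example of the computation above: assuming, purely for illustration, that I5100_DIMM_ADDR_LINES is 3 (a 64-bit data bus transfers 2^3 bytes per address), a present rank with numcol = 10, numrow = 13 and numbank = 2 gives addr_lines = 3 + 10 + 13 + 2 = 28, i.e. a 2^28-byte (256 MiB) rank; with 4 KiB pages that is 2^28 / 2^12 = 65536 pages.
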
670 667
671 static void __devinit i5100_init_mtr(struct mem_ctl_info *mci) 668 static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
672 { 669 {
673 struct i5100_priv *priv = mci->pvt_info; 670 struct i5100_priv *priv = mci->pvt_info;
674 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; 671 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
675 int i; 672 int i;
676 673
677 for (i = 0; i < I5100_CHANNELS; i++) { 674 for (i = 0; i < I5100_CHANNELS; i++) {
678 int j; 675 int j;
679 struct pci_dev *pdev = mms[i]; 676 struct pci_dev *pdev = mms[i];
680 677
681 for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) { 678 for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
682 const unsigned addr = 679 const unsigned addr =
683 (j < 4) ? I5100_MTR_0 + j * 2 : 680 (j < 4) ? I5100_MTR_0 + j * 2 :
684 I5100_MTR_4 + (j - 4) * 2; 681 I5100_MTR_4 + (j - 4) * 2;
685 u16 w; 682 u16 w;
686 683
687 pci_read_config_word(pdev, addr, &w); 684 pci_read_config_word(pdev, addr, &w);
688 685
689 priv->mtr[i][j].present = i5100_mtr_present(w); 686 priv->mtr[i][j].present = i5100_mtr_present(w);
690 priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w); 687 priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
691 priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w); 688 priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
692 priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w); 689 priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
693 priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w); 690 priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
694 priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w); 691 priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
695 } 692 }
696 } 693 }
697 } 694 }
698 695
699 /* 696 /*
700 * FIXME: make this into a real i2c adapter (so that dimm-decode 697 * FIXME: make this into a real i2c adapter (so that dimm-decode
701 * will work)? 698 * will work)?
702 */ 699 */
703 static int i5100_read_spd_byte(const struct mem_ctl_info *mci, 700 static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
704 u8 ch, u8 slot, u8 addr, u8 *byte) 701 u8 ch, u8 slot, u8 addr, u8 *byte)
705 { 702 {
706 struct i5100_priv *priv = mci->pvt_info; 703 struct i5100_priv *priv = mci->pvt_info;
707 u16 w; 704 u16 w;
708 unsigned long et; 705 unsigned long et;
709 706
710 pci_read_config_word(priv->mc, I5100_SPDDATA, &w); 707 pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
711 if (i5100_spddata_busy(w)) 708 if (i5100_spddata_busy(w))
712 return -1; 709 return -1;
713 710
714 pci_write_config_dword(priv->mc, I5100_SPDCMD, 711 pci_write_config_dword(priv->mc, I5100_SPDCMD,
715 i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr, 712 i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
716 0, 0)); 713 0, 0));
717 714
718 /* wait up to 100ms */ 715 /* wait up to 100ms */
719 et = jiffies + HZ / 10; 716 et = jiffies + HZ / 10;
720 udelay(100); 717 udelay(100);
721 while (1) { 718 while (1) {
722 pci_read_config_word(priv->mc, I5100_SPDDATA, &w); 719 pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
723 if (!i5100_spddata_busy(w)) 720 if (!i5100_spddata_busy(w))
724 break; 721 break;
725 udelay(100); 722 udelay(100);
726 } 723 }
727 724
728 if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w)) 725 if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
729 return -1; 726 return -1;
730 727
731 *byte = i5100_spddata_data(w); 728 *byte = i5100_spddata_data(w);
732 729
733 return 0; 730 return 0;
734 } 731 }
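
Note that et is set to a deadline of jiffies + HZ / 10 but never consulted, so despite the "wait up to 100ms" comment the busy poll above is unbounded if the controller never clears the busy bit. A bounded variant under the same assumptions, using the standard time_after() macro from <linux/jiffies.h> (a sketch, not part of this commit):

    /* poll up to ~100ms for the SPD transaction to complete */
    et = jiffies + HZ / 10;
    do {
            udelay(100);
            pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
    } while (i5100_spddata_busy(w) && !time_after(jiffies, et));

    if (i5100_spddata_busy(w))
            return -1;      /* timed out */
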
735 732
736 /* 733 /*
737 * fill dimm chip select map 734 * fill dimm chip select map
738 * 735 *
739 * FIXME: 736 * FIXME:
740 * o not the only way to map chip selects to dimm slots 737 * o not the only way to map chip selects to dimm slots
741 * o investigate if there is some way to obtain this map from the bios 738 * o investigate if there is some way to obtain this map from the bios
742 */ 739 */
743 static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci) 740 static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
744 { 741 {
745 struct i5100_priv *priv = mci->pvt_info; 742 struct i5100_priv *priv = mci->pvt_info;
746 int i; 743 int i;
747 744
748 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) { 745 for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
749 int j; 746 int j;
750 747
751 for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++) 748 for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
752 priv->dimm_csmap[i][j] = -1; /* default NC */ 749 priv->dimm_csmap[i][j] = -1; /* default NC */
753 } 750 }
754 751
755 /* only 2 chip selects per slot... */ 752 /* only 2 chip selects per slot... */
756 if (priv->ranksperchan == 4) { 753 if (priv->ranksperchan == 4) {
757 priv->dimm_csmap[0][0] = 0; 754 priv->dimm_csmap[0][0] = 0;
758 priv->dimm_csmap[0][1] = 3; 755 priv->dimm_csmap[0][1] = 3;
759 priv->dimm_csmap[1][0] = 1; 756 priv->dimm_csmap[1][0] = 1;
760 priv->dimm_csmap[1][1] = 2; 757 priv->dimm_csmap[1][1] = 2;
761 priv->dimm_csmap[2][0] = 2; 758 priv->dimm_csmap[2][0] = 2;
762 priv->dimm_csmap[3][0] = 3; 759 priv->dimm_csmap[3][0] = 3;
763 } else { 760 } else {
764 priv->dimm_csmap[0][0] = 0; 761 priv->dimm_csmap[0][0] = 0;
765 priv->dimm_csmap[0][1] = 1; 762 priv->dimm_csmap[0][1] = 1;
766 priv->dimm_csmap[1][0] = 2; 763 priv->dimm_csmap[1][0] = 2;
767 priv->dimm_csmap[1][1] = 3; 764 priv->dimm_csmap[1][1] = 3;
768 priv->dimm_csmap[2][0] = 4; 765 priv->dimm_csmap[2][0] = 4;
769 priv->dimm_csmap[2][1] = 5; 766 priv->dimm_csmap[2][1] = 5;
770 } 767 }
771 } 768 }
772 769
773 static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev, 770 static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
774 struct mem_ctl_info *mci) 771 struct mem_ctl_info *mci)
775 { 772 {
776 struct i5100_priv *priv = mci->pvt_info; 773 struct i5100_priv *priv = mci->pvt_info;
777 int i; 774 int i;
778 775
779 for (i = 0; i < I5100_CHANNELS; i++) { 776 for (i = 0; i < I5100_CHANNELS; i++) {
780 int j; 777 int j;
781 778
782 for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) { 779 for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
783 u8 rank; 780 u8 rank;
784 781
785 if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0) 782 if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
786 priv->dimm_numrank[i][j] = 0; 783 priv->dimm_numrank[i][j] = 0;
787 else 784 else
788 priv->dimm_numrank[i][j] = (rank & 3) + 1; 785 priv->dimm_numrank[i][j] = (rank & 3) + 1;
789 } 786 }
790 } 787 }
791 788
792 i5100_init_dimm_csmap(mci); 789 i5100_init_dimm_csmap(mci);
793 } 790 }
794 791
795 static void __devinit i5100_init_interleaving(struct pci_dev *pdev, 792 static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
796 struct mem_ctl_info *mci) 793 struct mem_ctl_info *mci)
797 { 794 {
798 u16 w; 795 u16 w;
799 u32 dw; 796 u32 dw;
800 struct i5100_priv *priv = mci->pvt_info; 797 struct i5100_priv *priv = mci->pvt_info;
801 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; 798 struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
802 int i; 799 int i;
803 800
804 pci_read_config_word(pdev, I5100_TOLM, &w); 801 pci_read_config_word(pdev, I5100_TOLM, &w);
805 priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024; 802 priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
806 803
807 pci_read_config_word(pdev, I5100_MIR0, &w); 804 pci_read_config_word(pdev, I5100_MIR0, &w);
808 priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28; 805 priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
809 priv->mir[0].way[1] = i5100_mir_way1(w); 806 priv->mir[0].way[1] = i5100_mir_way1(w);
810 priv->mir[0].way[0] = i5100_mir_way0(w); 807 priv->mir[0].way[0] = i5100_mir_way0(w);
811 808
812 pci_read_config_word(pdev, I5100_MIR1, &w); 809 pci_read_config_word(pdev, I5100_MIR1, &w);
813 priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28; 810 priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
814 priv->mir[1].way[1] = i5100_mir_way1(w); 811 priv->mir[1].way[1] = i5100_mir_way1(w);
815 priv->mir[1].way[0] = i5100_mir_way0(w); 812 priv->mir[1].way[0] = i5100_mir_way0(w);
816 813
817 pci_read_config_word(pdev, I5100_AMIR_0, &w); 814 pci_read_config_word(pdev, I5100_AMIR_0, &w);
818 priv->amir[0] = w; 815 priv->amir[0] = w;
819 pci_read_config_word(pdev, I5100_AMIR_1, &w); 816 pci_read_config_word(pdev, I5100_AMIR_1, &w);
820 priv->amir[1] = w; 817 priv->amir[1] = w;
821 818
822 for (i = 0; i < I5100_CHANNELS; i++) { 819 for (i = 0; i < I5100_CHANNELS; i++) {
823 int j; 820 int j;
824 821
825 for (j = 0; j < 5; j++) { 822 for (j = 0; j < 5; j++) {
826 int k; 823 int k;
827 824
828 pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw); 825 pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
829 826
830 priv->dmir[i][j].limit = 827 priv->dmir[i][j].limit =
831 (u64) i5100_dmir_limit(dw) << 28; 828 (u64) i5100_dmir_limit(dw) << 28;
832 for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++) 829 for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
833 priv->dmir[i][j].rank[k] = 830 priv->dmir[i][j].rank[k] =
834 i5100_dmir_rank(dw, k); 831 i5100_dmir_rank(dw, k);
835 } 832 }
836 } 833 }
837 834
838 i5100_init_mtr(mci); 835 i5100_init_mtr(mci);
839 } 836 }
840 837
841 static void __devinit i5100_init_csrows(struct mem_ctl_info *mci) 838 static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
842 { 839 {
843 int i; 840 int i;
844 unsigned long total_pages = 0UL; 841 unsigned long total_pages = 0UL;
845 struct i5100_priv *priv = mci->pvt_info; 842 struct i5100_priv *priv = mci->pvt_info;
846 843
847 for (i = 0; i < mci->nr_csrows; i++) { 844 for (i = 0; i < mci->nr_csrows; i++) {
848 const unsigned long npages = i5100_npages(mci, i); 845 const unsigned long npages = i5100_npages(mci, i);
849 const unsigned chan = i5100_csrow_to_chan(mci, i); 846 const unsigned chan = i5100_csrow_to_chan(mci, i);
850 const unsigned rank = i5100_csrow_to_rank(mci, i); 847 const unsigned rank = i5100_csrow_to_rank(mci, i);
851 848
852 if (!npages) 849 if (!npages)
853 continue; 850 continue;
854 851
855 /* 852 /*
856 * FIXME: these two are totally bogus -- I don't see how to 853 * FIXME: these two are totally bogus -- I don't see how to
857 * map them correctly to this structure... 854 * map them correctly to this structure...
858 */ 855 */
859 mci->csrows[i].first_page = total_pages; 856 mci->csrows[i].first_page = total_pages;
860 mci->csrows[i].last_page = total_pages + npages - 1; 857 mci->csrows[i].last_page = total_pages + npages - 1;
861 mci->csrows[i].page_mask = 0UL; 858 mci->csrows[i].page_mask = 0UL;
862 859
863 mci->csrows[i].nr_pages = npages; 860 mci->csrows[i].nr_pages = npages;
864 mci->csrows[i].grain = 32; 861 mci->csrows[i].grain = 32;
865 mci->csrows[i].csrow_idx = i; 862 mci->csrows[i].csrow_idx = i;
866 mci->csrows[i].dtype = 863 mci->csrows[i].dtype =
867 (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8; 864 (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
868 mci->csrows[i].ue_count = 0; 865 mci->csrows[i].ue_count = 0;
869 mci->csrows[i].ce_count = 0; 866 mci->csrows[i].ce_count = 0;
870 mci->csrows[i].mtype = MEM_RDDR2; 867 mci->csrows[i].mtype = MEM_RDDR2;
871 mci->csrows[i].edac_mode = EDAC_SECDED; 868 mci->csrows[i].edac_mode = EDAC_SECDED;
872 mci->csrows[i].mci = mci; 869 mci->csrows[i].mci = mci;
873 mci->csrows[i].nr_channels = 1; 870 mci->csrows[i].nr_channels = 1;
874 mci->csrows[i].channels[0].chan_idx = 0; 871 mci->csrows[i].channels[0].chan_idx = 0;
875 mci->csrows[i].channels[0].ce_count = 0; 872 mci->csrows[i].channels[0].ce_count = 0;
876 mci->csrows[i].channels[0].csrow = mci->csrows + i; 873 mci->csrows[i].channels[0].csrow = mci->csrows + i;
877 snprintf(mci->csrows[i].channels[0].label, 874 snprintf(mci->csrows[i].channels[0].label,
878 sizeof(mci->csrows[i].channels[0].label), 875 sizeof(mci->csrows[i].channels[0].label),
879 "DIMM%u", i5100_rank_to_slot(mci, chan, rank)); 876 "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
880 877
881 total_pages += npages; 878 total_pages += npages;
882 } 879 }
883 } 880 }
884 881
885 static int __devinit i5100_init_one(struct pci_dev *pdev, 882 static int __devinit i5100_init_one(struct pci_dev *pdev,
886 const struct pci_device_id *id) 883 const struct pci_device_id *id)
887 { 884 {
888 int rc; 885 int rc;
889 struct mem_ctl_info *mci; 886 struct mem_ctl_info *mci;
890 struct i5100_priv *priv; 887 struct i5100_priv *priv;
891 struct pci_dev *ch0mm, *ch1mm; 888 struct pci_dev *ch0mm, *ch1mm;
892 int ret = 0; 889 int ret = 0;
893 u32 dw; 890 u32 dw;
894 int ranksperch; 891 int ranksperch;
895 892
896 if (PCI_FUNC(pdev->devfn) != 1) 893 if (PCI_FUNC(pdev->devfn) != 1)
897 return -ENODEV; 894 return -ENODEV;
898 895
899 rc = pci_enable_device(pdev); 896 rc = pci_enable_device(pdev);
900 if (rc < 0) { 897 if (rc < 0) {
901 ret = rc; 898 ret = rc;
902 goto bail; 899 goto bail;
903 } 900 }
904 901
905 /* ECC enabled? */ 902 /* ECC enabled? */
906 pci_read_config_dword(pdev, I5100_MC, &dw); 903 pci_read_config_dword(pdev, I5100_MC, &dw);
907 if (!i5100_mc_errdeten(dw)) { 904 if (!i5100_mc_errdeten(dw)) {
908 printk(KERN_INFO "i5100_edac: ECC not enabled.\n"); 905 printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
909 ret = -ENODEV; 906 ret = -ENODEV;
910 goto bail_pdev; 907 goto bail_pdev;
911 } 908 }
912 909
913 /* figure out how many ranks, from strapped state of 48GB_Mode input */ 910 /* figure out how many ranks, from strapped state of 48GB_Mode input */
914 pci_read_config_dword(pdev, I5100_MS, &dw); 911 pci_read_config_dword(pdev, I5100_MS, &dw);
915 ranksperch = !!(dw & (1 << 8)) * 2 + 4; 912 ranksperch = !!(dw & (1 << 8)) * 2 + 4;
916 913
917 /* enable error reporting... */ 914 /* enable error reporting... */
918 pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw); 915 pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
919 dw &= ~I5100_FERR_NF_MEM_ANY_MASK; 916 dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
920 pci_write_config_dword(pdev, I5100_EMASK_MEM, dw); 917 pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);
921 918
922 /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */ 919 /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
923 ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, 920 ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
924 PCI_DEVICE_ID_INTEL_5100_21, 0); 921 PCI_DEVICE_ID_INTEL_5100_21, 0);
925 if (!ch0mm) { 922 if (!ch0mm) {
926 ret = -ENODEV; 923 ret = -ENODEV;
927 goto bail_pdev; 924 goto bail_pdev;
928 } 925 }
929 926
930 rc = pci_enable_device(ch0mm); 927 rc = pci_enable_device(ch0mm);
931 if (rc < 0) { 928 if (rc < 0) {
932 ret = rc; 929 ret = rc;
933 goto bail_ch0; 930 goto bail_ch0;
934 } 931 }
935 932
936 /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */ 933 /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
937 ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, 934 ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
938 PCI_DEVICE_ID_INTEL_5100_22, 0); 935 PCI_DEVICE_ID_INTEL_5100_22, 0);
939 if (!ch1mm) { 936 if (!ch1mm) {
940 ret = -ENODEV; 937 ret = -ENODEV;
941 goto bail_disable_ch0; 938 goto bail_disable_ch0;
942 } 939 }
943 940
944 rc = pci_enable_device(ch1mm); 941 rc = pci_enable_device(ch1mm);
945 if (rc < 0) { 942 if (rc < 0) {
946 ret = rc; 943 ret = rc;
947 goto bail_ch1; 944 goto bail_ch1;
948 } 945 }
949 946
950 mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0); 947 mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
951 if (!mci) { 948 if (!mci) {
952 ret = -ENOMEM; 949 ret = -ENOMEM;
953 goto bail_disable_ch1; 950 goto bail_disable_ch1;
954 } 951 }
955 952
956 mci->dev = &pdev->dev; 953 mci->dev = &pdev->dev;
957 954
958 priv = mci->pvt_info; 955 priv = mci->pvt_info;
959 priv->ranksperchan = ranksperch; 956 priv->ranksperchan = ranksperch;
960 priv->mc = pdev; 957 priv->mc = pdev;
961 priv->ch0mm = ch0mm; 958 priv->ch0mm = ch0mm;
962 priv->ch1mm = ch1mm; 959 priv->ch1mm = ch1mm;
963 960
964 INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing); 961 INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
965 962
966 /* If scrubbing was already enabled by the bios, start maintaining it */ 963 /* If scrubbing was already enabled by the bios, start maintaining it */
967 pci_read_config_dword(pdev, I5100_MC, &dw); 964 pci_read_config_dword(pdev, I5100_MC, &dw);
968 if (i5100_mc_scrben(dw)) { 965 if (i5100_mc_scrben(dw)) {
969 priv->scrub_enable = 1; 966 priv->scrub_enable = 1;
970 schedule_delayed_work(&(priv->i5100_scrubbing), 967 schedule_delayed_work(&(priv->i5100_scrubbing),
971 I5100_SCRUB_REFRESH_RATE); 968 I5100_SCRUB_REFRESH_RATE);
972 } 969 }
973 970
974 i5100_init_dimm_layout(pdev, mci); 971 i5100_init_dimm_layout(pdev, mci);
975 i5100_init_interleaving(pdev, mci); 972 i5100_init_interleaving(pdev, mci);
976 973
977 mci->mtype_cap = MEM_FLAG_FB_DDR2; 974 mci->mtype_cap = MEM_FLAG_FB_DDR2;
978 mci->edac_ctl_cap = EDAC_FLAG_SECDED; 975 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
979 mci->edac_cap = EDAC_FLAG_SECDED; 976 mci->edac_cap = EDAC_FLAG_SECDED;
980 mci->mod_name = "i5100_edac.c"; 977 mci->mod_name = "i5100_edac.c";
981 mci->mod_ver = "not versioned"; 978 mci->mod_ver = "not versioned";
982 mci->ctl_name = "i5100"; 979 mci->ctl_name = "i5100";
983 mci->dev_name = pci_name(pdev); 980 mci->dev_name = pci_name(pdev);
984 mci->ctl_page_to_phys = NULL; 981 mci->ctl_page_to_phys = NULL;
985 982
986 mci->edac_check = i5100_check_error; 983 mci->edac_check = i5100_check_error;
987 mci->set_sdram_scrub_rate = i5100_set_scrub_rate; 984 mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
988 mci->get_sdram_scrub_rate = i5100_get_scrub_rate; 985 mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
989 986
990 i5100_init_csrows(mci); 987 i5100_init_csrows(mci);
991 988
992 /* this strange construction seems to be in every driver; unclear why */ 989 /* this strange construction seems to be in every driver; unclear why */
993 switch (edac_op_state) { 990 switch (edac_op_state) {
994 case EDAC_OPSTATE_POLL: 991 case EDAC_OPSTATE_POLL:
995 case EDAC_OPSTATE_NMI: 992 case EDAC_OPSTATE_NMI:
996 break; 993 break;
997 default: 994 default:
998 edac_op_state = EDAC_OPSTATE_POLL; 995 edac_op_state = EDAC_OPSTATE_POLL;
999 break; 996 break;
1000 } 997 }
1001 998
1002 if (edac_mc_add_mc(mci)) { 999 if (edac_mc_add_mc(mci)) {
1003 ret = -ENODEV; 1000 ret = -ENODEV;
1004 goto bail_scrub; 1001 goto bail_scrub;
1005 } 1002 }
1006 1003
1007 return ret; 1004 return ret;
1008 1005
1009 bail_scrub: 1006 bail_scrub:
1010 priv->scrub_enable = 0; 1007 priv->scrub_enable = 0;
1011 cancel_delayed_work_sync(&(priv->i5100_scrubbing)); 1008 cancel_delayed_work_sync(&(priv->i5100_scrubbing));
1012 edac_mc_free(mci); 1009 edac_mc_free(mci);
1013 1010
1014 bail_disable_ch1: 1011 bail_disable_ch1:
1015 pci_disable_device(ch1mm); 1012 pci_disable_device(ch1mm);
1016 1013
1017 bail_ch1: 1014 bail_ch1:
1018 pci_dev_put(ch1mm); 1015 pci_dev_put(ch1mm);
1019 1016
1020 bail_disable_ch0: 1017 bail_disable_ch0:
1021 pci_disable_device(ch0mm); 1018 pci_disable_device(ch0mm);
1022 1019
1023 bail_ch0: 1020 bail_ch0:
1024 pci_dev_put(ch0mm); 1021 pci_dev_put(ch0mm);
1025 1022
1026 bail_pdev: 1023 bail_pdev:
1027 pci_disable_device(pdev); 1024 pci_disable_device(pdev);
1028 1025
1029 bail: 1026 bail:
1030 return ret; 1027 return ret;
1031 } 1028 }
1032 1029
1033 static void __devexit i5100_remove_one(struct pci_dev *pdev) 1030 static void __devexit i5100_remove_one(struct pci_dev *pdev)
1034 { 1031 {
1035 struct mem_ctl_info *mci; 1032 struct mem_ctl_info *mci;
1036 struct i5100_priv *priv; 1033 struct i5100_priv *priv;
1037 1034
1038 mci = edac_mc_del_mc(&pdev->dev); 1035 mci = edac_mc_del_mc(&pdev->dev);
1039 1036
1040 if (!mci) 1037 if (!mci)
1041 return; 1038 return;
1042 1039
1043 priv = mci->pvt_info; 1040 priv = mci->pvt_info;
1044 1041
1045 priv->scrub_enable = 0; 1042 priv->scrub_enable = 0;
1046 cancel_delayed_work_sync(&(priv->i5100_scrubbing)); 1043 cancel_delayed_work_sync(&(priv->i5100_scrubbing));
1047 1044
1048 pci_disable_device(pdev); 1045 pci_disable_device(pdev);
1049 pci_disable_device(priv->ch0mm); 1046 pci_disable_device(priv->ch0mm);
1050 pci_disable_device(priv->ch1mm); 1047 pci_disable_device(priv->ch1mm);
1051 pci_dev_put(priv->ch0mm); 1048 pci_dev_put(priv->ch0mm);
1052 pci_dev_put(priv->ch1mm); 1049 pci_dev_put(priv->ch1mm);
1053 1050
1054 edac_mc_free(mci); 1051 edac_mc_free(mci);
1055 } 1052 }
1056 1053
1057 static const struct pci_device_id i5100_pci_tbl[] __devinitdata = { 1054 static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
1058 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */ 1055 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
1059 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, 1056 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
1060 { 0, } 1057 { 0, }
1061 }; 1058 };
1062 MODULE_DEVICE_TABLE(pci, i5100_pci_tbl); 1059 MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
1063 1060
1064 static struct pci_driver i5100_driver = { 1061 static struct pci_driver i5100_driver = {
1065 .name = KBUILD_BASENAME, 1062 .name = KBUILD_BASENAME,
1066 .probe = i5100_init_one, 1063 .probe = i5100_init_one,
1067 .remove = __devexit_p(i5100_remove_one), 1064 .remove = __devexit_p(i5100_remove_one),
1068 .id_table = i5100_pci_tbl, 1065 .id_table = i5100_pci_tbl,
1069 }; 1066 };
1070 1067
1071 static int __init i5100_init(void) 1068 static int __init i5100_init(void)
1072 { 1069 {
1073 int pci_rc; 1070 int pci_rc;
1074 1071
1075 pci_rc = pci_register_driver(&i5100_driver); 1072 pci_rc = pci_register_driver(&i5100_driver);
1076 1073
1077 return (pci_rc < 0) ? pci_rc : 0; 1074 return (pci_rc < 0) ? pci_rc : 0;
1078 } 1075 }
1079 1076
1080 static void __exit i5100_exit(void) 1077 static void __exit i5100_exit(void)
1081 { 1078 {
1082 pci_unregister_driver(&i5100_driver); 1079 pci_unregister_driver(&i5100_driver);
1083 } 1080 }
1084 1081
1085 module_init(i5100_init); 1082 module_init(i5100_init);
1086 module_exit(i5100_exit); 1083 module_exit(i5100_exit);
1087 1084
1088 MODULE_LICENSE("GPL"); 1085 MODULE_LICENSE("GPL");
1089 MODULE_AUTHOR 1086 MODULE_AUTHOR
1090 ("Arthur Jones <ajones@riverbed.com>"); 1087 ("Arthur Jones <ajones@riverbed.com>");
1091 MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers"); 1088 MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
1092 1089