Commit 870897a5ab60a6afeba0a7eff42d21faf79edf33

Authored by Jason Uhlenkott
Committed by Linus Torvalds
1 parent 7ed31e0fa0

drivers/edac/i3000: document type promotion

By popular request, add a comment documenting the implicit type promotion
here.

Signed-off-by: Jason Uhlenkott <juhlenko@akamai.com>
Signed-off-by: Doug Thompson <dougthompson@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
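
The promotion being documented is ordinary C integer promotion. Below is a minimal userspace sketch of deap_pfn() for illustration only; it is not part of the patch, and PAGE_SHIFT = 12 and the register values are assumptions:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for this demo */

/* Mirror of deap_pfn(), with stdint types standing in for kernel u8/u32. */
static unsigned long demo_deap_pfn(uint8_t edeap, uint32_t deap)
{
	deap >>= PAGE_SHIFT;	/* DEAP bits 31:12 become pfn bits 19:0 */

	/*
	 * "edeap & 1" is promoted from u8 to int before the shift, so
	 * shifting it left by 32 - PAGE_SHIFT = 20 is well defined;
	 * the |= then converts that int result to u32.
	 */
	deap |= (edeap & 1) << (32 - PAGE_SHIFT);

	return deap;		/* u32 widens to unsigned long; no bits are lost */
}

int main(void)
{
	uint8_t edeap = 0x01;		/* EDEAP bit 0 = bit 32 of the error address */
	uint32_t deap = 0x12345680;	/* DEAP bits 31:7 = error address */

	printf("pfn = 0x%lx\n", demo_deap_pfn(edeap, deap));	/* prints pfn = 0x112345 */
	return 0;
}

With those inputs the helper returns 0x112345: the EDEAP bit ends up as bit 20 of the page frame number, carried through the implicit u8-to-int promotion and the final conversion to unsigned long.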

Showing 1 changed file with 7 additions and 0 deletions

drivers/edac/i3000_edac.c
/*
 * Intel 3000/3010 Memory Controller kernel module
 * Copyright (C) 2007 Akamai Technologies, Inc.
 * Shamelessly copied from:
 *	Intel D82875P Memory Controller kernel module
 *	(C) 2003 Linux Networx (http://lnxi.com)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include "edac_core.h"

#define I3000_REVISION		"1.1"

#define EDAC_MOD_STR		"i3000_edac"

#define I3000_RANKS		8
#define I3000_RANKS_PER_CHANNEL	4
#define I3000_CHANNELS		2

/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */

#define I3000_MCHBAR		0x44	/* MCH Memory Mapped Register BAR */
#define I3000_MCHBAR_MASK	0xffffc000
#define I3000_MMR_WINDOW_SIZE	16384

#define I3000_EDEAP	0x70	/* Extended DRAM Error Address Pointer (8b)
				 *
				 * 7:1   reserved
				 * 0     bit 32 of address
				 */
#define I3000_DEAP	0x58	/* DRAM Error Address Pointer (32b)
				 *
				 * 31:7  address
				 * 6:1   reserved
				 * 0     Error channel 0/1
				 */
#define I3000_DEAP_GRAIN	(1 << 7)

+/*
+ * Helper functions to decode the DEAP/EDEAP hardware registers.
+ *
+ * The type promotion here is deliberate; we're deriving an
+ * unsigned long pfn and offset from hardware regs which are u8/u32.
+ */
+
static inline unsigned long deap_pfn(u8 edeap, u32 deap)
{
	deap >>= PAGE_SHIFT;
	deap |= (edeap & 1) << (32 - PAGE_SHIFT);
	return deap;
}

static inline unsigned long deap_offset(u32 deap)
{
	return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
}

static inline int deap_channel(u32 deap)
{
	return deap & 1;
}

#define I3000_DERRSYN	0x5c	/* DRAM Error Syndrome (8b)
				 *
				 * 7:0   DRAM ECC Syndrome
				 */

#define I3000_ERRSTS	0xc8	/* Error Status Register (16b)
				 *
				 * 15:12 reserved
				 * 11    MCH Thermal Sensor Event
				 *         for SMI/SCI/SERR
				 * 10    reserved
				 * 9     LOCK to non-DRAM Memory Flag (LCKF)
				 * 8     Received Refresh Timeout Flag (RRTOF)
				 * 7:2   reserved
				 * 1     Multi-bit DRAM ECC Error Flag (DMERR)
				 * 0     Single-bit DRAM ECC Error Flag (DSERR)
				 */
#define I3000_ERRSTS_BITS	0x0b03	/* bits which indicate errors */
#define I3000_ERRSTS_UE		0x0002
#define I3000_ERRSTS_CE		0x0001

#define I3000_ERRCMD	0xca	/* Error Command (16b)
				 *
				 * 15:12 reserved
				 * 11    SERR on MCH Thermal Sensor Event
				 *         (TSESERR)
				 * 10    reserved
				 * 9     SERR on LOCK to non-DRAM Memory
				 *         (LCKERR)
				 * 8     SERR on DRAM Refresh Timeout
				 *         (DRTOERR)
				 * 7:2   reserved
				 * 1     SERR Multi-Bit DRAM ECC Error
				 *         (DMERR)
				 * 0     SERR on Single-Bit ECC Error
				 *         (DSERR)
				 */

/* Intel MMIO register space - device 0 function 0 - MMR space */

#define I3000_DRB_SHIFT	25	/* 32MiB grain */

#define I3000_C0DRB	0x100	/* Channel 0 DRAM Rank Boundary (8b x 4)
				 *
				 * 7:0   Channel 0 DRAM Rank Boundary Address
				 */
#define I3000_C1DRB	0x180	/* Channel 1 DRAM Rank Boundary (8b x 4)
				 *
				 * 7:0   Channel 1 DRAM Rank Boundary Address
				 */

#define I3000_C0DRA	0x108	/* Channel 0 DRAM Rank Attribute (8b x 2)
				 *
				 * 7     reserved
				 * 6:4   DRAM odd Rank Attribute
				 * 3     reserved
				 * 2:0   DRAM even Rank Attribute
				 *
				 * Each attribute defines the page
				 * size of the corresponding rank:
				 *     000: unpopulated
				 *     001: reserved
				 *     010: 4 KB
				 *     011: 8 KB
				 *     100: 16 KB
				 *     Others: reserved
				 */
#define I3000_C1DRA	0x188	/* Channel 1 DRAM Rank Attribute (8b x 2) */

static inline unsigned char odd_rank_attrib(unsigned char dra)
{
	return (dra & 0x70) >> 4;
}

static inline unsigned char even_rank_attrib(unsigned char dra)
{
	return dra & 0x07;
}

#define I3000_C0DRC0	0x120	/* DRAM Controller Mode 0 (32b)
				 *
				 * 31:30 reserved
				 * 29    Initialization Complete (IC)
				 * 28:11 reserved
				 * 10:8  Refresh Mode Select (RMS)
				 * 7     reserved
				 * 6:4   Mode Select (SMS)
				 * 3:2   reserved
				 * 1:0   DRAM Type (DT)
				 */

#define I3000_C0DRC1	0x124	/* DRAM Controller Mode 1 (32b)
				 *
				 * 31    Enhanced Addressing Enable (ENHADE)
				 * 30:0  reserved
				 */

enum i3000p_chips {
	I3000 = 0,
};

struct i3000_dev_info {
	const char *ctl_name;
};

struct i3000_error_info {
	u16 errsts;
	u8 derrsyn;
	u8 edeap;
	u32 deap;
	u16 errsts2;
};

static const struct i3000_dev_info i3000_devs[] = {
	[I3000] = {
		.ctl_name = "i3000"},
};

static struct pci_dev *mci_pdev;
static int i3000_registered = 1;
static struct edac_pci_ctl_info *i3000_pci;

static void i3000_get_error_info(struct mem_ctl_info *mci,
				 struct i3000_error_info *info)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->dev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
	if (!(info->errsts & I3000_ERRSTS_BITS))
		return;
	pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
	pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
	pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
	pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set
	 * of reads is valid.  If there is a change then there is a CE
	 * with no info and the second set of reads is valid and
	 * should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
		pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
		pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
		pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
	}

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
			 I3000_ERRSTS_BITS);
}

static int i3000_process_error_info(struct mem_ctl_info *mci,
				    struct i3000_error_info *info,
				    int handle_errors)
{
	int row, multi_chan, channel;
	unsigned long pfn, offset;

	multi_chan = mci->csrows[0].nr_channels - 1;

	if (!(info->errsts & I3000_ERRSTS_BITS))
		return 0;

	if (!handle_errors)
		return 1;

	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
		info->errsts = info->errsts2;
	}

	pfn = deap_pfn(info->edeap, info->deap);
	offset = deap_offset(info->deap);
	channel = deap_channel(info->deap);

	row = edac_mc_find_csrow_by_page(mci, pfn);

	if (info->errsts & I3000_ERRSTS_UE)
		edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
	else
		edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
				  multi_chan ? channel : 0, "i3000 CE");

	return 1;
}

static void i3000_check(struct mem_ctl_info *mci)
{
	struct i3000_error_info info;

	debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
	i3000_get_error_info(mci, &info);
	i3000_process_error_info(mci, &info, 1);
}

static int i3000_is_interleaved(const unsigned char *c0dra,
				const unsigned char *c1dra,
				const unsigned char *c0drb,
				const unsigned char *c1drb)
{
	int i;

	/*
	 * If the channels aren't populated identically then
	 * we're not interleaved.
	 */
	for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
		if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
		    even_rank_attrib(c0dra[i]) !=
		    even_rank_attrib(c1dra[i]))
			return 0;

	/*
	 * If the rank boundaries for the two channels are different
	 * then we're not interleaved.
	 */
	for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
		if (c0drb[i] != c1drb[i])
			return 0;

	return 1;
}

static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc;
	int i;
	struct mem_ctl_info *mci = NULL;
	unsigned long last_cumul_size;
	int interleaved, nr_channels;
	unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
	unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
	unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
	unsigned long mchbar;
	void __iomem *window;

	debugf0("MC: %s()\n", __func__);

	pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
	mchbar &= I3000_MCHBAR_MASK;
	window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
	if (!window) {
		printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
		       mchbar);
		return -ENODEV;
	}

	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
		break;
	}

	c0dra[0] = readb(window + I3000_C0DRA + 0);	/* ranks 0,1 */
	c0dra[1] = readb(window + I3000_C0DRA + 1);	/* ranks 2,3 */
	c1dra[0] = readb(window + I3000_C1DRA + 0);	/* ranks 0,1 */
	c1dra[1] = readb(window + I3000_C1DRA + 1);	/* ranks 2,3 */

	for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
		c0drb[i] = readb(window + I3000_C0DRB + i);
		c1drb[i] = readb(window + I3000_C1DRB + i);
	}

	iounmap(window);

	/*
	 * Figure out how many channels we have.
	 *
	 * If we have what the datasheet calls "asymmetric channels"
	 * (essentially the same as what was called "virtual single
	 * channel mode" in the i82875) then it's a single channel as
	 * far as EDAC is concerned.
	 */
	interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
	nr_channels = interleaved ? 2 : 1;
	mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
	if (!mci)
		return -ENOMEM;

	debugf3("MC: %s(): init mci\n", __func__);

	mci->dev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR2;

	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = I3000_REVISION;
	mci->ctl_name = i3000_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = i3000_check;
	mci->ctl_page_to_phys = NULL;

	/*
	 * The dram rank boundary (DRB) reg values are boundary addresses
	 * for each DRAM rank with a granularity of 32MB.  DRB regs are
	 * cumulative; the last one will contain the total memory
	 * contained in all ranks.
	 *
	 * If we're in interleaved mode then we're only walking through
	 * the ranks of controller 0, so we double all the values we see.
	 */
	for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
		u8 value;
		u32 cumul_size;
		struct csrow_info *csrow = &mci->csrows[i];

		value = drb[i];
		cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
		if (interleaved)
			cumul_size <<= 1;
		debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
			__func__, i, cumul_size);
		if (cumul_size == last_cumul_size) {
			csrow->mtype = MEM_EMPTY;
			continue;
		}

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		csrow->nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;
		csrow->grain = I3000_DEAP_GRAIN;
		csrow->mtype = MEM_DDR2;
		csrow->dtype = DEV_UNKNOWN;
		csrow->edac_mode = EDAC_UNKNOWN;
	}

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
			 I3000_ERRSTS_BITS);

	rc = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	/* allocating generic PCI control info */
	i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i3000_pci) {
		printk(KERN_WARNING
		       "%s(): Unable to create PCI control\n",
		       __func__);
		printk(KERN_WARNING
		       "%s(): PCI error report via EDAC not setup\n",
		       __func__);
	}

	/* get this far and it's successful */
	debugf3("MC: %s(): success\n", __func__);
	return 0;

fail:
	if (mci)
		edac_mc_free(mci);

	return rc;
}

/* returns count (>= 0), or negative on error */
static int __devinit i3000_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int rc;

	debugf0("MC: %s()\n", __func__);

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = i3000_probe1(pdev, ent->driver_data);
	if (!mci_pdev)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

static void __devexit i3000_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	debugf0("%s()\n", __func__);

	if (i3000_pci)
		edac_pci_release_generic_ctl(i3000_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	edac_mc_free(mci);
}

static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
	{
	 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I3000},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);

static struct pci_driver i3000_driver = {
	.name = EDAC_MOD_STR,
	.probe = i3000_init_one,
	.remove = __devexit_p(i3000_remove_one),
	.id_table = i3000_pci_tbl,
};

static int __init i3000_init(void)
{
	int pci_rc;

	debugf3("MC: %s()\n", __func__);
	pci_rc = pci_register_driver(&i3000_driver);
	if (pci_rc < 0)
		goto fail0;

	if (!mci_pdev) {
		i3000_registered = 0;
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_3000_HB, NULL);
		if (!mci_pdev) {
			debugf0("i3000 pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
		if (pci_rc < 0) {
			debugf0("i3000 init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&i3000_driver);

fail0:
	if (mci_pdev)
		pci_dev_put(mci_pdev);

	return pci_rc;
}

static void __exit i3000_exit(void)
{
	debugf3("MC: %s()\n", __func__);

	pci_unregister_driver(&i3000_driver);
	if (!i3000_registered) {
		i3000_remove_one(mci_pdev);
		pci_dev_put(mci_pdev);
	}
}

module_init(i3000_init);
module_exit(i3000_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");