Commit d0585cd815faef50ce3d12cbe173438eb4d81eb8

Authored by Andy Lutomirski
Committed by Mauro Carvalho Chehab
1 parent 68939df1d7

sb_edac: Claim a different PCI device

sb_edac controls a large number of different PCI functions.  Rather
than registering as a normal PCI driver for all of them, it
registers for just one so that it gets probed and, at probe time, it
looks for all the others.

Coincidentally, the device it registers for also contains the SMBUS
registers, so the PCI core will refuse to probe both sb_edac and a
future iMC SMBUS driver.  The drivers don't actually conflict, so
just change sb_edac's device table to probe a different device.

An alternative fix would be to merge the two drivers, but sb_edac
will also refuse to load on non-ECC systems, whereas i2c_imc would
still be useful without ECC.

The only user-visible change should be that sb_edac appears to bind
a different device.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Rui Wang <ruiv.wang@gmail.com>
Acked-by: Aristeu Rozanski <aris@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>

Showing 1 changed file with 1 addition and 1 deletion (inline diff)

drivers/edac/sb_edac.c
1 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module 1 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
2 * 2 *
3 * This driver supports the memory controllers found on the Intel 3 * This driver supports the memory controllers found on the Intel
4 * processor family Sandy Bridge. 4 * processor family Sandy Bridge.
5 * 5 *
6 * This file may be distributed under the terms of the 6 * This file may be distributed under the terms of the
7 * GNU General Public License version 2 only. 7 * GNU General Public License version 2 only.
8 * 8 *
9 * Copyright (c) 2011 by: 9 * Copyright (c) 2011 by:
10 * Mauro Carvalho Chehab 10 * Mauro Carvalho Chehab
11 */ 11 */
12 12
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/pci.h> 15 #include <linux/pci.h>
16 #include <linux/pci_ids.h> 16 #include <linux/pci_ids.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/delay.h> 18 #include <linux/delay.h>
19 #include <linux/edac.h> 19 #include <linux/edac.h>
20 #include <linux/mmzone.h> 20 #include <linux/mmzone.h>
21 #include <linux/smp.h> 21 #include <linux/smp.h>
22 #include <linux/bitmap.h> 22 #include <linux/bitmap.h>
23 #include <linux/math64.h> 23 #include <linux/math64.h>
24 #include <asm/processor.h> 24 #include <asm/processor.h>
25 #include <asm/mce.h> 25 #include <asm/mce.h>
26 26
27 #include "edac_core.h" 27 #include "edac_core.h"
28 28
29 /* Static vars */ 29 /* Static vars */
30 static LIST_HEAD(sbridge_edac_list); 30 static LIST_HEAD(sbridge_edac_list);
31 static DEFINE_MUTEX(sbridge_edac_lock); 31 static DEFINE_MUTEX(sbridge_edac_lock);
32 static int probed; 32 static int probed;
33 33
34 /* 34 /*
35 * Alter this version for the module when modifications are made 35 * Alter this version for the module when modifications are made
36 */ 36 */
37 #define SBRIDGE_REVISION " Ver: 1.1.0 " 37 #define SBRIDGE_REVISION " Ver: 1.1.0 "
38 #define EDAC_MOD_STR "sbridge_edac" 38 #define EDAC_MOD_STR "sbridge_edac"
39 39
40 /* 40 /*
41 * Debug macros 41 * Debug macros
42 */ 42 */
43 #define sbridge_printk(level, fmt, arg...) \ 43 #define sbridge_printk(level, fmt, arg...) \
44 edac_printk(level, "sbridge", fmt, ##arg) 44 edac_printk(level, "sbridge", fmt, ##arg)
45 45
46 #define sbridge_mc_printk(mci, level, fmt, arg...) \ 46 #define sbridge_mc_printk(mci, level, fmt, arg...) \
47 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg) 47 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
48 48
49 /* 49 /*
50 * Get a bit field at register value <v>, from bit <lo> to bit <hi> 50 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
51 */ 51 */
52 #define GET_BITFIELD(v, lo, hi) \ 52 #define GET_BITFIELD(v, lo, hi) \
53 (((v) & GENMASK_ULL(hi, lo)) >> (lo)) 53 (((v) & GENMASK_ULL(hi, lo)) >> (lo))
54 54
55 /* Devices 12 Function 6, Offsets 0x80 to 0xcc */ 55 /* Devices 12 Function 6, Offsets 0x80 to 0xcc */
56 static const u32 sbridge_dram_rule[] = { 56 static const u32 sbridge_dram_rule[] = {
57 0x80, 0x88, 0x90, 0x98, 0xa0, 57 0x80, 0x88, 0x90, 0x98, 0xa0,
58 0xa8, 0xb0, 0xb8, 0xc0, 0xc8, 58 0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
59 }; 59 };
60 60
61 static const u32 ibridge_dram_rule[] = { 61 static const u32 ibridge_dram_rule[] = {
62 0x60, 0x68, 0x70, 0x78, 0x80, 62 0x60, 0x68, 0x70, 0x78, 0x80,
63 0x88, 0x90, 0x98, 0xa0, 0xa8, 63 0x88, 0x90, 0x98, 0xa0, 0xa8,
64 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, 64 0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
65 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, 65 0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
66 }; 66 };
67 67
68 #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff) 68 #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff)
69 #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3) 69 #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3)
70 #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1) 70 #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1)
71 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0) 71 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
72 #define A7MODE(reg) GET_BITFIELD(reg, 26, 26) 72 #define A7MODE(reg) GET_BITFIELD(reg, 26, 26)
73 73
74 static char *get_dram_attr(u32 reg) 74 static char *get_dram_attr(u32 reg)
75 { 75 {
76 switch(DRAM_ATTR(reg)) { 76 switch(DRAM_ATTR(reg)) {
77 case 0: 77 case 0:
78 return "DRAM"; 78 return "DRAM";
79 case 1: 79 case 1:
80 return "MMCFG"; 80 return "MMCFG";
81 case 2: 81 case 2:
82 return "NXM"; 82 return "NXM";
83 default: 83 default:
84 return "unknown"; 84 return "unknown";
85 } 85 }
86 } 86 }
87 87
88 static const u32 sbridge_interleave_list[] = { 88 static const u32 sbridge_interleave_list[] = {
89 0x84, 0x8c, 0x94, 0x9c, 0xa4, 89 0x84, 0x8c, 0x94, 0x9c, 0xa4,
90 0xac, 0xb4, 0xbc, 0xc4, 0xcc, 90 0xac, 0xb4, 0xbc, 0xc4, 0xcc,
91 }; 91 };
92 92
93 static const u32 ibridge_interleave_list[] = { 93 static const u32 ibridge_interleave_list[] = {
94 0x64, 0x6c, 0x74, 0x7c, 0x84, 94 0x64, 0x6c, 0x74, 0x7c, 0x84,
95 0x8c, 0x94, 0x9c, 0xa4, 0xac, 95 0x8c, 0x94, 0x9c, 0xa4, 0xac,
96 0xb4, 0xbc, 0xc4, 0xcc, 0xd4, 96 0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
97 0xdc, 0xe4, 0xec, 0xf4, 0xfc, 97 0xdc, 0xe4, 0xec, 0xf4, 0xfc,
98 }; 98 };
99 99
100 struct interleave_pkg { 100 struct interleave_pkg {
101 unsigned char start; 101 unsigned char start;
102 unsigned char end; 102 unsigned char end;
103 }; 103 };
104 104
105 static const struct interleave_pkg sbridge_interleave_pkg[] = { 105 static const struct interleave_pkg sbridge_interleave_pkg[] = {
106 { 0, 2 }, 106 { 0, 2 },
107 { 3, 5 }, 107 { 3, 5 },
108 { 8, 10 }, 108 { 8, 10 },
109 { 11, 13 }, 109 { 11, 13 },
110 { 16, 18 }, 110 { 16, 18 },
111 { 19, 21 }, 111 { 19, 21 },
112 { 24, 26 }, 112 { 24, 26 },
113 { 27, 29 }, 113 { 27, 29 },
114 }; 114 };
115 115
116 static const struct interleave_pkg ibridge_interleave_pkg[] = { 116 static const struct interleave_pkg ibridge_interleave_pkg[] = {
117 { 0, 3 }, 117 { 0, 3 },
118 { 4, 7 }, 118 { 4, 7 },
119 { 8, 11 }, 119 { 8, 11 },
120 { 12, 15 }, 120 { 12, 15 },
121 { 16, 19 }, 121 { 16, 19 },
122 { 20, 23 }, 122 { 20, 23 },
123 { 24, 27 }, 123 { 24, 27 },
124 { 28, 31 }, 124 { 28, 31 },
125 }; 125 };
126 126
127 static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, 127 static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
128 int interleave) 128 int interleave)
129 { 129 {
130 return GET_BITFIELD(reg, table[interleave].start, 130 return GET_BITFIELD(reg, table[interleave].start,
131 table[interleave].end); 131 table[interleave].end);
132 } 132 }
133 133
134 /* Devices 12 Function 7 */ 134 /* Devices 12 Function 7 */
135 135
136 #define TOLM 0x80 136 #define TOLM 0x80
137 #define TOHM 0x84 137 #define TOHM 0x84
138 #define HASWELL_TOHM_0 0xd4 138 #define HASWELL_TOHM_0 0xd4
139 #define HASWELL_TOHM_1 0xd8 139 #define HASWELL_TOHM_1 0xd8
140 140
141 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff) 141 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
142 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff) 142 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
143 143
144 /* Device 13 Function 6 */ 144 /* Device 13 Function 6 */
145 145
146 #define SAD_TARGET 0xf0 146 #define SAD_TARGET 0xf0
147 147
148 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11) 148 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
149 149
150 #define SAD_CONTROL 0xf4 150 #define SAD_CONTROL 0xf4
151 151
152 /* Device 14 function 0 */ 152 /* Device 14 function 0 */
153 153
154 static const u32 tad_dram_rule[] = { 154 static const u32 tad_dram_rule[] = {
155 0x40, 0x44, 0x48, 0x4c, 155 0x40, 0x44, 0x48, 0x4c,
156 0x50, 0x54, 0x58, 0x5c, 156 0x50, 0x54, 0x58, 0x5c,
157 0x60, 0x64, 0x68, 0x6c, 157 0x60, 0x64, 0x68, 0x6c,
158 }; 158 };
159 #define MAX_TAD ARRAY_SIZE(tad_dram_rule) 159 #define MAX_TAD ARRAY_SIZE(tad_dram_rule)
160 160
161 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff) 161 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
162 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11) 162 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
163 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9) 163 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
164 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7) 164 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
165 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5) 165 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
166 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3) 166 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
167 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1) 167 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
168 168
169 /* Device 15, function 0 */ 169 /* Device 15, function 0 */
170 170
171 #define MCMTR 0x7c 171 #define MCMTR 0x7c
172 172
173 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2) 173 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
174 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1) 174 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
175 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0) 175 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
176 176
177 /* Device 15, function 1 */ 177 /* Device 15, function 1 */
178 178
179 #define RASENABLES 0xac 179 #define RASENABLES 0xac
180 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0) 180 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
181 181
182 /* Device 15, functions 2-5 */ 182 /* Device 15, functions 2-5 */
183 183
184 static const int mtr_regs[] = { 184 static const int mtr_regs[] = {
185 0x80, 0x84, 0x88, 185 0x80, 0x84, 0x88,
186 }; 186 };
187 187
188 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19) 188 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
189 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14) 189 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
190 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13) 190 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
191 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4) 191 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
192 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1) 192 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
193 193
194 static const u32 tad_ch_nilv_offset[] = { 194 static const u32 tad_ch_nilv_offset[] = {
195 0x90, 0x94, 0x98, 0x9c, 195 0x90, 0x94, 0x98, 0x9c,
196 0xa0, 0xa4, 0xa8, 0xac, 196 0xa0, 0xa4, 0xa8, 0xac,
197 0xb0, 0xb4, 0xb8, 0xbc, 197 0xb0, 0xb4, 0xb8, 0xbc,
198 }; 198 };
199 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29) 199 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
200 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26) 200 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
201 201
202 static const u32 rir_way_limit[] = { 202 static const u32 rir_way_limit[] = {
203 0x108, 0x10c, 0x110, 0x114, 0x118, 203 0x108, 0x10c, 0x110, 0x114, 0x118,
204 }; 204 };
205 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit) 205 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
206 206
207 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31) 207 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
208 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29) 208 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
209 209
210 #define MAX_RIR_WAY 8 210 #define MAX_RIR_WAY 8
211 211
212 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { 212 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
213 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c }, 213 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
214 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c }, 214 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
215 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c }, 215 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
216 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c }, 216 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
217 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, 217 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
218 }; 218 };
219 219
220 #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19) 220 #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
221 #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14) 221 #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
222 222
223 /* Device 16, functions 2-7 */ 223 /* Device 16, functions 2-7 */
224 224
225 /* 225 /*
226 * FIXME: Implement the error count reads directly 226 * FIXME: Implement the error count reads directly
227 */ 227 */
228 228
229 static const u32 correrrcnt[] = { 229 static const u32 correrrcnt[] = {
230 0x104, 0x108, 0x10c, 0x110, 230 0x104, 0x108, 0x10c, 0x110,
231 }; 231 };
232 232
233 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31) 233 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
234 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30) 234 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
235 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15) 235 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
236 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14) 236 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
237 237
238 static const u32 correrrthrsld[] = { 238 static const u32 correrrthrsld[] = {
239 0x11c, 0x120, 0x124, 0x128, 239 0x11c, 0x120, 0x124, 0x128,
240 }; 240 };
241 241
242 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30) 242 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
243 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14) 243 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
244 244
245 245
246 /* Device 17, function 0 */ 246 /* Device 17, function 0 */
247 247
248 #define SB_RANK_CFG_A 0x0328 248 #define SB_RANK_CFG_A 0x0328
249 249
250 #define IB_RANK_CFG_A 0x0320 250 #define IB_RANK_CFG_A 0x0320
251 251
252 /* 252 /*
253 * sbridge structs 253 * sbridge structs
254 */ 254 */
255 255
256 #define NUM_CHANNELS 4 256 #define NUM_CHANNELS 4
257 #define MAX_DIMMS 3 /* Max DIMMS per channel */ 257 #define MAX_DIMMS 3 /* Max DIMMS per channel */
258 #define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */ 258 #define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
259 259
260 enum type { 260 enum type {
261 SANDY_BRIDGE, 261 SANDY_BRIDGE,
262 IVY_BRIDGE, 262 IVY_BRIDGE,
263 HASWELL, 263 HASWELL,
264 }; 264 };
265 265
266 struct sbridge_pvt; 266 struct sbridge_pvt;
267 struct sbridge_info { 267 struct sbridge_info {
268 enum type type; 268 enum type type;
269 u32 mcmtr; 269 u32 mcmtr;
270 u32 rankcfgr; 270 u32 rankcfgr;
271 u64 (*get_tolm)(struct sbridge_pvt *pvt); 271 u64 (*get_tolm)(struct sbridge_pvt *pvt);
272 u64 (*get_tohm)(struct sbridge_pvt *pvt); 272 u64 (*get_tohm)(struct sbridge_pvt *pvt);
273 u64 (*rir_limit)(u32 reg); 273 u64 (*rir_limit)(u32 reg);
274 const u32 *dram_rule; 274 const u32 *dram_rule;
275 const u32 *interleave_list; 275 const u32 *interleave_list;
276 const struct interleave_pkg *interleave_pkg; 276 const struct interleave_pkg *interleave_pkg;
277 u8 max_sad; 277 u8 max_sad;
278 u8 max_interleave; 278 u8 max_interleave;
279 u8 (*get_node_id)(struct sbridge_pvt *pvt); 279 u8 (*get_node_id)(struct sbridge_pvt *pvt);
280 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt); 280 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
281 struct pci_dev *pci_vtd; 281 struct pci_dev *pci_vtd;
282 }; 282 };
283 283
284 struct sbridge_channel { 284 struct sbridge_channel {
285 u32 ranks; 285 u32 ranks;
286 u32 dimms; 286 u32 dimms;
287 }; 287 };
288 288
289 struct pci_id_descr { 289 struct pci_id_descr {
290 int dev_id; 290 int dev_id;
291 int optional; 291 int optional;
292 }; 292 };
293 293
294 struct pci_id_table { 294 struct pci_id_table {
295 const struct pci_id_descr *descr; 295 const struct pci_id_descr *descr;
296 int n_devs; 296 int n_devs;
297 }; 297 };
298 298
299 struct sbridge_dev { 299 struct sbridge_dev {
300 struct list_head list; 300 struct list_head list;
301 u8 bus, mc; 301 u8 bus, mc;
302 u8 node_id, source_id; 302 u8 node_id, source_id;
303 struct pci_dev **pdev; 303 struct pci_dev **pdev;
304 int n_devs; 304 int n_devs;
305 struct mem_ctl_info *mci; 305 struct mem_ctl_info *mci;
306 }; 306 };
307 307
308 struct sbridge_pvt { 308 struct sbridge_pvt {
309 struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; 309 struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
310 struct pci_dev *pci_sad0, *pci_sad1; 310 struct pci_dev *pci_sad0, *pci_sad1;
311 struct pci_dev *pci_ha0, *pci_ha1; 311 struct pci_dev *pci_ha0, *pci_ha1;
312 struct pci_dev *pci_br0, *pci_br1; 312 struct pci_dev *pci_br0, *pci_br1;
313 struct pci_dev *pci_ha1_ta; 313 struct pci_dev *pci_ha1_ta;
314 struct pci_dev *pci_tad[NUM_CHANNELS]; 314 struct pci_dev *pci_tad[NUM_CHANNELS];
315 315
316 struct sbridge_dev *sbridge_dev; 316 struct sbridge_dev *sbridge_dev;
317 317
318 struct sbridge_info info; 318 struct sbridge_info info;
319 struct sbridge_channel channel[NUM_CHANNELS]; 319 struct sbridge_channel channel[NUM_CHANNELS];
320 320
321 /* Memory type detection */ 321 /* Memory type detection */
322 bool is_mirrored, is_lockstep, is_close_pg; 322 bool is_mirrored, is_lockstep, is_close_pg;
323 323
324 /* Fifo double buffers */ 324 /* Fifo double buffers */
325 struct mce mce_entry[MCE_LOG_LEN]; 325 struct mce mce_entry[MCE_LOG_LEN];
326 struct mce mce_outentry[MCE_LOG_LEN]; 326 struct mce mce_outentry[MCE_LOG_LEN];
327 327
328 /* Fifo in/out counters */ 328 /* Fifo in/out counters */
329 unsigned mce_in, mce_out; 329 unsigned mce_in, mce_out;
330 330
331 /* Count indicator to show errors not got */ 331 /* Count indicator to show errors not got */
332 unsigned mce_overrun; 332 unsigned mce_overrun;
333 333
334 /* Memory description */ 334 /* Memory description */
335 u64 tolm, tohm; 335 u64 tolm, tohm;
336 }; 336 };
337 337
338 #define PCI_DESCR(device_id, opt) \ 338 #define PCI_DESCR(device_id, opt) \
339 .dev_id = (device_id), \ 339 .dev_id = (device_id), \
340 .optional = opt 340 .optional = opt
341 341
342 static const struct pci_id_descr pci_dev_descr_sbridge[] = { 342 static const struct pci_id_descr pci_dev_descr_sbridge[] = {
343 /* Processor Home Agent */ 343 /* Processor Home Agent */
344 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) }, 344 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },
345 345
346 /* Memory controller */ 346 /* Memory controller */
347 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) }, 347 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
348 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) }, 348 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
349 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) }, 349 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
350 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) }, 350 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
351 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) }, 351 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
352 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) }, 352 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
353 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) }, 353 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },
354 354
355 /* System Address Decoder */ 355 /* System Address Decoder */
356 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) }, 356 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
357 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) }, 357 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },
358 358
359 /* Broadcast Registers */ 359 /* Broadcast Registers */
360 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, 360 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
361 }; 361 };
362 362
363 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } 363 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
364 static const struct pci_id_table pci_dev_descr_sbridge_table[] = { 364 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
365 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge), 365 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
366 {0,} /* 0 terminated list. */ 366 {0,} /* 0 terminated list. */
367 }; 367 };
368 368
369 /* This changes depending if 1HA or 2HA: 369 /* This changes depending if 1HA or 2HA:
370 * 1HA: 370 * 1HA:
371 * 0x0eb8 (17.0) is DDRIO0 371 * 0x0eb8 (17.0) is DDRIO0
372 * 2HA: 372 * 2HA:
373 * 0x0ebc (17.4) is DDRIO0 373 * 0x0ebc (17.4) is DDRIO0
374 */ 374 */
375 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8 375 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8
376 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc 376 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc
377 377
378 /* pci ids */ 378 /* pci ids */
379 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0 379 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0
380 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8 380 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8
381 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71 381 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71
382 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa 382 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa
383 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab 383 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab
384 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac 384 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac
385 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead 385 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead
386 #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8 386 #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8
387 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9 387 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9
388 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca 388 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca
389 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60 389 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60
390 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68 390 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68
391 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79 391 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79
392 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a 392 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a
393 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b 393 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b
394 394
395 static const struct pci_id_descr pci_dev_descr_ibridge[] = { 395 static const struct pci_id_descr pci_dev_descr_ibridge[] = {
396 /* Processor Home Agent */ 396 /* Processor Home Agent */
397 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) }, 397 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },
398 398
399 /* Memory controller */ 399 /* Memory controller */
400 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) }, 400 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
401 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) }, 401 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
402 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) }, 402 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
403 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) }, 403 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
404 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) }, 404 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
405 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) }, 405 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },
406 406
407 /* System Address Decoder */ 407 /* System Address Decoder */
408 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) }, 408 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },
409 409
410 /* Broadcast Registers */ 410 /* Broadcast Registers */
411 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) }, 411 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
412 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) }, 412 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },
413 413
414 /* Optional, mode 2HA */ 414 /* Optional, mode 2HA */
415 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) }, 415 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
416 #if 0 416 #if 0
417 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) }, 417 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
418 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) }, 418 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
419 #endif 419 #endif
420 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) }, 420 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
421 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) }, 421 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
422 422
423 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) }, 423 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
424 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) }, 424 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
425 }; 425 };
426 426
427 static const struct pci_id_table pci_dev_descr_ibridge_table[] = { 427 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
428 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge), 428 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
429 {0,} /* 0 terminated list. */ 429 {0,} /* 0 terminated list. */
430 }; 430 };
431 431
432 /* Haswell support */ 432 /* Haswell support */
433 /* EN processor: 433 /* EN processor:
434 * - 1 IMC 434 * - 1 IMC
435 * - 3 DDR3 channels, 2 DPC per channel 435 * - 3 DDR3 channels, 2 DPC per channel
436 * EP processor: 436 * EP processor:
437 * - 1 or 2 IMC 437 * - 1 or 2 IMC
438 * - 4 DDR4 channels, 3 DPC per channel 438 * - 4 DDR4 channels, 3 DPC per channel
439 * EP 4S processor: 439 * EP 4S processor:
440 * - 2 IMC 440 * - 2 IMC
441 * - 4 DDR4 channels, 3 DPC per channel 441 * - 4 DDR4 channels, 3 DPC per channel
442 * EX processor: 442 * EX processor:
443 * - 2 IMC 443 * - 2 IMC
444 * - each IMC interfaces with a SMI 2 channel 444 * - each IMC interfaces with a SMI 2 channel
445 * - each SMI channel interfaces with a scalable memory buffer 445 * - each SMI channel interfaces with a scalable memory buffer
446 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC 446 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
447 */ 447 */
448 #define HASWELL_DDRCRCLKCONTROLS 0xa10 448 #define HASWELL_DDRCRCLKCONTROLS 0xa10
449 #define HASWELL_HASYSDEFEATURE2 0x84 449 #define HASWELL_HASYSDEFEATURE2 0x84
450 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28 450 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
451 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0 451 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
452 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60 452 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
453 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8 453 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
454 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71 454 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71
455 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68 455 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
456 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79 456 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79
457 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc 457 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
458 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd 458 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
459 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa 459 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
460 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab 460 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
461 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac 461 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
462 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad 462 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
463 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a 463 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
464 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b 464 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
465 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c 465 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
466 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d 466 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
467 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd 467 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
468 static const struct pci_id_descr pci_dev_descr_haswell[] = { 468 static const struct pci_id_descr pci_dev_descr_haswell[] = {
469 /* first item must be the HA */ 469 /* first item must be the HA */
470 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) }, 470 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },
471 471
472 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) }, 472 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) },
473 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) }, 473 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) },
474 474
475 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) }, 475 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) },
476 476
477 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) }, 477 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) },
478 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) }, 478 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) },
479 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) }, 479 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) },
480 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) }, 480 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) },
481 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) }, 481 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) },
482 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) }, 482 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },
483 483
484 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) }, 484 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
485 485
486 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) }, 486 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
487 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) }, 487 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
488 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) }, 488 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) },
489 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) }, 489 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) },
490 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) }, 490 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) },
491 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) }, 491 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) },
492 }; 492 };
493 493
/* Haswell device-descriptor list wrapped in a 0-terminated pci_id_table */
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
	{0,}			/* 0 terminated list. */
};
498 498
499 /* 499 /*
500 * pci_device_id table for which devices we are looking for 500 * pci_device_id table for which devices we are looking for
501 */ 501 */
502 static const struct pci_device_id sbridge_pci_tbl[] = { 502 static const struct pci_device_id sbridge_pci_tbl[] = {
503 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, 503 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)},
504 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, 504 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
505 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)}, 505 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
506 {0,} /* 0 terminated list. */ 506 {0,} /* 0 terminated list. */
507 }; 507 };
508 508
509 509
510 /**************************************************************************** 510 /****************************************************************************
511 Ancillary status routines 511 Ancillary status routines
512 ****************************************************************************/ 512 ****************************************************************************/
513 513
514 static inline int numrank(enum type type, u32 mtr) 514 static inline int numrank(enum type type, u32 mtr)
515 { 515 {
516 int ranks = (1 << RANK_CNT_BITS(mtr)); 516 int ranks = (1 << RANK_CNT_BITS(mtr));
517 int max = 4; 517 int max = 4;
518 518
519 if (type == HASWELL) 519 if (type == HASWELL)
520 max = 8; 520 max = 8;
521 521
522 if (ranks > max) { 522 if (ranks > max) {
523 edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n", 523 edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
524 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr); 524 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
525 return -EINVAL; 525 return -EINVAL;
526 } 526 }
527 527
528 return ranks; 528 return ranks;
529 } 529 }
530 530
531 static inline int numrow(u32 mtr) 531 static inline int numrow(u32 mtr)
532 { 532 {
533 int rows = (RANK_WIDTH_BITS(mtr) + 12); 533 int rows = (RANK_WIDTH_BITS(mtr) + 12);
534 534
535 if (rows < 13 || rows > 18) { 535 if (rows < 13 || rows > 18) {
536 edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n", 536 edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n",
537 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); 537 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
538 return -EINVAL; 538 return -EINVAL;
539 } 539 }
540 540
541 return 1 << rows; 541 return 1 << rows;
542 } 542 }
543 543
544 static inline int numcol(u32 mtr) 544 static inline int numcol(u32 mtr)
545 { 545 {
546 int cols = (COL_WIDTH_BITS(mtr) + 10); 546 int cols = (COL_WIDTH_BITS(mtr) + 10);
547 547
548 if (cols > 12) { 548 if (cols > 12) {
549 edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n", 549 edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
550 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); 550 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
551 return -EINVAL; 551 return -EINVAL;
552 } 552 }
553 553
554 return 1 << cols; 554 return 1 << cols;
555 } 555 }
556 556
557 static struct sbridge_dev *get_sbridge_dev(u8 bus) 557 static struct sbridge_dev *get_sbridge_dev(u8 bus)
558 { 558 {
559 struct sbridge_dev *sbridge_dev; 559 struct sbridge_dev *sbridge_dev;
560 560
561 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 561 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
562 if (sbridge_dev->bus == bus) 562 if (sbridge_dev->bus == bus)
563 return sbridge_dev; 563 return sbridge_dev;
564 } 564 }
565 565
566 return NULL; 566 return NULL;
567 } 567 }
568 568
569 static struct sbridge_dev *alloc_sbridge_dev(u8 bus, 569 static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
570 const struct pci_id_table *table) 570 const struct pci_id_table *table)
571 { 571 {
572 struct sbridge_dev *sbridge_dev; 572 struct sbridge_dev *sbridge_dev;
573 573
574 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL); 574 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
575 if (!sbridge_dev) 575 if (!sbridge_dev)
576 return NULL; 576 return NULL;
577 577
578 sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs, 578 sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
579 GFP_KERNEL); 579 GFP_KERNEL);
580 if (!sbridge_dev->pdev) { 580 if (!sbridge_dev->pdev) {
581 kfree(sbridge_dev); 581 kfree(sbridge_dev);
582 return NULL; 582 return NULL;
583 } 583 }
584 584
585 sbridge_dev->bus = bus; 585 sbridge_dev->bus = bus;
586 sbridge_dev->n_devs = table->n_devs; 586 sbridge_dev->n_devs = table->n_devs;
587 list_add_tail(&sbridge_dev->list, &sbridge_edac_list); 587 list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
588 588
589 return sbridge_dev; 589 return sbridge_dev;
590 } 590 }
591 591
/* Unlink @sbridge_dev from the global list and free it and its pdev array */
static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
	list_del(&sbridge_dev->list);
	kfree(sbridge_dev->pdev);
	kfree(sbridge_dev);
}
598 598
/* Read and decode TOLM (top of low memory) from the SAD1 function */
static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	/* Address range is 32:28 */
	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
	return GET_TOLM(reg);
}
607 607
/* Read and decode TOHM (top of high memory) from the SAD1 function */
static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
	return GET_TOHM(reg);
}
615 615
/* Ivy Bridge variant: TOLM lives in the br1 function instead of sad1 */
static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);

	return GET_TOLM(reg);
}
624 624
/* Ivy Bridge variant: TOHM lives in the br1 function instead of sad1 */
static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);

	return GET_TOHM(reg);
}
633 633
/*
 * Decode a RIR limit register: bits 10:1 give the limit in 512MB
 * (1 << 29) units; the low 29 bits are filled so the result is the
 * last byte address of the range.
 */
static u64 rir_limit(u32 reg)
{
	return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
}
638 638
639 static enum mem_type get_memory_type(struct sbridge_pvt *pvt) 639 static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
640 { 640 {
641 u32 reg; 641 u32 reg;
642 enum mem_type mtype; 642 enum mem_type mtype;
643 643
644 if (pvt->pci_ddrio) { 644 if (pvt->pci_ddrio) {
645 pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr, 645 pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
646 &reg); 646 &reg);
647 if (GET_BITFIELD(reg, 11, 11)) 647 if (GET_BITFIELD(reg, 11, 11))
648 /* FIXME: Can also be LRDIMM */ 648 /* FIXME: Can also be LRDIMM */
649 mtype = MEM_RDDR3; 649 mtype = MEM_RDDR3;
650 else 650 else
651 mtype = MEM_DDR3; 651 mtype = MEM_DDR3;
652 } else 652 } else
653 mtype = MEM_UNKNOWN; 653 mtype = MEM_UNKNOWN;
654 654
655 return mtype; 655 return mtype;
656 } 656 }
657 657
658 static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt) 658 static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
659 { 659 {
660 u32 reg; 660 u32 reg;
661 bool registered = false; 661 bool registered = false;
662 enum mem_type mtype = MEM_UNKNOWN; 662 enum mem_type mtype = MEM_UNKNOWN;
663 663
664 if (!pvt->pci_ddrio) 664 if (!pvt->pci_ddrio)
665 goto out; 665 goto out;
666 666
667 pci_read_config_dword(pvt->pci_ddrio, 667 pci_read_config_dword(pvt->pci_ddrio,
668 HASWELL_DDRCRCLKCONTROLS, &reg); 668 HASWELL_DDRCRCLKCONTROLS, &reg);
669 /* Is_Rdimm */ 669 /* Is_Rdimm */
670 if (GET_BITFIELD(reg, 16, 16)) 670 if (GET_BITFIELD(reg, 16, 16))
671 registered = true; 671 registered = true;
672 672
673 pci_read_config_dword(pvt->pci_ta, MCMTR, &reg); 673 pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
674 if (GET_BITFIELD(reg, 14, 14)) { 674 if (GET_BITFIELD(reg, 14, 14)) {
675 if (registered) 675 if (registered)
676 mtype = MEM_RDDR4; 676 mtype = MEM_RDDR4;
677 else 677 else
678 mtype = MEM_DDR4; 678 mtype = MEM_DDR4;
679 } else { 679 } else {
680 if (registered) 680 if (registered)
681 mtype = MEM_RDDR3; 681 mtype = MEM_RDDR3;
682 else 682 else
683 mtype = MEM_DDR3; 683 mtype = MEM_DDR3;
684 } 684 }
685 685
686 out: 686 out:
687 return mtype; 687 return mtype;
688 } 688 }
689 689
/* Node id: low 3 bits of SAD_CONTROL, read from the br0 function */
static u8 get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;
	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}
696 696
/* Haswell node id: 4 bits wide (0:3) and read from the sad1 function */
static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 3);
}
704 704
/* Haswell TOLM: bits 31:26 of the VT-d function's TOLM register, in
 * 64MB (1 << 26) units; low bits are filled to yield the last byte
 * address below the MMIO hole. */
static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x1ffffff;
}
712 712
/*
 * Haswell TOHM is split across two registers in the VT-d function:
 * TOHM_0 bits 31:26 hold the low part, TOHM_1 the upper bits.
 */
static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
	rc = GET_BITFIELD(reg, 26, 31);
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
	/* NOTE(review): reg is u32, so (reg << 6) discards the top 6
	 * bits of TOHM_1 before widening — confirm the register's
	 * meaningful width fits in 26 bits. */
	rc = ((reg << 6) | rc) << 26;

	return rc | 0x1ffffff;
}
725 725
/* Haswell RIR limit: bits 11:1 encode (limit/512MB - 1); returns the
 * last byte address of the range. */
static u64 haswell_rir_limit(u32 reg)
{
	return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
}
730 730
731 static inline u8 sad_pkg_socket(u8 pkg) 731 static inline u8 sad_pkg_socket(u8 pkg)
732 { 732 {
733 /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */ 733 /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
734 return ((pkg >> 3) << 2) | (pkg & 0x3); 734 return ((pkg >> 3) << 2) | (pkg & 0x3);
735 } 735 }
736 736
737 static inline u8 sad_pkg_ha(u8 pkg) 737 static inline u8 sad_pkg_ha(u8 pkg)
738 { 738 {
739 return (pkg >> 2) & 0x1; 739 return (pkg >> 2) & 0x1;
740 } 740 }
741 741
742 /**************************************************************************** 742 /****************************************************************************
743 Memory check routines 743 Memory check routines
744 ****************************************************************************/ 744 ****************************************************************************/
745 static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id) 745 static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
746 { 746 {
747 struct pci_dev *pdev = NULL; 747 struct pci_dev *pdev = NULL;
748 748
749 do { 749 do {
750 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev); 750 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
751 if (pdev && pdev->bus->number == bus) 751 if (pdev && pdev->bus->number == bus)
752 break; 752 break;
753 } while (pdev); 753 } while (pdev);
754 754
755 return pdev; 755 return pdev;
756 } 756 }
757 757
758 /** 758 /**
759 * check_if_ecc_is_active() - Checks if ECC is active 759 * check_if_ecc_is_active() - Checks if ECC is active
760 * @bus: Device bus 760 * @bus: Device bus
761 * @type: Memory controller type 761 * @type: Memory controller type
762 * returns: 0 in case ECC is active, -ENODEV if it can't be determined or 762 * returns: 0 in case ECC is active, -ENODEV if it can't be determined or
763 * disabled 763 * disabled
764 */ 764 */
765 static int check_if_ecc_is_active(const u8 bus, enum type type) 765 static int check_if_ecc_is_active(const u8 bus, enum type type)
766 { 766 {
767 struct pci_dev *pdev = NULL; 767 struct pci_dev *pdev = NULL;
768 u32 mcmtr, id; 768 u32 mcmtr, id;
769 769
770 if (type == IVY_BRIDGE) 770 if (type == IVY_BRIDGE)
771 id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA; 771 id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
772 else if (type == HASWELL) 772 else if (type == HASWELL)
773 id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA; 773 id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
774 else 774 else
775 id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA; 775 id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
776 776
777 pdev = get_pdev_same_bus(bus, id); 777 pdev = get_pdev_same_bus(bus, id);
778 if (!pdev) { 778 if (!pdev) {
779 sbridge_printk(KERN_ERR, "Couldn't find PCI device " 779 sbridge_printk(KERN_ERR, "Couldn't find PCI device "
780 "%04x:%04x! on bus %02d\n", 780 "%04x:%04x! on bus %02d\n",
781 PCI_VENDOR_ID_INTEL, id, bus); 781 PCI_VENDOR_ID_INTEL, id, bus);
782 return -ENODEV; 782 return -ENODEV;
783 } 783 }
784 784
785 pci_read_config_dword(pdev, MCMTR, &mcmtr); 785 pci_read_config_dword(pdev, MCMTR, &mcmtr);
786 if (!IS_ECC_ENABLED(mcmtr)) { 786 if (!IS_ECC_ENABLED(mcmtr)) {
787 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); 787 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
788 return -ENODEV; 788 return -ENODEV;
789 } 789 }
790 return 0; 790 return 0;
791 } 791 }
792 792
793 static int get_dimm_config(struct mem_ctl_info *mci) 793 static int get_dimm_config(struct mem_ctl_info *mci)
794 { 794 {
795 struct sbridge_pvt *pvt = mci->pvt_info; 795 struct sbridge_pvt *pvt = mci->pvt_info;
796 struct dimm_info *dimm; 796 struct dimm_info *dimm;
797 unsigned i, j, banks, ranks, rows, cols, npages; 797 unsigned i, j, banks, ranks, rows, cols, npages;
798 u64 size; 798 u64 size;
799 u32 reg; 799 u32 reg;
800 enum edac_type mode; 800 enum edac_type mode;
801 enum mem_type mtype; 801 enum mem_type mtype;
802 802
803 if (pvt->info.type == HASWELL) 803 if (pvt->info.type == HASWELL)
804 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg); 804 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
805 else 805 else
806 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg); 806 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
807 807
808 pvt->sbridge_dev->source_id = SOURCE_ID(reg); 808 pvt->sbridge_dev->source_id = SOURCE_ID(reg);
809 809
810 pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt); 810 pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
811 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", 811 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
812 pvt->sbridge_dev->mc, 812 pvt->sbridge_dev->mc,
813 pvt->sbridge_dev->node_id, 813 pvt->sbridge_dev->node_id,
814 pvt->sbridge_dev->source_id); 814 pvt->sbridge_dev->source_id);
815 815
816 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg); 816 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
817 if (IS_MIRROR_ENABLED(reg)) { 817 if (IS_MIRROR_ENABLED(reg)) {
818 edac_dbg(0, "Memory mirror is enabled\n"); 818 edac_dbg(0, "Memory mirror is enabled\n");
819 pvt->is_mirrored = true; 819 pvt->is_mirrored = true;
820 } else { 820 } else {
821 edac_dbg(0, "Memory mirror is disabled\n"); 821 edac_dbg(0, "Memory mirror is disabled\n");
822 pvt->is_mirrored = false; 822 pvt->is_mirrored = false;
823 } 823 }
824 824
825 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); 825 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
826 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { 826 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
827 edac_dbg(0, "Lockstep is enabled\n"); 827 edac_dbg(0, "Lockstep is enabled\n");
828 mode = EDAC_S8ECD8ED; 828 mode = EDAC_S8ECD8ED;
829 pvt->is_lockstep = true; 829 pvt->is_lockstep = true;
830 } else { 830 } else {
831 edac_dbg(0, "Lockstep is disabled\n"); 831 edac_dbg(0, "Lockstep is disabled\n");
832 mode = EDAC_S4ECD4ED; 832 mode = EDAC_S4ECD4ED;
833 pvt->is_lockstep = false; 833 pvt->is_lockstep = false;
834 } 834 }
835 if (IS_CLOSE_PG(pvt->info.mcmtr)) { 835 if (IS_CLOSE_PG(pvt->info.mcmtr)) {
836 edac_dbg(0, "address map is on closed page mode\n"); 836 edac_dbg(0, "address map is on closed page mode\n");
837 pvt->is_close_pg = true; 837 pvt->is_close_pg = true;
838 } else { 838 } else {
839 edac_dbg(0, "address map is on open page mode\n"); 839 edac_dbg(0, "address map is on open page mode\n");
840 pvt->is_close_pg = false; 840 pvt->is_close_pg = false;
841 } 841 }
842 842
843 mtype = pvt->info.get_memory_type(pvt); 843 mtype = pvt->info.get_memory_type(pvt);
844 if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4) 844 if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
845 edac_dbg(0, "Memory is registered\n"); 845 edac_dbg(0, "Memory is registered\n");
846 else if (mtype == MEM_UNKNOWN) 846 else if (mtype == MEM_UNKNOWN)
847 edac_dbg(0, "Cannot determine memory type\n"); 847 edac_dbg(0, "Cannot determine memory type\n");
848 else 848 else
849 edac_dbg(0, "Memory is unregistered\n"); 849 edac_dbg(0, "Memory is unregistered\n");
850 850
851 if (mtype == MEM_DDR4 || MEM_RDDR4) 851 if (mtype == MEM_DDR4 || MEM_RDDR4)
852 banks = 16; 852 banks = 16;
853 else 853 else
854 banks = 8; 854 banks = 8;
855 855
856 for (i = 0; i < NUM_CHANNELS; i++) { 856 for (i = 0; i < NUM_CHANNELS; i++) {
857 u32 mtr; 857 u32 mtr;
858 858
859 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) { 859 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
860 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 860 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
861 i, j, 0); 861 i, j, 0);
862 pci_read_config_dword(pvt->pci_tad[i], 862 pci_read_config_dword(pvt->pci_tad[i],
863 mtr_regs[j], &mtr); 863 mtr_regs[j], &mtr);
864 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); 864 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
865 if (IS_DIMM_PRESENT(mtr)) { 865 if (IS_DIMM_PRESENT(mtr)) {
866 pvt->channel[i].dimms++; 866 pvt->channel[i].dimms++;
867 867
868 ranks = numrank(pvt->info.type, mtr); 868 ranks = numrank(pvt->info.type, mtr);
869 rows = numrow(mtr); 869 rows = numrow(mtr);
870 cols = numcol(mtr); 870 cols = numcol(mtr);
871 871
872 size = ((u64)rows * cols * banks * ranks) >> (20 - 3); 872 size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
873 npages = MiB_TO_PAGES(size); 873 npages = MiB_TO_PAGES(size);
874 874
875 edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", 875 edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
876 pvt->sbridge_dev->mc, i, j, 876 pvt->sbridge_dev->mc, i, j,
877 size, npages, 877 size, npages,
878 banks, ranks, rows, cols); 878 banks, ranks, rows, cols);
879 879
880 dimm->nr_pages = npages; 880 dimm->nr_pages = npages;
881 dimm->grain = 32; 881 dimm->grain = 32;
882 switch (banks) { 882 switch (banks) {
883 case 16: 883 case 16:
884 dimm->dtype = DEV_X16; 884 dimm->dtype = DEV_X16;
885 break; 885 break;
886 case 8: 886 case 8:
887 dimm->dtype = DEV_X8; 887 dimm->dtype = DEV_X8;
888 break; 888 break;
889 case 4: 889 case 4:
890 dimm->dtype = DEV_X4; 890 dimm->dtype = DEV_X4;
891 break; 891 break;
892 } 892 }
893 dimm->mtype = mtype; 893 dimm->mtype = mtype;
894 dimm->edac_mode = mode; 894 dimm->edac_mode = mode;
895 snprintf(dimm->label, sizeof(dimm->label), 895 snprintf(dimm->label, sizeof(dimm->label),
896 "CPU_SrcID#%u_Channel#%u_DIMM#%u", 896 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
897 pvt->sbridge_dev->source_id, i, j); 897 pvt->sbridge_dev->source_id, i, j);
898 } 898 }
899 } 899 }
900 } 900 }
901 901
902 return 0; 902 return 0;
903 } 903 }
904 904
/*
 * get_memory_layout - Dump the memory decoding hierarchy to the debug log
 * @mci: memory controller being probed
 *
 * Reads and prints, in order: the TOLM/TOHM ranges, the SAD rules with
 * their interleave lists, the TAD rules, the per-channel TAD offsets,
 * and the per-channel RIR ranges with their interleave targets.  Also
 * caches pvt->tolm and pvt->tohm for later address decoding.
 */
static void get_memory_layout(const struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int i, j, k, n_sads, n_tads, sad_interl;
	u32 reg;
	u64 limit, prv = 0;
	u64 tmp_mb;
	u32 mb, kb;
	u32 rir_way;

	/*
	 * Step 1) Get TOLM/TOHM ranges
	 */

	pvt->tolm = pvt->info.get_tolm(pvt);
	tmp_mb = (1 + pvt->tolm) >> 20;

	mb = div_u64_rem(tmp_mb, 1000, &kb);
	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);

	/* Address range is already 45:25 */
	pvt->tohm = pvt->info.get_tohm(pvt);
	tmp_mb = (1 + pvt->tohm) >> 20;

	mb = div_u64_rem(tmp_mb, 1000, &kb);
	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);

	/*
	 * Step 2) Get SAD range and SAD Interleave list
	 * TAD registers contain the interleave wayness. However, it
	 * seems simpler to just discover it indirectly, with the
	 * algorithm bellow.
	 */
	prv = 0;
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		/* SAD_LIMIT Address range is 45:26 */
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);
		limit = SAD_LIMIT(reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		/* Rules are programmed with ascending limits; stop at
		 * the first non-increasing one. */
		if (limit <= prv)
			break;

		tmp_mb = (limit + 1) >> 20;
		mb = div_u64_rem(tmp_mb, 1000, &kb);
		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
			 n_sads,
			 get_dram_attr(reg),
			 mb, kb,
			 ((u64)tmp_mb) << 20L,
			 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
			 reg);
		prv = limit;

		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
				      &reg);
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (j = 0; j < 8; j++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
			/* The interleave list repeats once it wraps;
			 * stop when entry 0 comes around again. */
			if (j > 0 && sad_interl == pkg)
				break;

			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
				 n_sads, j, pkg);
		}
	}

	/*
	 * Step 3) Get TAD range
	 */
	prv = 0;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
				      &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv)
			break;
		tmp_mb = (limit + 1) >> 20;

		mb = div_u64_rem(tmp_mb, 1000, &kb);
		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
			 n_tads, mb, kb,
			 ((u64)tmp_mb) << 20L,
			 (u32)TAD_SOCK(reg),
			 (u32)TAD_CH(reg),
			 (u32)TAD_TGT0(reg),
			 (u32)TAD_TGT1(reg),
			 (u32)TAD_TGT2(reg),
			 (u32)TAD_TGT3(reg),
			 reg);
		prv = limit;
	}

	/*
	 * Step 4) Get TAD offsets, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < n_tads; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      tad_ch_nilv_offset[j],
					      &reg);
			tmp_mb = TAD_OFFSET(reg) >> 20;
			mb = div_u64_rem(tmp_mb, 1000, &kb);
			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
				 i, j,
				 mb, kb,
				 ((u64)tmp_mb) << 20L,
				 reg);
		}
	}

	/*
	 * Step 6) Get RIR Wayness/Limit, per each channel
	 * (NOTE(review): the step numbering jumps from 4 to 6 in the
	 * original comments; there is no step 5.)
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < MAX_RIR_RANGES; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      rir_way_limit[j],
					      &reg);

			if (!IS_RIR_VALID(reg))
				continue;

			tmp_mb = pvt->info.rir_limit(reg) >> 20;
			rir_way = 1 << RIR_WAY(reg);
			mb = div_u64_rem(tmp_mb, 1000, &kb);
			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
				 i, j,
				 mb, kb,
				 ((u64)tmp_mb) << 20L,
				 rir_way,
				 reg);

			for (k = 0; k < rir_way; k++) {
				pci_read_config_dword(pvt->pci_tad[i],
						      rir_offset[j][k],
						      &reg);
				/* NOTE(review): the value is shifted by
				 * 6 yet stored in "tmp_mb" and printed
				 * as MB below — confirm the units of
				 * RIR_OFFSET. */
				tmp_mb = RIR_OFFSET(reg) << 6;

				mb = div_u64_rem(tmp_mb, 1000, &kb);
				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
					 i, j, k,
					 mb, kb,
					 ((u64)tmp_mb) << 20L,
					 (u32)RIR_RNK_TGT(reg),
					 reg);
			}
		}
	}
}
1062 1062
1063 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id) 1063 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
1064 { 1064 {
1065 struct sbridge_dev *sbridge_dev; 1065 struct sbridge_dev *sbridge_dev;
1066 1066
1067 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 1067 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1068 if (sbridge_dev->node_id == node_id) 1068 if (sbridge_dev->node_id == node_id)
1069 return sbridge_dev->mci; 1069 return sbridge_dev->mci;
1070 } 1070 }
1071 return NULL; 1071 return NULL;
1072 } 1072 }
1073 1073
/*
 * get_memory_error_data - translate a system address into memory topology.
 * @mci:	memory controller the error was first reported against
 * @addr:	faulting system physical address
 * @socket:	output: CPU socket that owns @addr
 * @channel_mask: output: bitmask of channels that may hold the data
 *		(more than one bit set under mirroring/lockstep)
 * @rank:	output: rank target selected by the RIR
 * @area_type:	output: DRAM attribute string of the matching SAD rule
 * @msg:	output buffer, filled with a failure description on error
 *
 * Walks the iMC address-decode pipeline in hardware order:
 *   Step 0: reject addresses inside the TOLM/TOHM holes,
 *   Step 1: SAD (source address decoder)  -> socket (+ home agent),
 *   Step 2: TAD (target address decoder)  -> channel + channel address,
 *   Step 3: RIR (rank interleave register) -> rank.
 *
 * On success, @mci-relative decode may continue on a *different* MC: the
 * function re-targets its pvt pointer to the socket found in Step 1.
 *
 * Returns 0 on success, -EINVAL (with @msg filled) when the address
 * cannot be decoded.
 */
static int get_memory_error_data(struct mem_ctl_info *mci,
				 u64 addr,
				 u8 *socket,
				 long *channel_mask,
				 u8 *rank,
				 char **area_type, char *msg)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pci_ha;
	int n_rir, n_sads, n_tads, sad_way, sck_xch;
	int sad_interl, idx, base_ch;
	int interleave_mode, shiftup = 0;
	unsigned sad_interleave[pvt->info.max_interleave];
	u32 reg, dram_rule;
	u8 ch_way, sck_way, pkg, sad_ha = 0;
	u32 tad_offset;
	u32 rir_way;
	u32 mb, kb;
	u64 ch_addr, offset, limit = 0, prv = 0;


	/*
	 * Step 0) Check if the address is at special memory ranges
	 * The check below is probably enough to fill all cases where
	 * the error is not inside a memory, except for the legacy
	 * range (e. g. VGA addresses). It is unlikely, however, that the
	 * memory controller would generate an error on that range.
	 */
	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
		return -EINVAL;
	}
	if (addr >= (u64)pvt->tohm) {
		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
		return -EINVAL;
	}

	/*
	 * Step 1) Get socket
	 */
	/* Scan the SAD dram rules in ascending-limit order for a match. */
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		limit = SAD_LIMIT(reg);
		/* Limits must be strictly increasing; anything else is bogus */
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory socket");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_sads == pvt->info.max_sad) {
		sprintf(msg, "Can't discover the memory socket");
		return -EINVAL;
	}
	dram_rule = reg;
	*area_type = get_dram_attr(dram_rule);
	interleave_mode = INTERLEAVE_MODE(dram_rule);

	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
			      &reg);

	if (pvt->info.type == SANDY_BRIDGE) {
		/*
		 * Count the interleave wayness: the package list repeats
		 * once the first package shows up again.
		 */
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (sad_way = 0; sad_way < 8; sad_way++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
			if (sad_way > 0 && sad_interl == pkg)
				break;
			sad_interleave[sad_way] = pkg;
			edac_dbg(0, "SAD interleave #%d: %d\n",
				 sad_way, sad_interleave[sad_way]);
		}
		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
			 pvt->sbridge_dev->mc,
			 n_sads,
			 addr,
			 limit,
			 sad_way + 7,
			 !interleave_mode ? "" : "XOR[18:16]");
		/* Interleave index comes from addr[8:6], optionally XORed */
		if (interleave_mode)
			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
		else
			idx = (addr >> 6) & 7;
		/* Reduce the index modulo the detected wayness */
		switch (sad_way) {
		case 1:
			idx = 0;
			break;
		case 2:
			idx = idx & 1;
			break;
		case 4:
			idx = idx & 3;
			break;
		case 8:
			break;
		default:
			sprintf(msg, "Can't discover socket interleave");
			return -EINVAL;
		}
		*socket = sad_interleave[idx];
		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
			 idx, sad_way, *socket);
	} else if (pvt->info.type == HASWELL) {
		int bits, a7mode = A7MODE(dram_rule);

		if (a7mode) {
			/* A7 mode swaps P9 with P6 */
			bits = GET_BITFIELD(addr, 7, 8) << 1;
			bits |= GET_BITFIELD(addr, 9, 9);
		} else
			bits = GET_BITFIELD(addr, 7, 9);

		if (interleave_mode) {
			/* interleave mode will XOR {8,7,6} with {18,17,16} */
			idx = GET_BITFIELD(addr, 16, 18);
			idx ^= bits;
		} else
			idx = bits;

		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);

		if (a7mode) {
			/* MCChanShiftUpEnable */
			pci_read_config_dword(pvt->pci_ha0,
					      HASWELL_HASYSDEFEATURE2, &reg);
			shiftup = GET_BITFIELD(reg, 22, 22);
		}

		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
			 idx, *socket, sad_ha, shiftup);
	} else {
		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
		idx = (addr >> 6) & 7;
		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);
		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
			 idx, *socket, sad_ha);
	}

	/*
	 * Move to the proper node structure, in order to access the
	 * right PCI registers
	 */
	new_mci = get_mci_for_node_id(*socket);
	if (!new_mci) {
		sprintf(msg, "Struct for socket #%u wasn't initialized",
			*socket);
		return -EINVAL;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	/*
	 * Step 2) Get memory channel
	 */
	prv = 0;
	/* Pick the home agent that Step 1 resolved (SB has only HA0) */
	if (pvt->info.type == SANDY_BRIDGE)
		pci_ha = pvt->pci_ha0;
	else {
		if (sad_ha)
			pci_ha = pvt->pci_ha1;
		else
			pci_ha = pvt->pci_ha0;
	}
	/* Same ascending-limit scan as the SAD, now over the TAD rules */
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory channel");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_tads == MAX_TAD) {
		sprintf(msg, "Can't discover the memory channel");
		return -EINVAL;
	}

	/* Register fields encode wayness minus one */
	ch_way = TAD_CH(reg) + 1;
	sck_way = TAD_SOCK(reg) + 1;

	if (ch_way == 3)
		idx = addr >> 6;
	else
		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
	idx = idx % ch_way;

	/*
	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
	 */
	switch (idx) {
	case 0:
		base_ch = TAD_TGT0(reg);
		break;
	case 1:
		base_ch = TAD_TGT1(reg);
		break;
	case 2:
		base_ch = TAD_TGT2(reg);
		break;
	case 3:
		base_ch = TAD_TGT3(reg);
		break;
	default:
		sprintf(msg, "Can't discover the TAD target");
		return -EINVAL;
	}
	*channel_mask = 1 << base_ch;

	pci_read_config_dword(pvt->pci_tad[base_ch],
				tad_ch_nilv_offset[n_tads],
				&tad_offset);

	if (pvt->is_mirrored) {
		/* The mirror partner is always two channels away */
		*channel_mask |= 1 << ((base_ch + 2) % 4);
		switch(ch_way) {
		case 2:
		case 4:
			sck_xch = 1 << sck_way * (ch_way >> 1);
			break;
		default:
			sprintf(msg, "Invalid mirror set. Can't decode addr");
			return -EINVAL;
		}
	} else
		sck_xch = (1 << sck_way) * ch_way;

	if (pvt->is_lockstep)
		/* Lockstep pairs adjacent channels */
		*channel_mask |= 1 << ((base_ch + 1) % 4);

	offset = TAD_OFFSET(tad_offset);

	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
		 n_tads,
		 addr,
		 limit,
		 (u32)TAD_SOCK(reg),
		 ch_way,
		 offset,
		 idx,
		 base_ch,
		 *channel_mask);

	/* Calculate channel address */
	/* Remove the TAD offset */

	if (offset > addr) {
		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
			offset, addr);
		return -EINVAL;
	}
	addr -= offset;
	/* Store the low bits [0:6] of the addr */
	ch_addr = addr & 0x7f;
	/* Remove socket wayness and remove 6 bits */
	addr >>= 6;
	addr = div_u64(addr, sck_xch);
#if 0
	/* Divide by channel way */
	addr = addr / ch_way;
#endif
	/* Recover the last 6 bits */
	ch_addr |= addr << 6;

	/*
	 * Step 3) Decode rank
	 */
	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
		pci_read_config_dword(pvt->pci_tad[base_ch],
				      rir_way_limit[n_rir],
				      &reg);

		if (!IS_RIR_VALID(reg))
			continue;

		limit = pvt->info.rir_limit(reg);
		mb = div_u64_rem(limit >> 20, 1000, &kb);
		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
			 n_rir,
			 mb, kb,
			 limit,
			 1 << RIR_WAY(reg));
		if (ch_addr <= limit)
			break;
	}
	if (n_rir == MAX_RIR_RANGES) {
		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
			ch_addr);
		return -EINVAL;
	}
	rir_way = RIR_WAY(reg);

	if (pvt->is_close_pg)
		idx = (ch_addr >> 6);
	else
		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
	idx %= 1 << rir_way;

	pci_read_config_dword(pvt->pci_tad[base_ch],
			      rir_offset[n_rir][idx],
			      &reg);
	*rank = RIR_RNK_TGT(reg);

	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
		 n_rir,
		 ch_addr,
		 limit,
		 rir_way,
		 idx);

	return 0;
}
1397 1397
1398 /**************************************************************************** 1398 /****************************************************************************
1399 Device initialization routines: put/get, init/exit 1399 Device initialization routines: put/get, init/exit
1400 ****************************************************************************/ 1400 ****************************************************************************/
1401 1401
1402 /* 1402 /*
1403 * sbridge_put_all_devices 'put' all the devices that we have 1403 * sbridge_put_all_devices 'put' all the devices that we have
1404 * reserved via 'get' 1404 * reserved via 'get'
1405 */ 1405 */
1406 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev) 1406 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
1407 { 1407 {
1408 int i; 1408 int i;
1409 1409
1410 edac_dbg(0, "\n"); 1410 edac_dbg(0, "\n");
1411 for (i = 0; i < sbridge_dev->n_devs; i++) { 1411 for (i = 0; i < sbridge_dev->n_devs; i++) {
1412 struct pci_dev *pdev = sbridge_dev->pdev[i]; 1412 struct pci_dev *pdev = sbridge_dev->pdev[i];
1413 if (!pdev) 1413 if (!pdev)
1414 continue; 1414 continue;
1415 edac_dbg(0, "Removing dev %02x:%02x.%d\n", 1415 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
1416 pdev->bus->number, 1416 pdev->bus->number,
1417 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1417 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1418 pci_dev_put(pdev); 1418 pci_dev_put(pdev);
1419 } 1419 }
1420 } 1420 }
1421 1421
/* Release every per-bus descriptor and the device references it holds. */
static void sbridge_put_all_devices(void)
{
	struct sbridge_dev *sbridge_dev, *tmp;

	/*
	 * _safe iteration is required: free_sbridge_dev() presumably unlinks
	 * and frees the node we are standing on — confirm in its definition.
	 */
	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
		sbridge_put_devices(sbridge_dev);
		free_sbridge_dev(sbridge_dev);
	}
}
1431 1431
/*
 * sbridge_get_onedevice - find and reserve the next instance of one PCI
 * function from the model-specific table.
 * @prev:	in/out iteration cursor for pci_get_device(); start with
 *		*prev == NULL, call repeatedly until *prev comes back NULL
 * @num_mc:	incremented whenever a new bus (i.e. a new MC) is discovered
 * @table:	model specific table of expected PCI IDs
 * @devno:	index of the descriptor inside @table to look for
 *
 * Devices found on the same PCI bus are grouped into one sbridge_dev
 * (one per memory controller) at slot @devno.
 *
 * Returns 0 on success or when an optional device is absent, -ENODEV /
 * -ENOMEM on failure.
 */
static int sbridge_get_onedevice(struct pci_dev **prev,
				 u8 *num_mc,
				 const struct pci_id_table *table,
				 const unsigned devno)
{
	struct sbridge_dev *sbridge_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];
	struct pci_dev *pdev = NULL;
	u8 bus = 0;

	sbridge_printk(KERN_DEBUG,
		"Seeking for: PCI ID %04x:%04x\n",
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	if (!pdev) {
		/* Normal end of iteration: we already found at least one */
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* if the HA wasn't found */
		if (devno == 0)
			return -ENODEV;

		sbridge_printk(KERN_INFO,
			"Device not found: %04x:%04x\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* One sbridge_dev per bus: find it or allocate a new one */
	sbridge_dev = get_sbridge_dev(bus);
	if (!sbridge_dev) {
		sbridge_dev = alloc_sbridge_dev(bus, table);
		if (!sbridge_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		(*num_mc)++;
	}

	if (sbridge_dev->pdev[devno]) {
		sbridge_printk(KERN_ERR,
			"Duplicated device for %04x:%04x\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	/* Ownership of the pci_get_device() reference moves to pdev[devno];
	 * sbridge_put_devices() drops it on teardown. */
	sbridge_dev->pdev[devno] = pdev;

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		sbridge_printk(KERN_ERR,
			"Couldn't enable %04x:%04x\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	edac_dbg(0, "Detected %04x:%04x\n",
		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}
1513 1513
/*
 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
 *			     devices we want to reference for this driver.
 * @num_mc: pointer to the memory controllers count, to be incremented in case
 *	    of success.
 * @table: model specific table, terminated by an entry with a NULL descr
 *
 * returns 0 in case of success or error code
 */
static int sbridge_get_all_devices(u8 *num_mc,
				   const struct pci_id_table *table)
{
	int i, rc;
	struct pci_dev *pdev = NULL;

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			/* reset cursor: scan all instances of descriptor i */
			pdev = NULL;
			do {
				rc = sbridge_get_onedevice(&pdev, num_mc,
							   table, i);
				if (rc < 0) {
					/*
					 * Descriptor 0 (the HA) missing means
					 * this table doesn't match the h/w:
					 * force the for loop to end so we
					 * move on to the next table.
					 */
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					/* Partial match: undo all 'get's */
					sbridge_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}
1550 1550
1551 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, 1551 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
1552 struct sbridge_dev *sbridge_dev) 1552 struct sbridge_dev *sbridge_dev)
1553 { 1553 {
1554 struct sbridge_pvt *pvt = mci->pvt_info; 1554 struct sbridge_pvt *pvt = mci->pvt_info;
1555 struct pci_dev *pdev; 1555 struct pci_dev *pdev;
1556 int i; 1556 int i;
1557 1557
1558 for (i = 0; i < sbridge_dev->n_devs; i++) { 1558 for (i = 0; i < sbridge_dev->n_devs; i++) {
1559 pdev = sbridge_dev->pdev[i]; 1559 pdev = sbridge_dev->pdev[i];
1560 if (!pdev) 1560 if (!pdev)
1561 continue; 1561 continue;
1562 1562
1563 switch (pdev->device) { 1563 switch (pdev->device) {
1564 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0: 1564 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
1565 pvt->pci_sad0 = pdev; 1565 pvt->pci_sad0 = pdev;
1566 break; 1566 break;
1567 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1: 1567 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
1568 pvt->pci_sad1 = pdev; 1568 pvt->pci_sad1 = pdev;
1569 break; 1569 break;
1570 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR: 1570 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
1571 pvt->pci_br0 = pdev; 1571 pvt->pci_br0 = pdev;
1572 break; 1572 break;
1573 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: 1573 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
1574 pvt->pci_ha0 = pdev; 1574 pvt->pci_ha0 = pdev;
1575 break; 1575 break;
1576 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: 1576 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
1577 pvt->pci_ta = pdev; 1577 pvt->pci_ta = pdev;
1578 break; 1578 break;
1579 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS: 1579 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
1580 pvt->pci_ras = pdev; 1580 pvt->pci_ras = pdev;
1581 break; 1581 break;
1582 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0: 1582 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
1583 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1: 1583 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
1584 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2: 1584 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
1585 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3: 1585 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
1586 { 1586 {
1587 int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0; 1587 int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
1588 pvt->pci_tad[id] = pdev; 1588 pvt->pci_tad[id] = pdev;
1589 } 1589 }
1590 break; 1590 break;
1591 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO: 1591 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
1592 pvt->pci_ddrio = pdev; 1592 pvt->pci_ddrio = pdev;
1593 break; 1593 break;
1594 default: 1594 default:
1595 goto error; 1595 goto error;
1596 } 1596 }
1597 1597
1598 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n", 1598 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
1599 pdev->vendor, pdev->device, 1599 pdev->vendor, pdev->device,
1600 sbridge_dev->bus, 1600 sbridge_dev->bus,
1601 pdev); 1601 pdev);
1602 } 1602 }
1603 1603
1604 /* Check if everything were registered */ 1604 /* Check if everything were registered */
1605 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 || 1605 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
1606 !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta) 1606 !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta)
1607 goto enodev; 1607 goto enodev;
1608 1608
1609 for (i = 0; i < NUM_CHANNELS; i++) { 1609 for (i = 0; i < NUM_CHANNELS; i++) {
1610 if (!pvt->pci_tad[i]) 1610 if (!pvt->pci_tad[i])
1611 goto enodev; 1611 goto enodev;
1612 } 1612 }
1613 return 0; 1613 return 0;
1614 1614
1615 enodev: 1615 enodev:
1616 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 1616 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1617 return -ENODEV; 1617 return -ENODEV;
1618 1618
1619 error: 1619 error:
1620 sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n", 1620 sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
1621 PCI_VENDOR_ID_INTEL, pdev->device); 1621 PCI_VENDOR_ID_INTEL, pdev->device);
1622 return -EINVAL; 1622 return -EINVAL;
1623 } 1623 }
1624 1624
1625 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, 1625 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
1626 struct sbridge_dev *sbridge_dev) 1626 struct sbridge_dev *sbridge_dev)
1627 { 1627 {
1628 struct sbridge_pvt *pvt = mci->pvt_info; 1628 struct sbridge_pvt *pvt = mci->pvt_info;
1629 struct pci_dev *pdev, *tmp; 1629 struct pci_dev *pdev, *tmp;
1630 int i; 1630 int i;
1631 bool mode_2ha = false; 1631 bool mode_2ha = false;
1632 1632
1633 tmp = pci_get_device(PCI_VENDOR_ID_INTEL, 1633 tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
1634 PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL); 1634 PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL);
1635 if (tmp) { 1635 if (tmp) {
1636 mode_2ha = true; 1636 mode_2ha = true;
1637 pci_dev_put(tmp); 1637 pci_dev_put(tmp);
1638 } 1638 }
1639 1639
1640 for (i = 0; i < sbridge_dev->n_devs; i++) { 1640 for (i = 0; i < sbridge_dev->n_devs; i++) {
1641 pdev = sbridge_dev->pdev[i]; 1641 pdev = sbridge_dev->pdev[i];
1642 if (!pdev) 1642 if (!pdev)
1643 continue; 1643 continue;
1644 1644
1645 switch (pdev->device) { 1645 switch (pdev->device) {
1646 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0: 1646 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
1647 pvt->pci_ha0 = pdev; 1647 pvt->pci_ha0 = pdev;
1648 break; 1648 break;
1649 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: 1649 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
1650 pvt->pci_ta = pdev; 1650 pvt->pci_ta = pdev;
1651 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: 1651 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
1652 pvt->pci_ras = pdev; 1652 pvt->pci_ras = pdev;
1653 break; 1653 break;
1654 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2: 1654 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
1655 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3: 1655 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
1656 /* if we have 2 HAs active, channels 2 and 3 1656 /* if we have 2 HAs active, channels 2 and 3
1657 * are in other device */ 1657 * are in other device */
1658 if (mode_2ha) 1658 if (mode_2ha)
1659 break; 1659 break;
1660 /* fall through */ 1660 /* fall through */
1661 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0: 1661 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
1662 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1: 1662 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
1663 { 1663 {
1664 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0; 1664 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
1665 pvt->pci_tad[id] = pdev; 1665 pvt->pci_tad[id] = pdev;
1666 } 1666 }
1667 break; 1667 break;
1668 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0: 1668 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
1669 pvt->pci_ddrio = pdev; 1669 pvt->pci_ddrio = pdev;
1670 break; 1670 break;
1671 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0: 1671 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
1672 if (!mode_2ha) 1672 if (!mode_2ha)
1673 pvt->pci_ddrio = pdev; 1673 pvt->pci_ddrio = pdev;
1674 break; 1674 break;
1675 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD: 1675 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
1676 pvt->pci_sad0 = pdev; 1676 pvt->pci_sad0 = pdev;
1677 break; 1677 break;
1678 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0: 1678 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
1679 pvt->pci_br0 = pdev; 1679 pvt->pci_br0 = pdev;
1680 break; 1680 break;
1681 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1: 1681 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
1682 pvt->pci_br1 = pdev; 1682 pvt->pci_br1 = pdev;
1683 break; 1683 break;
1684 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1: 1684 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
1685 pvt->pci_ha1 = pdev; 1685 pvt->pci_ha1 = pdev;
1686 break; 1686 break;
1687 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0: 1687 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
1688 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1: 1688 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
1689 { 1689 {
1690 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 2; 1690 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 2;
1691 1691
1692 /* we shouldn't have this device if we have just one 1692 /* we shouldn't have this device if we have just one
1693 * HA present */ 1693 * HA present */
1694 WARN_ON(!mode_2ha); 1694 WARN_ON(!mode_2ha);
1695 pvt->pci_tad[id] = pdev; 1695 pvt->pci_tad[id] = pdev;
1696 } 1696 }
1697 break; 1697 break;
1698 default: 1698 default:
1699 goto error; 1699 goto error;
1700 } 1700 }
1701 1701
1702 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 1702 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1703 sbridge_dev->bus, 1703 sbridge_dev->bus,
1704 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1704 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1705 pdev); 1705 pdev);
1706 } 1706 }
1707 1707
1708 /* Check if everything were registered */ 1708 /* Check if everything were registered */
1709 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 || 1709 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
1710 !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras || 1710 !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras ||
1711 !pvt->pci_ta) 1711 !pvt->pci_ta)
1712 goto enodev; 1712 goto enodev;
1713 1713
1714 for (i = 0; i < NUM_CHANNELS; i++) { 1714 for (i = 0; i < NUM_CHANNELS; i++) {
1715 if (!pvt->pci_tad[i]) 1715 if (!pvt->pci_tad[i])
1716 goto enodev; 1716 goto enodev;
1717 } 1717 }
1718 return 0; 1718 return 0;
1719 1719
1720 enodev: 1720 enodev:
1721 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 1721 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1722 return -ENODEV; 1722 return -ENODEV;
1723 1723
1724 error: 1724 error:
1725 sbridge_printk(KERN_ERR, 1725 sbridge_printk(KERN_ERR,
1726 "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL, 1726 "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
1727 pdev->device); 1727 pdev->device);
1728 return -EINVAL; 1728 return -EINVAL;
1729 } 1729 }
1730 1730
1731 static int haswell_mci_bind_devs(struct mem_ctl_info *mci, 1731 static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
1732 struct sbridge_dev *sbridge_dev) 1732 struct sbridge_dev *sbridge_dev)
1733 { 1733 {
1734 struct sbridge_pvt *pvt = mci->pvt_info; 1734 struct sbridge_pvt *pvt = mci->pvt_info;
1735 struct pci_dev *pdev, *tmp; 1735 struct pci_dev *pdev, *tmp;
1736 int i; 1736 int i;
1737 bool mode_2ha = false; 1737 bool mode_2ha = false;
1738 1738
1739 tmp = pci_get_device(PCI_VENDOR_ID_INTEL, 1739 tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
1740 PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, NULL); 1740 PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, NULL);
1741 if (tmp) { 1741 if (tmp) {
1742 mode_2ha = true; 1742 mode_2ha = true;
1743 pci_dev_put(tmp); 1743 pci_dev_put(tmp);
1744 } 1744 }
1745 1745
1746 /* there's only one device per system; not tied to any bus */ 1746 /* there's only one device per system; not tied to any bus */
1747 if (pvt->info.pci_vtd == NULL) 1747 if (pvt->info.pci_vtd == NULL)
1748 /* result will be checked later */ 1748 /* result will be checked later */
1749 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL, 1749 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
1750 PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC, 1750 PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
1751 NULL); 1751 NULL);
1752 1752
1753 for (i = 0; i < sbridge_dev->n_devs; i++) { 1753 for (i = 0; i < sbridge_dev->n_devs; i++) {
1754 pdev = sbridge_dev->pdev[i]; 1754 pdev = sbridge_dev->pdev[i];
1755 if (!pdev) 1755 if (!pdev)
1756 continue; 1756 continue;
1757 1757
1758 switch (pdev->device) { 1758 switch (pdev->device) {
1759 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0: 1759 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
1760 pvt->pci_sad0 = pdev; 1760 pvt->pci_sad0 = pdev;
1761 break; 1761 break;
1762 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1: 1762 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
1763 pvt->pci_sad1 = pdev; 1763 pvt->pci_sad1 = pdev;
1764 break; 1764 break;
1765 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: 1765 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
1766 pvt->pci_ha0 = pdev; 1766 pvt->pci_ha0 = pdev;
1767 break; 1767 break;
1768 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA: 1768 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
1769 pvt->pci_ta = pdev; 1769 pvt->pci_ta = pdev;
1770 break; 1770 break;
1771 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL: 1771 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
1772 pvt->pci_ras = pdev; 1772 pvt->pci_ras = pdev;
1773 break; 1773 break;
1774 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0: 1774 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
1775 pvt->pci_tad[0] = pdev; 1775 pvt->pci_tad[0] = pdev;
1776 break; 1776 break;
1777 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1: 1777 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
1778 pvt->pci_tad[1] = pdev; 1778 pvt->pci_tad[1] = pdev;
1779 break; 1779 break;
1780 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2: 1780 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
1781 if (!mode_2ha) 1781 if (!mode_2ha)
1782 pvt->pci_tad[2] = pdev; 1782 pvt->pci_tad[2] = pdev;
1783 break; 1783 break;
1784 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3: 1784 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
1785 if (!mode_2ha) 1785 if (!mode_2ha)
1786 pvt->pci_tad[3] = pdev; 1786 pvt->pci_tad[3] = pdev;
1787 break; 1787 break;
1788 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0: 1788 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
1789 pvt->pci_ddrio = pdev; 1789 pvt->pci_ddrio = pdev;
1790 break; 1790 break;
1791 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: 1791 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
1792 pvt->pci_ha1 = pdev; 1792 pvt->pci_ha1 = pdev;
1793 break; 1793 break;
1794 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA: 1794 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
1795 pvt->pci_ha1_ta = pdev; 1795 pvt->pci_ha1_ta = pdev;
1796 break; 1796 break;
1797 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0: 1797 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
1798 if (mode_2ha) 1798 if (mode_2ha)
1799 pvt->pci_tad[2] = pdev; 1799 pvt->pci_tad[2] = pdev;
1800 break; 1800 break;
1801 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1: 1801 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
1802 if (mode_2ha) 1802 if (mode_2ha)
1803 pvt->pci_tad[3] = pdev; 1803 pvt->pci_tad[3] = pdev;
1804 break; 1804 break;
1805 default: 1805 default:
1806 break; 1806 break;
1807 } 1807 }
1808 1808
1809 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 1809 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1810 sbridge_dev->bus, 1810 sbridge_dev->bus,
1811 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1811 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1812 pdev); 1812 pdev);
1813 } 1813 }
1814 1814
1815 /* Check if everything were registered */ 1815 /* Check if everything were registered */
1816 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 || 1816 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
1817 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) 1817 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
1818 goto enodev; 1818 goto enodev;
1819 1819
1820 for (i = 0; i < NUM_CHANNELS; i++) { 1820 for (i = 0; i < NUM_CHANNELS; i++) {
1821 if (!pvt->pci_tad[i]) 1821 if (!pvt->pci_tad[i])
1822 goto enodev; 1822 goto enodev;
1823 } 1823 }
1824 return 0; 1824 return 0;
1825 1825
1826 enodev: 1826 enodev:
1827 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 1827 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1828 return -ENODEV; 1828 return -ENODEV;
1829 } 1829 }
1830 1830
1831 /**************************************************************************** 1831 /****************************************************************************
1832 Error check routines 1832 Error check routines
1833 ****************************************************************************/ 1833 ****************************************************************************/
1834 1834
1835 /* 1835 /*
1836 * While Sandy Bridge has error count registers, SMI BIOS read values from 1836 * While Sandy Bridge has error count registers, SMI BIOS read values from
1837 * and resets the counters. So, they are not reliable for the OS to read 1837 * and resets the counters. So, they are not reliable for the OS to read
1838 * from them. So, we have no option but to just trust on whatever MCE is 1838 * from them. So, we have no option but to just trust on whatever MCE is
1839 * telling us about the errors. 1839 * telling us about the errors.
1840 */ 1840 */
1841 static void sbridge_mce_output_error(struct mem_ctl_info *mci, 1841 static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1842 const struct mce *m) 1842 const struct mce *m)
1843 { 1843 {
1844 struct mem_ctl_info *new_mci; 1844 struct mem_ctl_info *new_mci;
1845 struct sbridge_pvt *pvt = mci->pvt_info; 1845 struct sbridge_pvt *pvt = mci->pvt_info;
1846 enum hw_event_mc_err_type tp_event; 1846 enum hw_event_mc_err_type tp_event;
1847 char *type, *optype, msg[256]; 1847 char *type, *optype, msg[256];
1848 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); 1848 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
1849 bool overflow = GET_BITFIELD(m->status, 62, 62); 1849 bool overflow = GET_BITFIELD(m->status, 62, 62);
1850 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); 1850 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
1851 bool recoverable; 1851 bool recoverable;
1852 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); 1852 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1853 u32 mscod = GET_BITFIELD(m->status, 16, 31); 1853 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1854 u32 errcode = GET_BITFIELD(m->status, 0, 15); 1854 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1855 u32 channel = GET_BITFIELD(m->status, 0, 3); 1855 u32 channel = GET_BITFIELD(m->status, 0, 3);
1856 u32 optypenum = GET_BITFIELD(m->status, 4, 6); 1856 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1857 long channel_mask, first_channel; 1857 long channel_mask, first_channel;
1858 u8 rank, socket; 1858 u8 rank, socket;
1859 int rc, dimm; 1859 int rc, dimm;
1860 char *area_type = NULL; 1860 char *area_type = NULL;
1861 1861
1862 if (pvt->info.type == IVY_BRIDGE) 1862 if (pvt->info.type == IVY_BRIDGE)
1863 recoverable = true; 1863 recoverable = true;
1864 else 1864 else
1865 recoverable = GET_BITFIELD(m->status, 56, 56); 1865 recoverable = GET_BITFIELD(m->status, 56, 56);
1866 1866
1867 if (uncorrected_error) { 1867 if (uncorrected_error) {
1868 if (ripv) { 1868 if (ripv) {
1869 type = "FATAL"; 1869 type = "FATAL";
1870 tp_event = HW_EVENT_ERR_FATAL; 1870 tp_event = HW_EVENT_ERR_FATAL;
1871 } else { 1871 } else {
1872 type = "NON_FATAL"; 1872 type = "NON_FATAL";
1873 tp_event = HW_EVENT_ERR_UNCORRECTED; 1873 tp_event = HW_EVENT_ERR_UNCORRECTED;
1874 } 1874 }
1875 } else { 1875 } else {
1876 type = "CORRECTED"; 1876 type = "CORRECTED";
1877 tp_event = HW_EVENT_ERR_CORRECTED; 1877 tp_event = HW_EVENT_ERR_CORRECTED;
1878 } 1878 }
1879 1879
1880 /* 1880 /*
1881 * According with Table 15-9 of the Intel Architecture spec vol 3A, 1881 * According with Table 15-9 of the Intel Architecture spec vol 3A,
1882 * memory errors should fit in this mask: 1882 * memory errors should fit in this mask:
1883 * 000f 0000 1mmm cccc (binary) 1883 * 000f 0000 1mmm cccc (binary)
1884 * where: 1884 * where:
1885 * f = Correction Report Filtering Bit. If 1, subsequent errors 1885 * f = Correction Report Filtering Bit. If 1, subsequent errors
1886 * won't be shown 1886 * won't be shown
1887 * mmm = error type 1887 * mmm = error type
1888 * cccc = channel 1888 * cccc = channel
1889 * If the mask doesn't match, report an error to the parsing logic 1889 * If the mask doesn't match, report an error to the parsing logic
1890 */ 1890 */
1891 if (! ((errcode & 0xef80) == 0x80)) { 1891 if (! ((errcode & 0xef80) == 0x80)) {
1892 optype = "Can't parse: it is not a mem"; 1892 optype = "Can't parse: it is not a mem";
1893 } else { 1893 } else {
1894 switch (optypenum) { 1894 switch (optypenum) {
1895 case 0: 1895 case 0:
1896 optype = "generic undef request error"; 1896 optype = "generic undef request error";
1897 break; 1897 break;
1898 case 1: 1898 case 1:
1899 optype = "memory read error"; 1899 optype = "memory read error";
1900 break; 1900 break;
1901 case 2: 1901 case 2:
1902 optype = "memory write error"; 1902 optype = "memory write error";
1903 break; 1903 break;
1904 case 3: 1904 case 3:
1905 optype = "addr/cmd error"; 1905 optype = "addr/cmd error";
1906 break; 1906 break;
1907 case 4: 1907 case 4:
1908 optype = "memory scrubbing error"; 1908 optype = "memory scrubbing error";
1909 break; 1909 break;
1910 default: 1910 default:
1911 optype = "reserved"; 1911 optype = "reserved";
1912 break; 1912 break;
1913 } 1913 }
1914 } 1914 }
1915 1915
1916 /* Only decode errors with an valid address (ADDRV) */ 1916 /* Only decode errors with an valid address (ADDRV) */
1917 if (!GET_BITFIELD(m->status, 58, 58)) 1917 if (!GET_BITFIELD(m->status, 58, 58))
1918 return; 1918 return;
1919 1919
1920 rc = get_memory_error_data(mci, m->addr, &socket, 1920 rc = get_memory_error_data(mci, m->addr, &socket,
1921 &channel_mask, &rank, &area_type, msg); 1921 &channel_mask, &rank, &area_type, msg);
1922 if (rc < 0) 1922 if (rc < 0)
1923 goto err_parsing; 1923 goto err_parsing;
1924 new_mci = get_mci_for_node_id(socket); 1924 new_mci = get_mci_for_node_id(socket);
1925 if (!new_mci) { 1925 if (!new_mci) {
1926 strcpy(msg, "Error: socket got corrupted!"); 1926 strcpy(msg, "Error: socket got corrupted!");
1927 goto err_parsing; 1927 goto err_parsing;
1928 } 1928 }
1929 mci = new_mci; 1929 mci = new_mci;
1930 pvt = mci->pvt_info; 1930 pvt = mci->pvt_info;
1931 1931
1932 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS); 1932 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
1933 1933
1934 if (rank < 4) 1934 if (rank < 4)
1935 dimm = 0; 1935 dimm = 0;
1936 else if (rank < 8) 1936 else if (rank < 8)
1937 dimm = 1; 1937 dimm = 1;
1938 else 1938 else
1939 dimm = 2; 1939 dimm = 2;
1940 1940
1941 1941
1942 /* 1942 /*
1943 * FIXME: On some memory configurations (mirror, lockstep), the 1943 * FIXME: On some memory configurations (mirror, lockstep), the
1944 * Memory Controller can't point the error to a single DIMM. The 1944 * Memory Controller can't point the error to a single DIMM. The
1945 * EDAC core should be handling the channel mask, in order to point 1945 * EDAC core should be handling the channel mask, in order to point
1946 * to the group of dimm's where the error may be happening. 1946 * to the group of dimm's where the error may be happening.
1947 */ 1947 */
1948 if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg) 1948 if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg)
1949 channel = first_channel; 1949 channel = first_channel;
1950 1950
1951 snprintf(msg, sizeof(msg), 1951 snprintf(msg, sizeof(msg),
1952 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d", 1952 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
1953 overflow ? " OVERFLOW" : "", 1953 overflow ? " OVERFLOW" : "",
1954 (uncorrected_error && recoverable) ? " recoverable" : "", 1954 (uncorrected_error && recoverable) ? " recoverable" : "",
1955 area_type, 1955 area_type,
1956 mscod, errcode, 1956 mscod, errcode,
1957 socket, 1957 socket,
1958 channel_mask, 1958 channel_mask,
1959 rank); 1959 rank);
1960 1960
1961 edac_dbg(0, "%s\n", msg); 1961 edac_dbg(0, "%s\n", msg);
1962 1962
1963 /* FIXME: need support for channel mask */ 1963 /* FIXME: need support for channel mask */
1964 1964
1965 if (channel == CHANNEL_UNSPECIFIED) 1965 if (channel == CHANNEL_UNSPECIFIED)
1966 channel = -1; 1966 channel = -1;
1967 1967
1968 /* Call the helper to output message */ 1968 /* Call the helper to output message */
1969 edac_mc_handle_error(tp_event, mci, core_err_cnt, 1969 edac_mc_handle_error(tp_event, mci, core_err_cnt,
1970 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, 1970 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
1971 channel, dimm, -1, 1971 channel, dimm, -1,
1972 optype, msg); 1972 optype, msg);
1973 return; 1973 return;
1974 err_parsing: 1974 err_parsing:
1975 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, 1975 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
1976 -1, -1, -1, 1976 -1, -1, -1,
1977 msg, ""); 1977 msg, "");
1978 1978
1979 } 1979 }
1980 1980
1981 /* 1981 /*
1982 * sbridge_check_error Retrieve and process errors reported by the 1982 * sbridge_check_error Retrieve and process errors reported by the
1983 * hardware. Called by the Core module. 1983 * hardware. Called by the Core module.
1984 */ 1984 */
1985 static void sbridge_check_error(struct mem_ctl_info *mci) 1985 static void sbridge_check_error(struct mem_ctl_info *mci)
1986 { 1986 {
1987 struct sbridge_pvt *pvt = mci->pvt_info; 1987 struct sbridge_pvt *pvt = mci->pvt_info;
1988 int i; 1988 int i;
1989 unsigned count = 0; 1989 unsigned count = 0;
1990 struct mce *m; 1990 struct mce *m;
1991 1991
1992 /* 1992 /*
1993 * MCE first step: Copy all mce errors into a temporary buffer 1993 * MCE first step: Copy all mce errors into a temporary buffer
1994 * We use a double buffering here, to reduce the risk of 1994 * We use a double buffering here, to reduce the risk of
1995 * loosing an error. 1995 * loosing an error.
1996 */ 1996 */
1997 smp_rmb(); 1997 smp_rmb();
1998 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) 1998 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
1999 % MCE_LOG_LEN; 1999 % MCE_LOG_LEN;
2000 if (!count) 2000 if (!count)
2001 return; 2001 return;
2002 2002
2003 m = pvt->mce_outentry; 2003 m = pvt->mce_outentry;
2004 if (pvt->mce_in + count > MCE_LOG_LEN) { 2004 if (pvt->mce_in + count > MCE_LOG_LEN) {
2005 unsigned l = MCE_LOG_LEN - pvt->mce_in; 2005 unsigned l = MCE_LOG_LEN - pvt->mce_in;
2006 2006
2007 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l); 2007 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
2008 smp_wmb(); 2008 smp_wmb();
2009 pvt->mce_in = 0; 2009 pvt->mce_in = 0;
2010 count -= l; 2010 count -= l;
2011 m += l; 2011 m += l;
2012 } 2012 }
2013 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count); 2013 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
2014 smp_wmb(); 2014 smp_wmb();
2015 pvt->mce_in += count; 2015 pvt->mce_in += count;
2016 2016
2017 smp_rmb(); 2017 smp_rmb();
2018 if (pvt->mce_overrun) { 2018 if (pvt->mce_overrun) {
2019 sbridge_printk(KERN_ERR, "Lost %d memory errors\n", 2019 sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
2020 pvt->mce_overrun); 2020 pvt->mce_overrun);
2021 smp_wmb(); 2021 smp_wmb();
2022 pvt->mce_overrun = 0; 2022 pvt->mce_overrun = 0;
2023 } 2023 }
2024 2024
2025 /* 2025 /*
2026 * MCE second step: parse errors and display 2026 * MCE second step: parse errors and display
2027 */ 2027 */
2028 for (i = 0; i < count; i++) 2028 for (i = 0; i < count; i++)
2029 sbridge_mce_output_error(mci, &pvt->mce_outentry[i]); 2029 sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
2030 } 2030 }
2031 2031
2032 /* 2032 /*
2033 * sbridge_mce_check_error Replicates mcelog routine to get errors 2033 * sbridge_mce_check_error Replicates mcelog routine to get errors
2034 * This routine simply queues mcelog errors, and 2034 * This routine simply queues mcelog errors, and
2035 * return. The error itself should be handled later 2035 * return. The error itself should be handled later
2036 * by sbridge_check_error. 2036 * by sbridge_check_error.
2037 * WARNING: As this routine should be called at NMI time, extra care should 2037 * WARNING: As this routine should be called at NMI time, extra care should
2038 * be taken to avoid deadlocks, and to be as fast as possible. 2038 * be taken to avoid deadlocks, and to be as fast as possible.
2039 */ 2039 */
2040 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, 2040 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
2041 void *data) 2041 void *data)
2042 { 2042 {
2043 struct mce *mce = (struct mce *)data; 2043 struct mce *mce = (struct mce *)data;
2044 struct mem_ctl_info *mci; 2044 struct mem_ctl_info *mci;
2045 struct sbridge_pvt *pvt; 2045 struct sbridge_pvt *pvt;
2046 char *type; 2046 char *type;
2047 2047
2048 if (get_edac_report_status() == EDAC_REPORTING_DISABLED) 2048 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
2049 return NOTIFY_DONE; 2049 return NOTIFY_DONE;
2050 2050
2051 mci = get_mci_for_node_id(mce->socketid); 2051 mci = get_mci_for_node_id(mce->socketid);
2052 if (!mci) 2052 if (!mci)
2053 return NOTIFY_BAD; 2053 return NOTIFY_BAD;
2054 pvt = mci->pvt_info; 2054 pvt = mci->pvt_info;
2055 2055
2056 /* 2056 /*
2057 * Just let mcelog handle it if the error is 2057 * Just let mcelog handle it if the error is
2058 * outside the memory controller. A memory error 2058 * outside the memory controller. A memory error
2059 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0. 2059 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
2060 * bit 12 has an special meaning. 2060 * bit 12 has an special meaning.
2061 */ 2061 */
2062 if ((mce->status & 0xefff) >> 7 != 1) 2062 if ((mce->status & 0xefff) >> 7 != 1)
2063 return NOTIFY_DONE; 2063 return NOTIFY_DONE;
2064 2064
2065 if (mce->mcgstatus & MCG_STATUS_MCIP) 2065 if (mce->mcgstatus & MCG_STATUS_MCIP)
2066 type = "Exception"; 2066 type = "Exception";
2067 else 2067 else
2068 type = "Event"; 2068 type = "Event";
2069 2069
2070 sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); 2070 sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
2071 2071
2072 sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx " 2072 sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
2073 "Bank %d: %016Lx\n", mce->extcpu, type, 2073 "Bank %d: %016Lx\n", mce->extcpu, type,
2074 mce->mcgstatus, mce->bank, mce->status); 2074 mce->mcgstatus, mce->bank, mce->status);
2075 sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc); 2075 sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
2076 sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr); 2076 sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
2077 sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc); 2077 sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
2078 2078
2079 sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET " 2079 sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
2080 "%u APIC %x\n", mce->cpuvendor, mce->cpuid, 2080 "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
2081 mce->time, mce->socketid, mce->apicid); 2081 mce->time, mce->socketid, mce->apicid);
2082 2082
2083 smp_rmb(); 2083 smp_rmb();
2084 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { 2084 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
2085 smp_wmb(); 2085 smp_wmb();
2086 pvt->mce_overrun++; 2086 pvt->mce_overrun++;
2087 return NOTIFY_DONE; 2087 return NOTIFY_DONE;
2088 } 2088 }
2089 2089
2090 /* Copy memory error at the ringbuffer */ 2090 /* Copy memory error at the ringbuffer */
2091 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce)); 2091 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
2092 smp_wmb(); 2092 smp_wmb();
2093 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN; 2093 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
2094 2094
2095 /* Handle fatal errors immediately */ 2095 /* Handle fatal errors immediately */
2096 if (mce->mcgstatus & 1) 2096 if (mce->mcgstatus & 1)
2097 sbridge_check_error(mci); 2097 sbridge_check_error(mci);
2098 2098
2099 /* Advice mcelog that the error were handled */ 2099 /* Advice mcelog that the error were handled */
2100 return NOTIFY_STOP; 2100 return NOTIFY_STOP;
2101 } 2101 }
2102 2102
2103 static struct notifier_block sbridge_mce_dec = { 2103 static struct notifier_block sbridge_mce_dec = {
2104 .notifier_call = sbridge_mce_check_error, 2104 .notifier_call = sbridge_mce_check_error,
2105 }; 2105 };
2106 2106
2107 /**************************************************************************** 2107 /****************************************************************************
2108 EDAC register/unregister logic 2108 EDAC register/unregister logic
2109 ****************************************************************************/ 2109 ****************************************************************************/
2110 2110
2111 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) 2111 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
2112 { 2112 {
2113 struct mem_ctl_info *mci = sbridge_dev->mci; 2113 struct mem_ctl_info *mci = sbridge_dev->mci;
2114 struct sbridge_pvt *pvt; 2114 struct sbridge_pvt *pvt;
2115 2115
2116 if (unlikely(!mci || !mci->pvt_info)) { 2116 if (unlikely(!mci || !mci->pvt_info)) {
2117 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); 2117 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
2118 2118
2119 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n"); 2119 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
2120 return; 2120 return;
2121 } 2121 }
2122 2122
2123 pvt = mci->pvt_info; 2123 pvt = mci->pvt_info;
2124 2124
2125 edac_dbg(0, "MC: mci = %p, dev = %p\n", 2125 edac_dbg(0, "MC: mci = %p, dev = %p\n",
2126 mci, &sbridge_dev->pdev[0]->dev); 2126 mci, &sbridge_dev->pdev[0]->dev);
2127 2127
2128 /* Remove MC sysfs nodes */ 2128 /* Remove MC sysfs nodes */
2129 edac_mc_del_mc(mci->pdev); 2129 edac_mc_del_mc(mci->pdev);
2130 2130
2131 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); 2131 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
2132 kfree(mci->ctl_name); 2132 kfree(mci->ctl_name);
2133 edac_mc_free(mci); 2133 edac_mc_free(mci);
2134 sbridge_dev->mci = NULL; 2134 sbridge_dev->mci = NULL;
2135 } 2135 }
2136 2136
2137 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) 2137 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
2138 { 2138 {
2139 struct mem_ctl_info *mci; 2139 struct mem_ctl_info *mci;
2140 struct edac_mc_layer layers[2]; 2140 struct edac_mc_layer layers[2];
2141 struct sbridge_pvt *pvt; 2141 struct sbridge_pvt *pvt;
2142 struct pci_dev *pdev = sbridge_dev->pdev[0]; 2142 struct pci_dev *pdev = sbridge_dev->pdev[0];
2143 int rc; 2143 int rc;
2144 2144
2145 /* Check the number of active and not disabled channels */ 2145 /* Check the number of active and not disabled channels */
2146 rc = check_if_ecc_is_active(sbridge_dev->bus, type); 2146 rc = check_if_ecc_is_active(sbridge_dev->bus, type);
2147 if (unlikely(rc < 0)) 2147 if (unlikely(rc < 0))
2148 return rc; 2148 return rc;
2149 2149
2150 /* allocate a new MC control structure */ 2150 /* allocate a new MC control structure */
2151 layers[0].type = EDAC_MC_LAYER_CHANNEL; 2151 layers[0].type = EDAC_MC_LAYER_CHANNEL;
2152 layers[0].size = NUM_CHANNELS; 2152 layers[0].size = NUM_CHANNELS;
2153 layers[0].is_virt_csrow = false; 2153 layers[0].is_virt_csrow = false;
2154 layers[1].type = EDAC_MC_LAYER_SLOT; 2154 layers[1].type = EDAC_MC_LAYER_SLOT;
2155 layers[1].size = MAX_DIMMS; 2155 layers[1].size = MAX_DIMMS;
2156 layers[1].is_virt_csrow = true; 2156 layers[1].is_virt_csrow = true;
2157 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers, 2157 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
2158 sizeof(*pvt)); 2158 sizeof(*pvt));
2159 2159
2160 if (unlikely(!mci)) 2160 if (unlikely(!mci))
2161 return -ENOMEM; 2161 return -ENOMEM;
2162 2162
2163 edac_dbg(0, "MC: mci = %p, dev = %p\n", 2163 edac_dbg(0, "MC: mci = %p, dev = %p\n",
2164 mci, &pdev->dev); 2164 mci, &pdev->dev);
2165 2165
2166 pvt = mci->pvt_info; 2166 pvt = mci->pvt_info;
2167 memset(pvt, 0, sizeof(*pvt)); 2167 memset(pvt, 0, sizeof(*pvt));
2168 2168
2169 /* Associate sbridge_dev and mci for future usage */ 2169 /* Associate sbridge_dev and mci for future usage */
2170 pvt->sbridge_dev = sbridge_dev; 2170 pvt->sbridge_dev = sbridge_dev;
2171 sbridge_dev->mci = mci; 2171 sbridge_dev->mci = mci;
2172 2172
2173 mci->mtype_cap = MEM_FLAG_DDR3; 2173 mci->mtype_cap = MEM_FLAG_DDR3;
2174 mci->edac_ctl_cap = EDAC_FLAG_NONE; 2174 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2175 mci->edac_cap = EDAC_FLAG_NONE; 2175 mci->edac_cap = EDAC_FLAG_NONE;
2176 mci->mod_name = "sbridge_edac.c"; 2176 mci->mod_name = "sbridge_edac.c";
2177 mci->mod_ver = SBRIDGE_REVISION; 2177 mci->mod_ver = SBRIDGE_REVISION;
2178 mci->dev_name = pci_name(pdev); 2178 mci->dev_name = pci_name(pdev);
2179 mci->ctl_page_to_phys = NULL; 2179 mci->ctl_page_to_phys = NULL;
2180 2180
2181 /* Set the function pointer to an actual operation function */ 2181 /* Set the function pointer to an actual operation function */
2182 mci->edac_check = sbridge_check_error; 2182 mci->edac_check = sbridge_check_error;
2183 2183
2184 pvt->info.type = type; 2184 pvt->info.type = type;
2185 switch (type) { 2185 switch (type) {
2186 case IVY_BRIDGE: 2186 case IVY_BRIDGE:
2187 pvt->info.rankcfgr = IB_RANK_CFG_A; 2187 pvt->info.rankcfgr = IB_RANK_CFG_A;
2188 pvt->info.get_tolm = ibridge_get_tolm; 2188 pvt->info.get_tolm = ibridge_get_tolm;
2189 pvt->info.get_tohm = ibridge_get_tohm; 2189 pvt->info.get_tohm = ibridge_get_tohm;
2190 pvt->info.dram_rule = ibridge_dram_rule; 2190 pvt->info.dram_rule = ibridge_dram_rule;
2191 pvt->info.get_memory_type = get_memory_type; 2191 pvt->info.get_memory_type = get_memory_type;
2192 pvt->info.get_node_id = get_node_id; 2192 pvt->info.get_node_id = get_node_id;
2193 pvt->info.rir_limit = rir_limit; 2193 pvt->info.rir_limit = rir_limit;
2194 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 2194 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
2195 pvt->info.interleave_list = ibridge_interleave_list; 2195 pvt->info.interleave_list = ibridge_interleave_list;
2196 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 2196 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2197 pvt->info.interleave_pkg = ibridge_interleave_pkg; 2197 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2198 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx); 2198 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
2199 2199
2200 /* Store pci devices at mci for faster access */ 2200 /* Store pci devices at mci for faster access */
2201 rc = ibridge_mci_bind_devs(mci, sbridge_dev); 2201 rc = ibridge_mci_bind_devs(mci, sbridge_dev);
2202 if (unlikely(rc < 0)) 2202 if (unlikely(rc < 0))
2203 goto fail0; 2203 goto fail0;
2204 break; 2204 break;
2205 case SANDY_BRIDGE: 2205 case SANDY_BRIDGE:
2206 pvt->info.rankcfgr = SB_RANK_CFG_A; 2206 pvt->info.rankcfgr = SB_RANK_CFG_A;
2207 pvt->info.get_tolm = sbridge_get_tolm; 2207 pvt->info.get_tolm = sbridge_get_tolm;
2208 pvt->info.get_tohm = sbridge_get_tohm; 2208 pvt->info.get_tohm = sbridge_get_tohm;
2209 pvt->info.dram_rule = sbridge_dram_rule; 2209 pvt->info.dram_rule = sbridge_dram_rule;
2210 pvt->info.get_memory_type = get_memory_type; 2210 pvt->info.get_memory_type = get_memory_type;
2211 pvt->info.get_node_id = get_node_id; 2211 pvt->info.get_node_id = get_node_id;
2212 pvt->info.rir_limit = rir_limit; 2212 pvt->info.rir_limit = rir_limit;
2213 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule); 2213 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
2214 pvt->info.interleave_list = sbridge_interleave_list; 2214 pvt->info.interleave_list = sbridge_interleave_list;
2215 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); 2215 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
2216 pvt->info.interleave_pkg = sbridge_interleave_pkg; 2216 pvt->info.interleave_pkg = sbridge_interleave_pkg;
2217 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); 2217 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
2218 2218
2219 /* Store pci devices at mci for faster access */ 2219 /* Store pci devices at mci for faster access */
2220 rc = sbridge_mci_bind_devs(mci, sbridge_dev); 2220 rc = sbridge_mci_bind_devs(mci, sbridge_dev);
2221 if (unlikely(rc < 0)) 2221 if (unlikely(rc < 0))
2222 goto fail0; 2222 goto fail0;
2223 break; 2223 break;
2224 case HASWELL: 2224 case HASWELL:
2225 /* rankcfgr isn't used */ 2225 /* rankcfgr isn't used */
2226 pvt->info.get_tolm = haswell_get_tolm; 2226 pvt->info.get_tolm = haswell_get_tolm;
2227 pvt->info.get_tohm = haswell_get_tohm; 2227 pvt->info.get_tohm = haswell_get_tohm;
2228 pvt->info.dram_rule = ibridge_dram_rule; 2228 pvt->info.dram_rule = ibridge_dram_rule;
2229 pvt->info.get_memory_type = haswell_get_memory_type; 2229 pvt->info.get_memory_type = haswell_get_memory_type;
2230 pvt->info.get_node_id = haswell_get_node_id; 2230 pvt->info.get_node_id = haswell_get_node_id;
2231 pvt->info.rir_limit = haswell_rir_limit; 2231 pvt->info.rir_limit = haswell_rir_limit;
2232 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); 2232 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
2233 pvt->info.interleave_list = ibridge_interleave_list; 2233 pvt->info.interleave_list = ibridge_interleave_list;
2234 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); 2234 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2235 pvt->info.interleave_pkg = ibridge_interleave_pkg; 2235 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2236 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx); 2236 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
2237 2237
2238 /* Store pci devices at mci for faster access */ 2238 /* Store pci devices at mci for faster access */
2239 rc = haswell_mci_bind_devs(mci, sbridge_dev); 2239 rc = haswell_mci_bind_devs(mci, sbridge_dev);
2240 if (unlikely(rc < 0)) 2240 if (unlikely(rc < 0))
2241 goto fail0; 2241 goto fail0;
2242 break; 2242 break;
2243 } 2243 }
2244 2244
2245 /* Get dimm basic config and the memory layout */ 2245 /* Get dimm basic config and the memory layout */
2246 get_dimm_config(mci); 2246 get_dimm_config(mci);
2247 get_memory_layout(mci); 2247 get_memory_layout(mci);
2248 2248
2249 /* record ptr to the generic device */ 2249 /* record ptr to the generic device */
2250 mci->pdev = &pdev->dev; 2250 mci->pdev = &pdev->dev;
2251 2251
2252 /* add this new MC control structure to EDAC's list of MCs */ 2252 /* add this new MC control structure to EDAC's list of MCs */
2253 if (unlikely(edac_mc_add_mc(mci))) { 2253 if (unlikely(edac_mc_add_mc(mci))) {
2254 edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); 2254 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
2255 rc = -EINVAL; 2255 rc = -EINVAL;
2256 goto fail0; 2256 goto fail0;
2257 } 2257 }
2258 2258
2259 return 0; 2259 return 0;
2260 2260
2261 fail0: 2261 fail0:
2262 kfree(mci->ctl_name); 2262 kfree(mci->ctl_name);
2263 edac_mc_free(mci); 2263 edac_mc_free(mci);
2264 sbridge_dev->mci = NULL; 2264 sbridge_dev->mci = NULL;
2265 return rc; 2265 return rc;
2266 } 2266 }
2267 2267
/*
 * sbridge_probe	Probe for ONE instance of device to see if it is
 *			present.
 * return:
 *	0 for FOUND a device
 *	< 0 for error code
 */

static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* rc stays -ENODEV if pdev->device matches none of the cases below */
	int rc = -ENODEV;
	u8 mc, num_mc = 0;
	struct sbridge_dev *sbridge_dev;
	enum type type = SANDY_BRIDGE;

	/* get the pci devices we want to reserve for our use */
	mutex_lock(&sbridge_edac_lock);

	/*
	 * All memory controllers are allocated at the first pass.
	 * Subsequent probe calls (one per matching PCI function) are no-ops.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&sbridge_edac_lock);
		return -ENODEV;
	}
	probed++;

	/* The device id we were probed with selects the CPU generation */
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
		type = IVY_BRIDGE;
		break;
	case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
		type = SANDY_BRIDGE;
		break;
	case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table);
		type = HASWELL;
		break;
	}
	if (unlikely(rc < 0))
		goto fail0;
	mc = 0;

	/* Register one EDAC MC per memory controller found above */
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
			 mc, mc + 1, num_mc);

		sbridge_dev->mc = mc++;
		rc = sbridge_register_mci(sbridge_dev, type);
		if (unlikely(rc < 0))
			goto fail1;
	}

	sbridge_printk(KERN_INFO, "Driver loaded.\n");

	mutex_unlock(&sbridge_edac_lock);
	return 0;

fail1:
	/* Unwind every MC registered before the failure, then drop the devs */
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	sbridge_put_all_devices();
fail0:
	mutex_unlock(&sbridge_edac_lock);
	return rc;
}
2337 2337
/*
 * sbridge_remove	destructor for one instance of device
 *
 */
static void sbridge_remove(struct pci_dev *pdev)
{
	struct sbridge_dev *sbridge_dev;

	edac_dbg(0, "\n");

	/*
	 * we have a trouble here: pdev value for removal will be wrong, since
	 * it will point to the X58 register used to detect that the machine
	 * is a Nehalem or upper design. However, due to the way several PCI
	 * devices are grouped together to provide MC functionality, we need
	 * to use a different method for releasing the devices
	 * NOTE(review): the X58/Nehalem wording looks inherited from an older
	 * driver — the point stands (pdev is ignored; teardown walks the list).
	 */

	mutex_lock(&sbridge_edac_lock);

	/* Nothing to do if probe never completed (or remove already ran) */
	if (unlikely(!probed)) {
		mutex_unlock(&sbridge_edac_lock);
		return;
	}

	/* Tear down every registered MC, not just the one behind @pdev */
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	/* Release PCI resources */
	sbridge_put_all_devices();

	probed--;

	mutex_unlock(&sbridge_edac_lock);
}
2373 2373
/* Export the id table so userspace tooling can match this module to devices */
MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);

/*
 * sbridge_driver	pci_driver structure for this module
 *			Only one device per system is claimed at probe time;
 *			the rest are looked up by sbridge_probe() itself.
 */
static struct pci_driver sbridge_driver = {
	.name     = "sbridge_edac",
	.probe    = sbridge_probe,
	.remove   = sbridge_remove,
	.id_table = sbridge_pci_tbl,
};
2386 2386
2387 /* 2387 /*
2388 * sbridge_init Module entry function 2388 * sbridge_init Module entry function
2389 * Try to initialize this module for its devices 2389 * Try to initialize this module for its devices
2390 */ 2390 */
2391 static int __init sbridge_init(void) 2391 static int __init sbridge_init(void)
2392 { 2392 {
2393 int pci_rc; 2393 int pci_rc;
2394 2394
2395 edac_dbg(2, "\n"); 2395 edac_dbg(2, "\n");
2396 2396
2397 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 2397 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2398 opstate_init(); 2398 opstate_init();
2399 2399
2400 pci_rc = pci_register_driver(&sbridge_driver); 2400 pci_rc = pci_register_driver(&sbridge_driver);
2401 if (pci_rc >= 0) { 2401 if (pci_rc >= 0) {
2402 mce_register_decode_chain(&sbridge_mce_dec); 2402 mce_register_decode_chain(&sbridge_mce_dec);
2403 if (get_edac_report_status() == EDAC_REPORTING_DISABLED) 2403 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
2404 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n"); 2404 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
2405 return 0; 2405 return 0;
2406 } 2406 }
2407 2407
2408 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", 2408 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
2409 pci_rc); 2409 pci_rc);
2410 2410
2411 return pci_rc; 2411 return pci_rc;
2412 } 2412 }
2413 2413
/*
 *	sbridge_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit sbridge_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&sbridge_driver);
	/* Detach from the MCE decode chain after the PCI driver is gone */
	mce_unregister_decode_chain(&sbridge_mce_dec);
}
2424 2424
module_init(sbridge_init);
module_exit(sbridge_exit);

/* edac_op_state: 0=Poll, 1=NMI; exposed read-only in sysfs (mode 0444) */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
		   SBRIDGE_REVISION);
2436 2436