Commit e2823299cdc140cccd40484d690ab1d90d75b099

Authored by Linus Torvalds

Merge branch 'linux_next' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac

Pull EDAC fixes from Mauro Carvalho Chehab:
 "Two EDAC fixes:

   - i7300_edac currently reports a wrong number of DIMMs when the
     memory controller is in single channel mode

   - on some Sandy Bridge machines, the EDAC driver bails out because
     one of the PCI IDs used by the driver is hidden by the BIOS.  As
     the driver uses it only to detect the type of memory, make it
     optional in the driver"

* 'linux_next' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac:
  edac: sb_edac.c should not require prescence of IMC_DDRIO device
  i7300_edac: Fix memory detection in single mode
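
The sb_edac.c half of the pull is not shown in this view. Conceptually,
per the message above, the change makes the IMC_DDRIO device lookup
non-fatal, since the BIOS may hide that PCI ID and the driver needs the
device only to detect the memory type. A sketch of the idea (illustrative
field names, not the literal patch):

	/* Probe one PCI device from the driver's descriptor table */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_descr->dev_id, pdev);
	if (!pdev) {
		if (dev_descr->optional)
			return 0;	/* e.g. IMC_DDRIO: absence tolerated */
		return -ENODEV;		/* required device: still bail out */
	}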

Showing 2 changed files (inline diff: removed lines are prefixed with '-', added lines with '+')

drivers/edac/i7300_edac.c
/*
 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
 * Copyright (c) 2010 by:
 *	Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * Red Hat Inc. http://www.redhat.com
 *
 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
 *	http://www.intel.com/Assets/PDF/datasheet/318082.pdf
 *
 * TODO: The chipset also allows checking for PCI Express errors. Currently,
 * the driver covers only memory errors.
 *
 * This driver uses the "csrows" EDAC attribute to represent the DIMM slot#
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>

#include "edac_core.h"

/*
 * Alter this version for the I7300 module when modifications are made
 */
#define I7300_REVISION	" Ver: 1.0.0"

#define EDAC_MOD_STR	"i7300_edac"

#define i7300_printk(level, fmt, arg...) \
	edac_printk(level, "i7300", fmt, ##arg)

#define i7300_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)

/***********************************************
 * i7300 Limit constants, structs and static vars
 ***********************************************/

/*
 * Memory topology is organized as:
 *	Branch 0 - 2 channels: channels 0 and 1 (FBD0 PCI dev 21.0)
 *	Branch 1 - 2 channels: channels 2 and 3 (FBD1 PCI dev 22.0)
 * Each channel can have up to 8 DIMM sets (called SLOTS).
 * Slots should generally be filled in pairs, except in Single Channel
 * mode of operation, where only slot 0/channel 0 is filled.
 * In normal operation mode, the two channels on a branch should be
 * filled together for the same SLOT#.
 * When in mirrored mode, Branch 1 replicates the memory at Branch 0, so
 * the four channels on both branches should be filled.
 */

/* Limits for i7300 */
#define MAX_SLOTS		8
#define MAX_BRANCHES		2
#define MAX_CH_PER_BRANCH	2
#define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
#define MAX_MIR			3

#define to_channel(ch, branch)	((((branch)) << 1) | (ch))

#define to_csrow(slot, ch, branch) \
		(to_channel(ch, branch) | ((slot) << 2))

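/*
 * Worked example (an editor's illustration, not part of the original
 * file): slot 3, channel 1, branch 1 gives
 *	to_channel(1, 1)  = (1 << 1) | 1 = 3
 *	to_csrow(3, 1, 1) = 3 | (3 << 2) = 15
 * i.e. the csrow index packs the channel into bits 0-1 and the slot
 * into bits 2-4.
 */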
/* Device name and register DID (Device ID) */
struct i7300_dev_info {
	const char *ctl_name;		/* name for this device */
	u16 fsb_mapping_errors;		/* DID for the branch map/control */
};

/* Table of device attributes supported by this driver */
static const struct i7300_dev_info i7300_devs[] = {
	{
		.ctl_name = "I7300",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
	},
};

struct i7300_dimm_info {
	int megabytes;		/* size, 0 means not present */
};

/* driver private data structure */
struct i7300_pvt {
	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0 and 22.0 */

	u16 tolm;		/* top of low memory */
	u64 ambase;		/* AMB BAR */

	u32 mc_settings;	/* Report several settings */
	u32 mc_settings_a;

	u16 mir[MAX_MIR];	/* Memory Interleave Reg */

	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technology Reg */
	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */

	/* DIMM information matrix, allocating architecture maximums */
	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];

	/* Temporary buffer for use when preparing error messages */
	char *tmp_prt_buffer;
};

/* FIXME: Why do we need to have this static? */
static struct edac_pci_ctl_info *i7300_pci;

/***************************************************
 * i7300 Register definitions for memory enumeration
 ***************************************************/

/*
 * Device 16,
 * Function 0: System Address (not documented)
 * Function 1: Memory Branch Map, Control, Errors Register
 */

/* OFFSETS for Function 0 */
#define AMBASE		0x48 /* AMB Mem Mapped Reg Region Base */
#define MAXCH		0x56 /* Max Channel Number */
#define MAXDIMMPERCH	0x57 /* Max DIMM PER Channel Number */

/* OFFSETS for Function 1 */
#define MC_SETTINGS	0x40
#define IS_MIRRORED(mc)			((mc) & (1 << 16))
#define IS_ECC_ENABLED(mc)		((mc) & (1 << 5))
#define IS_RETRY_ENABLED(mc)		((mc) & (1 << 31))
#define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))

#define MC_SETTINGS_A	0x58
#define IS_SINGLE_MODE(mca)		((mca) & (1 << 14))

#define TOLM		0x6C

#define MIR0		0x80
#define MIR1		0x84
#define MIR2		0x88

/*
 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 * seems that we cannot use this information directly for the same purpose.
 * Each memory slot may have up to 2 AMB interfaces, one for the inbound
 * interface and another for the outbound interface to the next slot.
 * For now, the driver just stores the AMB present registers, but relies
 * only on the MTR info to detect memory.
 * The datasheet is also not clear about how to map each AMBPRESENT register
 * to one of the 4 available channels.
 */
#define AMBPRESENT_0	0x64
#define AMBPRESENT_1	0x66

static const u16 mtr_regs[MAX_SLOTS] = {
	0x80, 0x84, 0x88, 0x8c,
	0x82, 0x86, 0x8a, 0x8e
};

/*
 * Defines to extract the various fields from the
 *	MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
#define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DRAM_BANKS_ADDR_BITS	2
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)

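/*
 * Worked example (an editor's illustration, not part of the original
 * file): an MTR value with the present bit set, rows code 1 (14 row
 * bits), cols code 1 (11 column bits), single rank and 4 banks yields,
 * in decode_mtr() below, 2 + 14 + 11 + 0 + 6 - 20 - 3 = 10 address
 * bits, i.e. a 2^10 = 1024 MB DIMM.
 */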
/************************************************
 * i7300 Register definitions for error detection
 ************************************************/

/*
 * Device 16.1: FBD Error Registers
 */
#define FERR_FAT_FBD	0x98
static const char *ferr_fat_fbd_name[] = {
	[22] = "Non-Redundant Fast Reset Timeout",
	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
	[1]  = "Memory or FBD configuration CRC read error",
	[0]  = "Memory Write error on non-redundant retry or "
	       "FBD configuration Write error on retry",
};
#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))

#define FERR_NF_FBD	0xa0
static const char *ferr_nf_fbd_name[] = {
	[24] = "DIMM-Spare Copy Completed",
	[23] = "DIMM-Spare Copy Initiated",
	[22] = "Redundant Fast Reset Timeout",
	[21] = "Memory Write error on redundant retry",
	[18] = "SPD protocol Error",
	[17] = "FBD Northbound parity error on FBD Sync Status",
	[16] = "Correctable Patrol Data ECC",
	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
	[14] = "Correctable Mirrored Demand Data ECC",
	[13] = "Correctable Non-Mirrored Demand Data ECC",
	[11] = "Memory or FBD configuration CRC read error",
	[10] = "FBD Configuration Write error on first attempt",
	[9]  = "Memory Write error on first attempt",
	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[4]  = "Aliased Uncorrectable Patrol Data ECC",
	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[0]  = "Uncorrectable Data ECC on Replay",
};
#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
			      (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			      (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			      (1 << 1)  | (1 << 0))

#define EMASK_FBD	0xa8
#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
			    (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
			    (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
			    (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
			    (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			    (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			    (1 << 1)  | (1 << 0))

/*
 * Device 16.2: Global Error Registers
 */

#define FERR_GLOBAL_HI	0x48
static const char *ferr_global_hi_name[] = {
	[3] = "FSB 3 Fatal Error",
	[2] = "FSB 2 Fatal Error",
	[1] = "FSB 1 Fatal Error",
	[0] = "FSB 0 Fatal Error",
};
#define ferr_global_hi_is_fatal(errno)	1

#define FERR_GLOBAL_LO	0x40
static const char *ferr_global_lo_name[] = {
	[31] = "Internal MCH Fatal Error",
	[30] = "Intel QuickData Technology Device Fatal Error",
	[29] = "FSB1 Fatal Error",
	[28] = "FSB0 Fatal Error",
	[27] = "FBD Channel 3 Fatal Error",
	[26] = "FBD Channel 2 Fatal Error",
	[25] = "FBD Channel 1 Fatal Error",
	[24] = "FBD Channel 0 Fatal Error",
	[23] = "PCI Express Device 7 Fatal Error",
	[22] = "PCI Express Device 6 Fatal Error",
	[21] = "PCI Express Device 5 Fatal Error",
	[20] = "PCI Express Device 4 Fatal Error",
	[19] = "PCI Express Device 3 Fatal Error",
	[18] = "PCI Express Device 2 Fatal Error",
	[17] = "PCI Express Device 1 Fatal Error",
	[16] = "ESI Fatal Error",
	[15] = "Internal MCH Non-Fatal Error",
	[14] = "Intel QuickData Technology Device Non-Fatal Error",
	[13] = "FSB1 Non-Fatal Error",
	[12] = "FSB0 Non-Fatal Error",
	[11] = "FBD Channel 3 Non-Fatal Error",
	[10] = "FBD Channel 2 Non-Fatal Error",
	[9]  = "FBD Channel 1 Non-Fatal Error",
	[8]  = "FBD Channel 0 Non-Fatal Error",
	[7]  = "PCI Express Device 7 Non-Fatal Error",
	[6]  = "PCI Express Device 6 Non-Fatal Error",
	[5]  = "PCI Express Device 5 Non-Fatal Error",
	[4]  = "PCI Express Device 4 Non-Fatal Error",
	[3]  = "PCI Express Device 3 Non-Fatal Error",
	[2]  = "PCI Express Device 2 Non-Fatal Error",
	[1]  = "PCI Express Device 1 Non-Fatal Error",
	[0]  = "ESI Non-Fatal Error",
};
#define ferr_global_lo_is_fatal(errno)	((errno < 16) ? 0 : 1)

#define NRECMEMA	0xbe
#define NRECMEMA_BANK(v)	(((v) >> 12) & 7)
#define NRECMEMA_RANK(v)	(((v) >> 8) & 15)

#define NRECMEMB	0xc0
#define NRECMEMB_IS_WR(v)	((v) & (1 << 31))
#define NRECMEMB_CAS(v)		(((v) >> 16) & 0x1fff)
#define NRECMEMB_RAS(v)		((v) & 0xffff)

#define REDMEMA		0xdc

#define REDMEMB		0x7c
#define IS_SECOND_CH(v)		((v) & (1 << 17))

#define RECMEMA		0xe0
#define RECMEMA_BANK(v)		(((v) >> 12) & 7)
#define RECMEMA_RANK(v)		(((v) >> 8) & 15)

#define RECMEMB		0xe4
#define RECMEMB_IS_WR(v)	((v) & (1 << 31))
#define RECMEMB_CAS(v)		(((v) >> 16) & 0x1fff)
#define RECMEMB_RAS(v)		((v) & 0xffff)

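/*
 * Worked example (an editor's illustration, not part of the original
 * file): a RECMEMB readout of 0x80123456 decodes as a write error
 * (bit 31 set) with CAS = (0x8012 & 0x1fff) = 0x12 = 18 and
 * RAS = 0x3456 = 13398.
 */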
/********************************************
 * i7300 Functions related to error detection
 ********************************************/

/**
 * get_err_from_table() - Gets the error message from a table
 * @table: table name (array of char *)
 * @size: number of elements in the table
 * @pos: position of the element to be returned
 *
 * This is a small routine that gets the pos-th element of a table. If the
 * element doesn't exist (or it is empty), it returns "Reserved".
 * Instead of calling it directly, it is better to call it via the macro
 * GET_ERR_FROM_TABLE(), which automatically checks the table size via
 * the ARRAY_SIZE() macro.
 */
static const char *get_err_from_table(const char *table[], int size, int pos)
{
	if (unlikely(pos >= size))
		return "Reserved";

	if (unlikely(!table[pos]))
		return "Reserved";

	return table[pos];
}

#define GET_ERR_FROM_TABLE(table, pos) \
	get_err_from_table(table, ARRAY_SIZE(table), pos)

/**
 * i7300_process_error_global() - Retrieve the hardware error information from
 *				  the hardware global error registers and
 *				  send it to dmesg
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_error_global(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, error_reg;
	unsigned long errors;
	const char *specific;
	bool is_fatal;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_hi_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
		is_fatal = ferr_global_hi_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_HI, error_reg);

		goto error_global;
	}

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_lo_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
		is_fatal = ferr_global_lo_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_LO, error_reg);

		goto error_global;
	}
	return;

error_global:
	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
			is_fatal ? "Fatal" : "NOT fatal", specific);
}

/**
 * i7300_process_fbd_error() - Retrieve the hardware error information from
 *			       the FBD error registers and send it via
 *			       EDAC error API calls
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_fbd_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, value, error_reg;
	u16 val16;
	unsigned branch, channel, bank, rank, cas, ras;
	u32 syndrome;

	unsigned long errors;
	const char *specific;
	bool is_wr;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &error_reg);
	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
		errors = error_reg & FERR_FAT_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_fat_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     NRECMEMA, &val16);
		bank = NRECMEMA_BANK(val16);
		rank = NRECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      NRECMEMB, &value);
		is_wr = NRECMEMB_IS_WR(value);
		cas = NRECMEMB_CAS(value);
		ras = NRECMEMB_RAS(value);

		/* Clean the error register */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_FAT_FBD, error_reg);

		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s)",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
				     branch, -1, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);

	}

	/* read in the 1st NON-FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &error_reg);
	if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
		errors = error_reg & FERR_NF_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_nf_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      REDMEMA, &syndrome);

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     RECMEMA, &val16);
		bank = RECMEMA_BANK(val16);
		rank = RECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      RECMEMB, &value);
		is_wr = RECMEMB_IS_WR(value);
		cas = RECMEMB_CAS(value);
		ras = RECMEMB_RAS(value);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      REDMEMB, &value);
		channel = (branch << 1);
		if (IS_SECOND_CH(value))
			channel++;

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_NF_FBD, error_reg);

		/* Format the output message */
		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s)",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
				     syndrome,
				     branch >> 1, channel % 2, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);
	}
	return;
}

/**
 * i7300_check_error() - Calls the error checking subroutines
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_check_error(struct mem_ctl_info *mci)
{
	i7300_process_error_global(mci);
	i7300_process_fbd_error(mci);
}

/**
 * i7300_clear_error() - Clears the error registers
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_clear_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt = mci->pvt_info;
	u32 value;
	/*
	 * All error values are RWC - we need to read and write 1 to the
	 * bit that we want to clear
	 */

	/* Clear global error registers */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &value);
	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			       FERR_GLOBAL_HI, value);

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &value);
	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			       FERR_GLOBAL_LO, value);

	/* Clear FBD error registers */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &value);
	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       FERR_FAT_FBD, value);

	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &value);
	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       FERR_NF_FBD, value);
}

/**
 * i7300_enable_error_reporting() - Enable the memory reporting logic in the
 *				    hardware
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt = mci->pvt_info;
	u32 fbd_error_mask;

	/* Read the FBD Error Mask Register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      EMASK_FBD, &fbd_error_mask);

	/* Enable with a '0' */
	fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);

	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       EMASK_FBD, fbd_error_mask);
}

/************************************************
 * i7300 Functions related to memory enumeration
 ************************************************/

/**
 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
 * @pvt: pointer to the private data struct used by the i7300 driver
 * @slot: DIMM slot (0 to 7)
 * @ch: Channel number within the branch (0 or 1)
 * @branch: Branch number (0 or 1)
 * @dinfo: Pointer to DIMM info where the dimm size is stored
 * @dimm: Pointer to the struct dimm_info that corresponds to that element
 */
static int decode_mtr(struct i7300_pvt *pvt,
		      int slot, int ch, int branch,
		      struct i7300_dimm_info *dinfo,
		      struct dimm_info *dimm)
{
	int mtr, ans, addrBits, channel;

	channel = to_channel(ch, branch);

	mtr = pvt->mtr[slot][branch];
	ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;

	edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
		 slot, channel, ans ? "" : "NOT ");

	/* Determine if there is a DIMM present in this DIMM slot */
	if (!ans)
		return 0;

	/* Start with the number of bits for a Bank
	 * on the DRAM */
	addrBits = MTR_DRAM_BANKS_ADDR_BITS;
	/* Add the number of ROW bits */
	addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
	/* add the number of COLUMN bits */
	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
	/* add the number of RANK bits */
	addrBits += MTR_DIMM_RANKS(mtr);

	addrBits += 6;	/* add 64 bits per DIMM */
	addrBits -= 20;	/* divide by 2^20 */
	addrBits -= 3;	/* 8 bits per byte */

	dinfo->megabytes = 1 << addrBits;

	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));

	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
		 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");

	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
	edac_dbg(2, "\t\tNUMRANK: %s\n",
		 MTR_DIMM_RANKS(mtr) ? "double" : "single");
	edac_dbg(2, "\t\tNUMROW: %s\n",
		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
		 "65,536 - 16 rows");
	edac_dbg(2, "\t\tNUMCOL: %s\n",
		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
		 "reserved");
	edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);

	/*
	 * The type of error detection actually depends on the
	 * mode of operation. When it is just one single memory chip, at
	 * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code.
	 * In normal or mirrored mode, it uses Lockstep mode,
	 * with the possibility of using an extended algorithm for x8 memories.
	 * See datasheet Sections 7.3.6 to 7.3.8.
	 */

	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
	dimm->grain = 8;
	dimm->mtype = MEM_FB_DDR2;
	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		dimm->edac_mode = EDAC_SECDED;
		edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
	} else {
		edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
		if (MTR_DRAM_WIDTH(mtr) == 8)
			dimm->edac_mode = EDAC_S8ECD8ED;
		else
			dimm->edac_mode = EDAC_S4ECD4ED;
	}

	/* ask what device type is on this row */
	if (MTR_DRAM_WIDTH(mtr) == 8) {
		edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
			 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
			 "enhanced" : "normal");

		dimm->dtype = DEV_X8;
	} else
		dimm->dtype = DEV_X4;

	return mtr;
}

/**
 * print_dimm_size() - Prints a dump of the memory organization
 * @pvt: pointer to the private data struct used by the i7300 driver
 *
 * Useful for debug. If debug is disabled, this routine does nothing.
 */
static void print_dimm_size(struct i7300_pvt *pvt)
{
#ifdef CONFIG_EDAC_DEBUG
	struct i7300_dimm_info *dinfo;
	char *p;
	int space, n;
	int channel, slot;

	space = PAGE_SIZE;
	p = pvt->tmp_prt_buffer;

	n = snprintf(p, space, " ");
	p += n;
	space -= n;
	for (channel = 0; channel < MAX_CHANNELS; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;

	for (slot = 0; slot < MAX_SLOTS; slot++) {
		n = snprintf(p, space, "csrow/SLOT %d ", slot);
		p += n;
		space -= n;

		for (channel = 0; channel < MAX_CHANNELS; channel++) {
			dinfo = &pvt->dimm_info[slot][channel];
			n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
			p += n;
			space -= n;
		}

		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
		p = pvt->tmp_prt_buffer;
		space = PAGE_SIZE;
	}

	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
#endif
}

/**
 * i7300_init_csrows() - Initialize the 'csrows' table within
 *			 the mci control structure with the
 *			 addressing of memory.
 * @mci: struct mem_ctl_info pointer
 */
static int i7300_init_csrows(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct i7300_dimm_info *dinfo;
	int rc = -ENODEV;
	int mtr;
-	int ch, branch, slot, channel;
+	int ch, branch, slot, channel, max_channel, max_branch;
	struct dimm_info *dimm;

	pvt = mci->pvt_info;

	edac_dbg(2, "Memory Technology Registers:\n");

+	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
+		max_branch = 1;
+		max_channel = 1;
+	} else {
+		max_branch = MAX_BRANCHES;
+		max_channel = MAX_CH_PER_BRANCH;
+	}
+
	/* Get the AMB present registers for the four channels */
-	for (branch = 0; branch < MAX_BRANCHES; branch++) {
+	for (branch = 0; branch < max_branch; branch++) {
		/* Read and dump branch 0's MTRs */
		channel = to_channel(0, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_0,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);

+		if (max_channel == 1)
+			continue;
+
		channel = to_channel(1, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_1,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);
	}

	/* Get the set of MTR[0-7] regs for each branch */
	for (slot = 0; slot < MAX_SLOTS; slot++) {
		int where = mtr_regs[slot];
-		for (branch = 0; branch < MAX_BRANCHES; branch++) {
+		for (branch = 0; branch < max_branch; branch++) {
			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
					     where,
					     &pvt->mtr[slot][branch]);
-			for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
+			for (ch = 0; ch < max_channel; ch++) {
				int channel = to_channel(ch, branch);

				dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
					mci->n_layers, branch, ch, slot);

				dinfo = &pvt->dimm_info[slot][channel];

				mtr = decode_mtr(pvt, slot, ch, branch,
						 dinfo, dimm);

				/* if no DIMMS on this row, continue */
				if (!MTR_DIMMS_PRESENT(mtr))
					continue;

				rc = 0;

			}
		}
	}

	return rc;
}

/**
 * decode_mir() - Decodes Memory Interleave Register (MIR) info
 * @mir_no: number of the MIR register to decode
 * @mir: array with the MIR data cached on the driver
 */
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
{
	if (mir[mir_no] & 3)
		edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
			 mir_no,
			 (mir[mir_no] >> 4) & 0xfff,
			 (mir[mir_no] & 1) ? "B0" : "",
			 (mir[mir_no] & 2) ? "B1" : "");
}

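/*
 * Worked example (an editor's illustration, not part of the original
 * file): a cached MIR value of 0x0103 has both branch bits set, so
 * decode_mir() logs "MIR0: limit= 0x10 Branch(es) that participate:
 * B0 B1".
 */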
824 /** 835 /**
825 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers 836 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
826 * @mci: struct mem_ctl_info pointer 837 * @mci: struct mem_ctl_info pointer
827 * 838 *
828 * Data read is cached internally for its usage when needed 839 * Data read is cached internally for its usage when needed
829 */ 840 */
830 static int i7300_get_mc_regs(struct mem_ctl_info *mci) 841 static int i7300_get_mc_regs(struct mem_ctl_info *mci)
831 { 842 {
832 struct i7300_pvt *pvt; 843 struct i7300_pvt *pvt;
833 u32 actual_tolm; 844 u32 actual_tolm;
834 int i, rc; 845 int i, rc;
835 846
836 pvt = mci->pvt_info; 847 pvt = mci->pvt_info;
837 848
838 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, 849 pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
839 (u32 *) &pvt->ambase); 850 (u32 *) &pvt->ambase);
840 851
841 edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); 852 edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
842 853
843 /* Get the Branch Map regs */ 854 /* Get the Branch Map regs */
844 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); 855 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
845 pvt->tolm >>= 12; 856 pvt->tolm >>= 12;
846 edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n", 857 edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
847 pvt->tolm, pvt->tolm); 858 pvt->tolm, pvt->tolm);
848 859
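	/* pvt->tolm counts 256 MB regions, so the limit in GB is tolm / 4;
	 * multiplying by 1000 before the shift keeps three decimal places,
	 * since (1000 * tolm) >> 2 == tolm * 250. */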
849 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); 860 actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
850 edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n", 861 edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
851 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); 862 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
852 863
853 /* Get memory controller settings */ 864 /* Get memory controller settings */
854 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, 865 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
855 &pvt->mc_settings); 866 &pvt->mc_settings);
856 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A, 867 pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
857 &pvt->mc_settings_a); 868 &pvt->mc_settings_a);
858 869
859 if (IS_SINGLE_MODE(pvt->mc_settings_a)) 870 if (IS_SINGLE_MODE(pvt->mc_settings_a))
860 edac_dbg(0, "Memory controller operating in single mode\n"); 871 edac_dbg(0, "Memory controller operating in single mode\n");
861 else 872 else
862 edac_dbg(0, "Memory controller operating in %smirrored mode\n", 873 edac_dbg(0, "Memory controller operating in %smirrored mode\n",
863 IS_MIRRORED(pvt->mc_settings) ? "" : "non-"); 874 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
864 875
865 edac_dbg(0, "Error detection is %s\n", 876 edac_dbg(0, "Error detection is %s\n",
866 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); 877 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
867 edac_dbg(0, "Retry is %s\n", 878 edac_dbg(0, "Retry is %s\n",
868 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); 879 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
869 880
870 /* Get Memory Interleave Range registers */ 881 /* Get Memory Interleave Range registers */
871 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, 882 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
872 &pvt->mir[0]); 883 &pvt->mir[0]);
873 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1, 884 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
874 &pvt->mir[1]); 885 &pvt->mir[1]);
875 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2, 886 pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
876 &pvt->mir[2]); 887 &pvt->mir[2]);
877 888
878 /* Decode the MIR regs */ 889 /* Decode the MIR regs */
879 for (i = 0; i < MAX_MIR; i++) 890 for (i = 0; i < MAX_MIR; i++)
880 decode_mir(i, pvt->mir); 891 decode_mir(i, pvt->mir);
881 892
882 rc = i7300_init_csrows(mci); 893 rc = i7300_init_csrows(mci);
883 if (rc < 0) 894 if (rc < 0)
884 return rc; 895 return rc;
885 896
886 /* Determine the size of each DIMM and place it in an 897 /* Determine the size of each DIMM and place it in an
887 * orderly matrix */ 898 * orderly matrix */
888 print_dimm_size(pvt); 899 print_dimm_size(pvt);
889 900
890 return 0; 901 return 0;
891 } 902 }
892 903
893 /************************************************* 904 /*************************************************
894 * i7300 Functions related to device probe/release 905 * i7300 Functions related to device probe/release
895 *************************************************/ 906 *************************************************/
896 907
897 /** 908 /**
898 * i7300_put_devices() - Release the PCI devices 909 * i7300_put_devices() - Release the PCI devices
899 * @mci: struct mem_ctl_info pointer 910 * @mci: struct mem_ctl_info pointer
900 */ 911 */
901 static void i7300_put_devices(struct mem_ctl_info *mci) 912 static void i7300_put_devices(struct mem_ctl_info *mci)
902 { 913 {
903 struct i7300_pvt *pvt; 914 struct i7300_pvt *pvt;
904 int branch; 915 int branch;
905 916
906 pvt = mci->pvt_info; 917 pvt = mci->pvt_info;
907 918
908 /* Decrement usage count for devices */ 919 /* Decrement usage count for devices */
909 for (branch = 0; branch < MAX_BRANCHES; branch++) 920 for (branch = 0; branch < MAX_BRANCHES; branch++)
910 pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]); 921 pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
911 pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs); 922 pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
912 pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map); 923 pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
913 } 924 }
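i7300_put_devices() drops the references that i7300_get_devices() takes below; every successful pci_get_device() raises the device refcount and must be paired with a pci_dev_put(). A sketch of the pattern (the device ID here is arbitrary):

	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x360c, NULL);	/* +1 ref */
	if (pdev)
		pci_dev_put(pdev);	/* release the reference when done */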
914 925
915 /** 926 /**
916 * i7300_get_devices() - Find and perform 'get' operation on the MCH's 927 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
917 * device/functions we want to reference for this driver 928 * device/functions we want to reference for this driver
918 * @mci: struct mem_ctl_info pointer 929 * @mci: struct mem_ctl_info pointer
919 * 930 *
920 * Access and prepare the several devices for use: 931 * Access and prepare the several devices for use:
921 * I7300 devices used by this driver: 932 * I7300 devices used by this driver:
922 * Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 933 * Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
923 * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 934 * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
924 * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 935 * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
925 */ 936 */
926 static int i7300_get_devices(struct mem_ctl_info *mci) 937 static int i7300_get_devices(struct mem_ctl_info *mci)
927 { 938 {
928 struct i7300_pvt *pvt; 939 struct i7300_pvt *pvt;
929 struct pci_dev *pdev; 940 struct pci_dev *pdev;
930 941
931 pvt = mci->pvt_info; 942 pvt = mci->pvt_info;
932 943
933 /* Attempt to 'get' the MCH register we want */ 944 /* Attempt to 'get' the MCH register we want */
934 pdev = NULL; 945 pdev = NULL;
935 while (!pvt->pci_dev_16_1_fsb_addr_map || 946 while (!pvt->pci_dev_16_1_fsb_addr_map ||
936 !pvt->pci_dev_16_2_fsb_err_regs) { 947 !pvt->pci_dev_16_2_fsb_err_regs) {
937 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 948 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
938 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev); 949 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
939 if (!pdev) { 950 if (!pdev) {
940 /* End of list, leave */ 951 /* End of list, leave */
941 i7300_printk(KERN_ERR, 952 i7300_printk(KERN_ERR,
942 "'system address,Process Bus' " 953 "'system address,Process Bus' "
943 "device not found: " 954 "device not found: "
944 "vendor 0x%x device 0x%x ERR funcs " 955 "vendor 0x%x device 0x%x ERR funcs "
945 "(broken BIOS?)\n", 956 "(broken BIOS?)\n",
946 PCI_VENDOR_ID_INTEL, 957 PCI_VENDOR_ID_INTEL,
947 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); 958 PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
948 goto error; 959 goto error;
949 } 960 }
950 961
951 /* Store device 16 funcs 1 and 2 */ 962 /* Store device 16 funcs 1 and 2 */
952 switch (PCI_FUNC(pdev->devfn)) { 963 switch (PCI_FUNC(pdev->devfn)) {
953 case 1: 964 case 1:
954 pvt->pci_dev_16_1_fsb_addr_map = pdev; 965 pvt->pci_dev_16_1_fsb_addr_map = pdev;
955 break; 966 break;
956 case 2: 967 case 2:
957 pvt->pci_dev_16_2_fsb_err_regs = pdev; 968 pvt->pci_dev_16_2_fsb_err_regs = pdev;
958 break; 969 break;
959 } 970 }
960 } 971 }
961 972
962 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", 973 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
963 pci_name(pvt->pci_dev_16_0_fsb_ctlr), 974 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
964 pvt->pci_dev_16_0_fsb_ctlr->vendor, 975 pvt->pci_dev_16_0_fsb_ctlr->vendor,
965 pvt->pci_dev_16_0_fsb_ctlr->device); 976 pvt->pci_dev_16_0_fsb_ctlr->device);
966 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", 977 edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
967 pci_name(pvt->pci_dev_16_1_fsb_addr_map), 978 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
968 pvt->pci_dev_16_1_fsb_addr_map->vendor, 979 pvt->pci_dev_16_1_fsb_addr_map->vendor,
969 pvt->pci_dev_16_1_fsb_addr_map->device); 980 pvt->pci_dev_16_1_fsb_addr_map->device);
970 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", 981 edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
971 pci_name(pvt->pci_dev_16_2_fsb_err_regs), 982 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
972 pvt->pci_dev_16_2_fsb_err_regs->vendor, 983 pvt->pci_dev_16_2_fsb_err_regs->vendor,
973 pvt->pci_dev_16_2_fsb_err_regs->device); 984 pvt->pci_dev_16_2_fsb_err_regs->device);
974 985
975 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, 986 pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
976 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, 987 PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
977 NULL); 988 NULL);
978 if (!pvt->pci_dev_2x_0_fbd_branch[0]) { 989 if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
979 i7300_printk(KERN_ERR, 990 i7300_printk(KERN_ERR,
980 "MC: 'BRANCH 0' device not found: " 991 "MC: 'BRANCH 0' device not found: "
981 "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", 992 "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
982 PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0); 993 PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
983 goto error; 994 goto error;
984 } 995 }
985 996
986 pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL, 997 pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
987 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1, 998 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
988 NULL); 999 NULL);
989 if (!pvt->pci_dev_2x_0_fbd_branch[1]) { 1000 if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
990 i7300_printk(KERN_ERR, 1001 i7300_printk(KERN_ERR,
991 "MC: 'BRANCH 1' device not found: " 1002 "MC: 'BRANCH 1' device not found: "
992 "vendor 0x%x device 0x%x Func 0 " 1003 "vendor 0x%x device 0x%x Func 0 "
993 "(broken BIOS?)\n", 1004 "(broken BIOS?)\n",
994 PCI_VENDOR_ID_INTEL, 1005 PCI_VENDOR_ID_INTEL,
995 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1); 1006 PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
996 goto error; 1007 goto error;
997 } 1008 }
998 1009
999 return 0; 1010 return 0;
1000 1011
1001 error: 1012 error:
1002 i7300_put_devices(mci); 1013 i7300_put_devices(mci);
1003 return -ENODEV; 1014 return -ENODEV;
1004 } 1015 }
1005 1016
1006 /** 1017 /**
1007 * i7300_init_one() - Probe for one instance of the device 1018 * i7300_init_one() - Probe for one instance of the device
1008 * @pdev: struct pci_dev pointer 1019 * @pdev: struct pci_dev pointer
1009 * @id: struct pci_device_id pointer - currently unused 1020 * @id: struct pci_device_id pointer - currently unused
1010 */ 1021 */
1011 static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1022 static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1012 { 1023 {
1013 struct mem_ctl_info *mci; 1024 struct mem_ctl_info *mci;
1014 struct edac_mc_layer layers[3]; 1025 struct edac_mc_layer layers[3];
1015 struct i7300_pvt *pvt; 1026 struct i7300_pvt *pvt;
1016 int rc; 1027 int rc;
1017 1028
1018 /* wake up device */ 1029 /* wake up device */
1019 rc = pci_enable_device(pdev); 1030 rc = pci_enable_device(pdev);
1020 if (rc == -EIO) 1031 if (rc == -EIO)
1021 return rc; 1032 return rc;
1022 1033
1023 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n", 1034 edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1024 pdev->bus->number, 1035 pdev->bus->number,
1025 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1036 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1026 1037
1027 /* We are only looking for func 0 of the set */ 1038 /* We are only looking for func 0 of the set */
1028 if (PCI_FUNC(pdev->devfn) != 0) 1039 if (PCI_FUNC(pdev->devfn) != 0)
1029 return -ENODEV; 1040 return -ENODEV;
1030 1041
1031 /* allocate a new MC control structure */ 1042 /* allocate a new MC control structure */
1032 layers[0].type = EDAC_MC_LAYER_BRANCH; 1043 layers[0].type = EDAC_MC_LAYER_BRANCH;
1033 layers[0].size = MAX_BRANCHES; 1044 layers[0].size = MAX_BRANCHES;
1034 layers[0].is_virt_csrow = false; 1045 layers[0].is_virt_csrow = false;
1035 layers[1].type = EDAC_MC_LAYER_CHANNEL; 1046 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1036 layers[1].size = MAX_CH_PER_BRANCH; 1047 layers[1].size = MAX_CH_PER_BRANCH;
1037 layers[1].is_virt_csrow = true; 1048 layers[1].is_virt_csrow = true;
1038 layers[2].type = EDAC_MC_LAYER_SLOT; 1049 layers[2].type = EDAC_MC_LAYER_SLOT;
1039 layers[2].size = MAX_SLOTS; 1050 layers[2].size = MAX_SLOTS;
1040 layers[2].is_virt_csrow = true; 1051 layers[2].is_virt_csrow = true;
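	/* The three layers give every DIMM a (branch, channel, slot)
	 * coordinate; edac_mc_alloc() below sizes its dimm table as the
	 * product of the three layer sizes. */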
1041 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); 1052 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1042 if (mci == NULL) 1053 if (mci == NULL)
1043 return -ENOMEM; 1054 return -ENOMEM;
1044 1055
1045 edac_dbg(0, "MC: mci = %p\n", mci); 1056 edac_dbg(0, "MC: mci = %p\n", mci);
1046 1057
1047 mci->pdev = &pdev->dev; /* record ptr to the generic device */ 1058 mci->pdev = &pdev->dev; /* record ptr to the generic device */
1048 1059
1049 pvt = mci->pvt_info; 1060 pvt = mci->pvt_info;
1050 pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */ 1061 pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */
1051 1062
1052 pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); 1063 pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1053 if (!pvt->tmp_prt_buffer) { 1064 if (!pvt->tmp_prt_buffer) {
1054 edac_mc_free(mci); 1065 edac_mc_free(mci);
1055 return -ENOMEM; 1066 return -ENOMEM;
1056 } 1067 }
1057 1068
1058 /* 'get' the pci devices we want to reserve for our use */ 1069 /* 'get' the pci devices we want to reserve for our use */
1059 if (i7300_get_devices(mci)) 1070 if (i7300_get_devices(mci))
1060 goto fail0; 1071 goto fail0;
1061 1072
1062 mci->mc_idx = 0; 1073 mci->mc_idx = 0;
1063 mci->mtype_cap = MEM_FLAG_FB_DDR2; 1074 mci->mtype_cap = MEM_FLAG_FB_DDR2;
1064 mci->edac_ctl_cap = EDAC_FLAG_NONE; 1075 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1065 mci->edac_cap = EDAC_FLAG_NONE; 1076 mci->edac_cap = EDAC_FLAG_NONE;
1066 mci->mod_name = "i7300_edac.c"; 1077 mci->mod_name = "i7300_edac.c";
1067 mci->mod_ver = I7300_REVISION; 1078 mci->mod_ver = I7300_REVISION;
1068 mci->ctl_name = i7300_devs[0].ctl_name; 1079 mci->ctl_name = i7300_devs[0].ctl_name;
1069 mci->dev_name = pci_name(pdev); 1080 mci->dev_name = pci_name(pdev);
1070 mci->ctl_page_to_phys = NULL; 1081 mci->ctl_page_to_phys = NULL;
1071 1082
1072 /* Set the function pointer to an actual operation function */ 1083 /* Set the function pointer to an actual operation function */
1073 mci->edac_check = i7300_check_error; 1084 mci->edac_check = i7300_check_error;
1074 1085
1075 /* initialize the MC control structure 'csrows' table 1086 /* initialize the MC control structure 'csrows' table
1076 * with the mapping and control information */ 1087 * with the mapping and control information */
1077 if (i7300_get_mc_regs(mci)) { 1088 if (i7300_get_mc_regs(mci)) {
1078 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n"); 1089 edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
1079 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ 1090 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
1080 } else { 1091 } else {
1081 edac_dbg(1, "MC: Enable error reporting now\n"); 1092 edac_dbg(1, "MC: Enable error reporting now\n");
1082 i7300_enable_error_reporting(mci); 1093 i7300_enable_error_reporting(mci);
1083 } 1094 }
1084 1095
1085 /* add this new MC control structure to EDAC's list of MCs */ 1096 /* add this new MC control structure to EDAC's list of MCs */
1086 if (edac_mc_add_mc(mci)) { 1097 if (edac_mc_add_mc(mci)) {
1087 edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); 1098 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1088 /* FIXME: perhaps some code should go here that disables error 1099 /* FIXME: perhaps some code should go here that disables error
1089 * reporting if we just enabled it 1100 * reporting if we just enabled it
1090 */ 1101 */
1091 goto fail1; 1102 goto fail1;
1092 } 1103 }
1093 1104
1094 i7300_clear_error(mci); 1105 i7300_clear_error(mci);
1095 1106
1096 /* allocating generic PCI control info */ 1107 /* allocating generic PCI control info */
1097 i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); 1108 i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1098 if (!i7300_pci) { 1109 if (!i7300_pci) {
1099 printk(KERN_WARNING 1110 printk(KERN_WARNING
1100 "%s(): Unable to create PCI control\n", 1111 "%s(): Unable to create PCI control\n",
1101 __func__); 1112 __func__);
1102 printk(KERN_WARNING 1113 printk(KERN_WARNING
1103 "%s(): PCI error report via EDAC not setup\n", 1114 "%s(): PCI error report via EDAC not setup\n",
1104 __func__); 1115 __func__);
1105 } 1116 }
1106 1117
1107 return 0; 1118 return 0;
1108 1119
1109 /* Error exit unwinding stack */ 1120 /* Error exit unwinding stack */
1110 fail1: 1121 fail1:
1111 1122
1112 i7300_put_devices(mci); 1123 i7300_put_devices(mci);
1113 1124
1114 fail0: 1125 fail0:
1115 kfree(pvt->tmp_prt_buffer); 1126 kfree(pvt->tmp_prt_buffer);
1116 edac_mc_free(mci); 1127 edac_mc_free(mci);
1117 return -ENODEV; 1128 return -ENODEV;
1118 } 1129 }
1119 1130
1120 /** 1131 /**
1121 * i7300_remove_one() - Remove the driver 1132 * i7300_remove_one() - Remove the driver
1122 * @pdev: struct pci_dev pointer 1133 * @pdev: struct pci_dev pointer
1123 */ 1134 */
1124 static void i7300_remove_one(struct pci_dev *pdev) 1135 static void i7300_remove_one(struct pci_dev *pdev)
1125 { 1136 {
1126 struct mem_ctl_info *mci; 1137 struct mem_ctl_info *mci;
1127 char *tmp; 1138 char *tmp;
1128 1139
1129 edac_dbg(0, "\n"); 1140 edac_dbg(0, "\n");
1130 1141
1131 if (i7300_pci) 1142 if (i7300_pci)
1132 edac_pci_release_generic_ctl(i7300_pci); 1143 edac_pci_release_generic_ctl(i7300_pci);
1133 1144
1134 mci = edac_mc_del_mc(&pdev->dev); 1145 mci = edac_mc_del_mc(&pdev->dev);
1135 if (!mci) 1146 if (!mci)
1136 return; 1147 return;
1137 1148
1138 tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer; 1149 tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
1139 1150
1140 /* retrieve references to resources, and free those resources */ 1151 /* retrieve references to resources, and free those resources */
1141 i7300_put_devices(mci); 1152 i7300_put_devices(mci);
1142 1153
1143 kfree(tmp); 1154 kfree(tmp);
1144 edac_mc_free(mci); 1155 edac_mc_free(mci);
1145 } 1156 }
1146 1157
1147 /* 1158 /*
1148 * pci_device_id: table for which devices we are looking for 1159 * pci_device_id: table for which devices we are looking for
1149 * 1160 *
1150 * Contains only the 8086:360c PCI ID 1161 * Contains only the 8086:360c PCI ID
1151 */ 1162 */
1152 static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = { 1163 static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = {
1153 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, 1164 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
1154 {0,} /* 0 terminated list. */ 1165 {0,} /* 0 terminated list. */
1155 }; 1166 };
1156 1167
1157 MODULE_DEVICE_TABLE(pci, i7300_pci_tbl); 1168 MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
1158 1169
1159 /* 1170 /*
1160 * i7300_driver: pci_driver structure for this module 1171 * i7300_driver: pci_driver structure for this module
1161 */ 1172 */
1162 static struct pci_driver i7300_driver = { 1173 static struct pci_driver i7300_driver = {
1163 .name = "i7300_edac", 1174 .name = "i7300_edac",
1164 .probe = i7300_init_one, 1175 .probe = i7300_init_one,
1165 .remove = i7300_remove_one, 1176 .remove = i7300_remove_one,
1166 .id_table = i7300_pci_tbl, 1177 .id_table = i7300_pci_tbl,
1167 }; 1178 };
1168 1179
1169 /** 1180 /**
1170 * i7300_init() - Registers the driver 1181 * i7300_init() - Registers the driver
1171 */ 1182 */
1172 static int __init i7300_init(void) 1183 static int __init i7300_init(void)
1173 { 1184 {
1174 int pci_rc; 1185 int pci_rc;
1175 1186
1176 edac_dbg(2, "\n"); 1187 edac_dbg(2, "\n");
1177 1188
1178 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1189 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1179 opstate_init(); 1190 opstate_init();
1180 1191
1181 pci_rc = pci_register_driver(&i7300_driver); 1192 pci_rc = pci_register_driver(&i7300_driver);
1182 1193
1183 return (pci_rc < 0) ? pci_rc : 0; 1194 return (pci_rc < 0) ? pci_rc : 0;
1184 } 1195 }
1185 1196
1186 /** 1197 /**
1187 * i7300_exit() - Unregisters the driver 1198 * i7300_exit() - Unregisters the driver
1188 */ 1199 */
1189 static void __exit i7300_exit(void) 1200 static void __exit i7300_exit(void)
1190 { 1201 {
1191 edac_dbg(2, "\n"); 1202 edac_dbg(2, "\n");
1192 pci_unregister_driver(&i7300_driver); 1203 pci_unregister_driver(&i7300_driver);
1193 } 1204 }
1194 1205
1195 module_init(i7300_init); 1206 module_init(i7300_init);
1196 module_exit(i7300_exit); 1207 module_exit(i7300_exit);
1197 1208
1198 MODULE_LICENSE("GPL"); 1209 MODULE_LICENSE("GPL");
1199 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); 1210 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
1200 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); 1211 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
1201 MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - " 1212 MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
1202 I7300_REVISION); 1213 I7300_REVISION);
1203 1214
1204 module_param(edac_op_state, int, 0444); 1215 module_param(edac_op_state, int, 0444);
1205 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1216 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1206 1217
drivers/edac/sb_edac.c
1 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module 1 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
2 * 2 *
3 * This driver supports the memory controllers found on the Intel 3 * This driver supports the memory controllers found on the Intel
4 * processor family Sandy Bridge. 4 * processor family Sandy Bridge.
5 * 5 *
6 * This file may be distributed under the terms of the 6 * This file may be distributed under the terms of the
7 * GNU General Public License version 2 only. 7 * GNU General Public License version 2 only.
8 * 8 *
9 * Copyright (c) 2011 by: 9 * Copyright (c) 2011 by:
10 * Mauro Carvalho Chehab <mchehab@redhat.com> 10 * Mauro Carvalho Chehab <mchehab@redhat.com>
11 */ 11 */
12 12
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/pci.h> 15 #include <linux/pci.h>
16 #include <linux/pci_ids.h> 16 #include <linux/pci_ids.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/delay.h> 18 #include <linux/delay.h>
19 #include <linux/edac.h> 19 #include <linux/edac.h>
20 #include <linux/mmzone.h> 20 #include <linux/mmzone.h>
21 #include <linux/smp.h> 21 #include <linux/smp.h>
22 #include <linux/bitmap.h> 22 #include <linux/bitmap.h>
23 #include <linux/math64.h> 23 #include <linux/math64.h>
24 #include <asm/processor.h> 24 #include <asm/processor.h>
25 #include <asm/mce.h> 25 #include <asm/mce.h>
26 26
27 #include "edac_core.h" 27 #include "edac_core.h"
28 28
29 /* Static vars */ 29 /* Static vars */
30 static LIST_HEAD(sbridge_edac_list); 30 static LIST_HEAD(sbridge_edac_list);
31 static DEFINE_MUTEX(sbridge_edac_lock); 31 static DEFINE_MUTEX(sbridge_edac_lock);
32 static int probed; 32 static int probed;
33 33
34 /* 34 /*
35 * Alter this version for the module when modifications are made 35 * Alter this version for the module when modifications are made
36 */ 36 */
37 #define SBRIDGE_REVISION " Ver: 1.0.0 " 37 #define SBRIDGE_REVISION " Ver: 1.0.0 "
38 #define EDAC_MOD_STR "sbridge_edac" 38 #define EDAC_MOD_STR "sbridge_edac"
39 39
40 /* 40 /*
41 * Debug macros 41 * Debug macros
42 */ 42 */
43 #define sbridge_printk(level, fmt, arg...) \ 43 #define sbridge_printk(level, fmt, arg...) \
44 edac_printk(level, "sbridge", fmt, ##arg) 44 edac_printk(level, "sbridge", fmt, ##arg)
45 45
46 #define sbridge_mc_printk(mci, level, fmt, arg...) \ 46 #define sbridge_mc_printk(mci, level, fmt, arg...) \
47 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg) 47 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
48 48
49 /* 49 /*
50 * Get a bit field at register value <v>, from bit <lo> to bit <hi> 50 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
51 */ 51 */
52 #define GET_BITFIELD(v, lo, hi) \ 52 #define GET_BITFIELD(v, lo, hi) \
53 (((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo)) 53 (((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))
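A quick self-contained check of what GET_BITFIELD() extracts; the sample value is arbitrary:

#include <stdio.h>
#include <stdint.h>

#define GET_BITFIELD(v, lo, hi) \
	(((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))

int main(void)
{
	uint32_t reg = 0x3cf4;

	/* bits 7:4 of 0x3cf4 are 0xf, so this prints 15 */
	printf("%llu\n", GET_BITFIELD(reg, 4, 7));
	return 0;
}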
54 54
55 /* 55 /*
56 * sbridge Memory Controller Registers 56 * sbridge Memory Controller Registers
57 */ 57 */
58 58
59 /* 59 /*
60 * FIXME: For now, let's order by device function, as it makes 60 * FIXME: For now, let's order by device function, as it makes
61 * the driver's development process easier. This table should be 61 * the driver's development process easier. This table should be
62 * moved to pci_id.h when submitted upstream 62 * moved to pci_id.h when submitted upstream
63 */ 63 */
64 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */ 64 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
65 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */ 65 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
66 #define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */ 66 #define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
67 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */ 67 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
68 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */ 68 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
69 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */ 69 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
70 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */ 70 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
71 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */ 71 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
72 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */ 72 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
73 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */ 73 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
74 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */ 74 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
75 75
76 /* 76 /*
77 * Currently unused, but will be needed by future 77 * Currently unused, but will be needed by future
78 * implementations, as they hold the error counters 78 * implementations, as they hold the error counters
79 */ 79 */
80 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */ 80 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
81 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */ 81 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
82 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */ 82 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
83 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ 83 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
84 84
85 /* Device 12 Function 6, Offsets 0x80 to 0xcc */ 85 /* Device 12 Function 6, Offsets 0x80 to 0xcc */
86 static const u32 dram_rule[] = { 86 static const u32 dram_rule[] = {
87 0x80, 0x88, 0x90, 0x98, 0xa0, 87 0x80, 0x88, 0x90, 0x98, 0xa0,
88 0xa8, 0xb0, 0xb8, 0xc0, 0xc8, 88 0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
89 }; 89 };
90 #define MAX_SAD ARRAY_SIZE(dram_rule) 90 #define MAX_SAD ARRAY_SIZE(dram_rule)
91 91
92 #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff) 92 #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff)
93 #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3) 93 #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3)
94 #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1) 94 #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1)
95 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0) 95 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
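/* SAD_LIMIT holds bits 45:26 of a rule's end address (64 MB granularity);
 * OR-ing in 0x3ffffff turns the field into an inclusive byte-address limit. */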
96 96
97 static char *get_dram_attr(u32 reg) 97 static char *get_dram_attr(u32 reg)
98 { 98 {
99 switch(DRAM_ATTR(reg)) { 99 switch(DRAM_ATTR(reg)) {
100 case 0: 100 case 0:
101 return "DRAM"; 101 return "DRAM";
102 case 1: 102 case 1:
103 return "MMCFG"; 103 return "MMCFG";
104 case 2: 104 case 2:
105 return "NXM"; 105 return "NXM";
106 default: 106 default:
107 return "unknown"; 107 return "unknown";
108 } 108 }
109 } 109 }
110 110
111 static const u32 interleave_list[] = { 111 static const u32 interleave_list[] = {
112 0x84, 0x8c, 0x94, 0x9c, 0xa4, 112 0x84, 0x8c, 0x94, 0x9c, 0xa4,
113 0xac, 0xb4, 0xbc, 0xc4, 0xcc, 113 0xac, 0xb4, 0xbc, 0xc4, 0xcc,
114 }; 114 };
115 #define MAX_INTERLEAVE ARRAY_SIZE(interleave_list) 115 #define MAX_INTERLEAVE ARRAY_SIZE(interleave_list)
116 116
117 #define SAD_PKG0(reg) GET_BITFIELD(reg, 0, 2) 117 #define SAD_PKG0(reg) GET_BITFIELD(reg, 0, 2)
118 #define SAD_PKG1(reg) GET_BITFIELD(reg, 3, 5) 118 #define SAD_PKG1(reg) GET_BITFIELD(reg, 3, 5)
119 #define SAD_PKG2(reg) GET_BITFIELD(reg, 8, 10) 119 #define SAD_PKG2(reg) GET_BITFIELD(reg, 8, 10)
120 #define SAD_PKG3(reg) GET_BITFIELD(reg, 11, 13) 120 #define SAD_PKG3(reg) GET_BITFIELD(reg, 11, 13)
121 #define SAD_PKG4(reg) GET_BITFIELD(reg, 16, 18) 121 #define SAD_PKG4(reg) GET_BITFIELD(reg, 16, 18)
122 #define SAD_PKG5(reg) GET_BITFIELD(reg, 19, 21) 122 #define SAD_PKG5(reg) GET_BITFIELD(reg, 19, 21)
123 #define SAD_PKG6(reg) GET_BITFIELD(reg, 24, 26) 123 #define SAD_PKG6(reg) GET_BITFIELD(reg, 24, 26)
124 #define SAD_PKG7(reg) GET_BITFIELD(reg, 27, 29) 124 #define SAD_PKG7(reg) GET_BITFIELD(reg, 27, 29)
125 125
126 static inline int sad_pkg(u32 reg, int interleave) 126 static inline int sad_pkg(u32 reg, int interleave)
127 { 127 {
128 switch (interleave) { 128 switch (interleave) {
129 case 0: 129 case 0:
130 return SAD_PKG0(reg); 130 return SAD_PKG0(reg);
131 case 1: 131 case 1:
132 return SAD_PKG1(reg); 132 return SAD_PKG1(reg);
133 case 2: 133 case 2:
134 return SAD_PKG2(reg); 134 return SAD_PKG2(reg);
135 case 3: 135 case 3:
136 return SAD_PKG3(reg); 136 return SAD_PKG3(reg);
137 case 4: 137 case 4:
138 return SAD_PKG4(reg); 138 return SAD_PKG4(reg);
139 case 5: 139 case 5:
140 return SAD_PKG5(reg); 140 return SAD_PKG5(reg);
141 case 6: 141 case 6:
142 return SAD_PKG6(reg); 142 return SAD_PKG6(reg);
143 case 7: 143 case 7:
144 return SAD_PKG7(reg); 144 return SAD_PKG7(reg);
145 default: 145 default:
146 return -EINVAL; 146 return -EINVAL;
147 } 147 }
148 } 148 }
149 149
150 /* Device 12 Function 7 */ 150 /* Device 12 Function 7 */
151 151
152 #define TOLM 0x80 152 #define TOLM 0x80
153 #define TOHM 0x84 153 #define TOHM 0x84
154 154
155 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff) 155 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
156 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff) 156 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
157 157
158 /* Device 13 Function 6 */ 158 /* Device 13 Function 6 */
159 159
160 #define SAD_TARGET 0xf0 160 #define SAD_TARGET 0xf0
161 161
162 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11) 162 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
163 163
164 #define SAD_CONTROL 0xf4 164 #define SAD_CONTROL 0xf4
165 165
166 #define NODE_ID(reg) GET_BITFIELD(reg, 0, 2) 166 #define NODE_ID(reg) GET_BITFIELD(reg, 0, 2)
167 167
168 /* Device 14 function 0 */ 168 /* Device 14 function 0 */
169 169
170 static const u32 tad_dram_rule[] = { 170 static const u32 tad_dram_rule[] = {
171 0x40, 0x44, 0x48, 0x4c, 171 0x40, 0x44, 0x48, 0x4c,
172 0x50, 0x54, 0x58, 0x5c, 172 0x50, 0x54, 0x58, 0x5c,
173 0x60, 0x64, 0x68, 0x6c, 173 0x60, 0x64, 0x68, 0x6c,
174 }; 174 };
175 #define MAX_TAD ARRAY_SIZE(tad_dram_rule) 175 #define MAX_TAD ARRAY_SIZE(tad_dram_rule)
176 176
177 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff) 177 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
178 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11) 178 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
179 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9) 179 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
180 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7) 180 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
181 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5) 181 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
182 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3) 182 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
183 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1) 183 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
184 184
185 /* Device 15, function 0 */ 185 /* Device 15, function 0 */
186 186
187 #define MCMTR 0x7c 187 #define MCMTR 0x7c
188 188
189 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2) 189 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
190 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1) 190 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
191 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0) 191 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
192 192
193 /* Device 15, function 1 */ 193 /* Device 15, function 1 */
194 194
195 #define RASENABLES 0xac 195 #define RASENABLES 0xac
196 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0) 196 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
197 197
198 /* Device 15, functions 2-5 */ 198 /* Device 15, functions 2-5 */
199 199
200 static const int mtr_regs[] = { 200 static const int mtr_regs[] = {
201 0x80, 0x84, 0x88, 201 0x80, 0x84, 0x88,
202 }; 202 };
203 203
204 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19) 204 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
205 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14) 205 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
206 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13) 206 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
207 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4) 207 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
208 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1) 208 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
209 209
210 static const u32 tad_ch_nilv_offset[] = { 210 static const u32 tad_ch_nilv_offset[] = {
211 0x90, 0x94, 0x98, 0x9c, 211 0x90, 0x94, 0x98, 0x9c,
212 0xa0, 0xa4, 0xa8, 0xac, 212 0xa0, 0xa4, 0xa8, 0xac,
213 0xb0, 0xb4, 0xb8, 0xbc, 213 0xb0, 0xb4, 0xb8, 0xbc,
214 }; 214 };
215 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29) 215 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
216 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26) 216 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
217 217
218 static const u32 rir_way_limit[] = { 218 static const u32 rir_way_limit[] = {
219 0x108, 0x10c, 0x110, 0x114, 0x118, 219 0x108, 0x10c, 0x110, 0x114, 0x118,
220 }; 220 };
221 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit) 221 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
222 222
223 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31) 223 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
224 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29) 224 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
225 #define RIR_LIMIT(reg) ((GET_BITFIELD(reg, 1, 10) << 29)| 0x1fffffff) 225 #define RIR_LIMIT(reg) ((GET_BITFIELD(reg, 1, 10) << 29)| 0x1fffffff)
226 226
227 #define MAX_RIR_WAY 8 227 #define MAX_RIR_WAY 8
228 228
229 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { 229 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
230 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c }, 230 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
231 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c }, 231 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
232 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c }, 232 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
233 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c }, 233 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
234 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, 234 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
235 }; 235 };
236 236
237 #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19) 237 #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
238 #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14) 238 #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
239 239
240 /* Device 16, functions 2-7 */ 240 /* Device 16, functions 2-7 */
241 241
242 /* 242 /*
243 * FIXME: Implement the error count reads directly 243 * FIXME: Implement the error count reads directly
244 */ 244 */
245 245
246 static const u32 correrrcnt[] = { 246 static const u32 correrrcnt[] = {
247 0x104, 0x108, 0x10c, 0x110, 247 0x104, 0x108, 0x10c, 0x110,
248 }; 248 };
249 249
250 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31) 250 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
251 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30) 251 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
252 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15) 252 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
253 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14) 253 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
254 254
255 static const u32 correrrthrsld[] = { 255 static const u32 correrrthrsld[] = {
256 0x11c, 0x120, 0x124, 0x128, 256 0x11c, 0x120, 0x124, 0x128,
257 }; 257 };
258 258
259 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30) 259 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
260 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14) 260 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
261 261
262 262
263 /* Device 17, function 0 */ 263 /* Device 17, function 0 */
264 264
265 #define RANK_CFG_A 0x0328 265 #define RANK_CFG_A 0x0328
266 266
267 #define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11) 267 #define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11)
268 268
269 /* 269 /*
270 * sbridge structs 270 * sbridge structs
271 */ 271 */
272 272
273 #define NUM_CHANNELS 4 273 #define NUM_CHANNELS 4
274 #define MAX_DIMMS 3 /* Max DIMMs per channel */ 274 #define MAX_DIMMS 3 /* Max DIMMs per channel */
275 275
276 struct sbridge_info { 276 struct sbridge_info {
277 u32 mcmtr; 277 u32 mcmtr;
278 }; 278 };
279 279
280 struct sbridge_channel { 280 struct sbridge_channel {
281 u32 ranks; 281 u32 ranks;
282 u32 dimms; 282 u32 dimms;
283 }; 283 };
284 284
285 struct pci_id_descr { 285 struct pci_id_descr {
286 int dev; 286 int dev;
287 int func; 287 int func;
288 int dev_id; 288 int dev_id;
289 int optional; 289 int optional;
290 }; 290 };
291 291
292 struct pci_id_table { 292 struct pci_id_table {
293 const struct pci_id_descr *descr; 293 const struct pci_id_descr *descr;
294 int n_devs; 294 int n_devs;
295 }; 295 };
296 296
297 struct sbridge_dev { 297 struct sbridge_dev {
298 struct list_head list; 298 struct list_head list;
299 u8 bus, mc; 299 u8 bus, mc;
300 u8 node_id, source_id; 300 u8 node_id, source_id;
301 struct pci_dev **pdev; 301 struct pci_dev **pdev;
302 int n_devs; 302 int n_devs;
303 struct mem_ctl_info *mci; 303 struct mem_ctl_info *mci;
304 }; 304 };
305 305
306 struct sbridge_pvt { 306 struct sbridge_pvt {
307 struct pci_dev *pci_ta, *pci_ddrio, *pci_ras; 307 struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
308 struct pci_dev *pci_sad0, *pci_sad1, *pci_ha0; 308 struct pci_dev *pci_sad0, *pci_sad1, *pci_ha0;
309 struct pci_dev *pci_br; 309 struct pci_dev *pci_br;
310 struct pci_dev *pci_tad[NUM_CHANNELS]; 310 struct pci_dev *pci_tad[NUM_CHANNELS];
311 311
312 struct sbridge_dev *sbridge_dev; 312 struct sbridge_dev *sbridge_dev;
313 313
314 struct sbridge_info info; 314 struct sbridge_info info;
315 struct sbridge_channel channel[NUM_CHANNELS]; 315 struct sbridge_channel channel[NUM_CHANNELS];
316 316
317 /* Memory type detection */ 317 /* Memory type detection */
318 bool is_mirrored, is_lockstep, is_close_pg; 318 bool is_mirrored, is_lockstep, is_close_pg;
319 319
320 /* Fifo double buffers */ 320 /* Fifo double buffers */
321 struct mce mce_entry[MCE_LOG_LEN]; 321 struct mce mce_entry[MCE_LOG_LEN];
322 struct mce mce_outentry[MCE_LOG_LEN]; 322 struct mce mce_outentry[MCE_LOG_LEN];
323 323
324 /* Fifo in/out counters */ 324 /* Fifo in/out counters */
325 unsigned mce_in, mce_out; 325 unsigned mce_in, mce_out;
326 326
327 /* Count of errors lost to FIFO overrun */ 327 /* Count of errors lost to FIFO overrun */
328 unsigned mce_overrun; 328 unsigned mce_overrun;
329 329
330 /* Memory description */ 330 /* Memory description */
331 u64 tolm, tohm; 331 u64 tolm, tohm;
332 }; 332 };
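The mce_in/mce_out pair implies a simple ring over mce_entry[], with mce_overrun counting dropped events. A hedged sketch of an enqueue under that discipline (illustrative only; the driver's real producer/consumer paths live elsewhere in this file):

static int example_mce_enqueue(struct sbridge_pvt *pvt, const struct mce *m)
{
	unsigned next = (pvt->mce_in + 1) % MCE_LOG_LEN;

	if (next == pvt->mce_out) {
		pvt->mce_overrun++;	/* ring full: count the lost event */
		return -ENOSPC;
	}
	pvt->mce_entry[pvt->mce_in] = *m;
	pvt->mce_in = next;
	return 0;
}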
333 333
334 #define PCI_DESCR(device, function, device_id) \ 334 #define PCI_DESCR(device, function, device_id, opt) \
335 .dev = (device), \ 335 .dev = (device), \
336 .func = (function), \ 336 .func = (function), \
337 .dev_id = (device_id) 337 .dev_id = (device_id), \
338 .optional = opt
338 339
339 static const struct pci_id_descr pci_dev_descr_sbridge[] = { 340 static const struct pci_id_descr pci_dev_descr_sbridge[] = {
340 /* Processor Home Agent */ 341 /* Processor Home Agent */
341 { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0) }, 342 { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },
342 343
343 /* Memory controller */ 344 /* Memory controller */
344 { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA) }, 345 { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
345 { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS) }, 346 { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
346 { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0) }, 347 { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
347 { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1) }, 348 { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
348 { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2) }, 349 { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
349 { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3) }, 350 { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
350 { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO) }, 351 { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },
351 352
352 /* System Address Decoder */ 353 /* System Address Decoder */
353 { PCI_DESCR(12, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0) }, 354 { PCI_DESCR(12, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
354 { PCI_DESCR(12, 7, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1) }, 355 { PCI_DESCR(12, 7, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },
355 356
356 /* Broadcast Registers */ 357 /* Broadcast Registers */
357 { PCI_DESCR(13, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_BR) }, 358 { PCI_DESCR(13, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
358 }; 359 };
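The new fourth PCI_DESCR() argument marks the DDRIO device (17.0) as optional, since some BIOSes hide it. A sketch of how a discovery loop can honor the flag (example_get_descr is a hypothetical helper, not the driver's actual probe path):

static int example_get_descr(const struct pci_id_descr *d,
			     struct pci_dev **slot)
{
	*slot = pci_get_device(PCI_VENDOR_ID_INTEL, d->dev_id, NULL);
	if (!*slot)
		/* tolerate a missing device only when it is marked optional */
		return d->optional ? 0 : -ENODEV;
	return 0;
}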
359 360
360 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } 361 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
361 static const struct pci_id_table pci_dev_descr_sbridge_table[] = { 362 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
362 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge), 363 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
363 {0,} /* 0 terminated list. */ 364 {0,} /* 0 terminated list. */
364 }; 365 };
365 366
366 /* 367 /*
367 * pci_device_id table for which devices we are looking for 368 * pci_device_id table for which devices we are looking for
368 */ 369 */
369 static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = { 370 static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = {
370 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, 371 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
371 {0,} /* 0 terminated list. */ 372 {0,} /* 0 terminated list. */
372 }; 373 };
373 374
374 375
375 /**************************************************************************** 376 /****************************************************************************
376 Ancillary status routines 377 Ancillary status routines
377 ****************************************************************************/ 378 ****************************************************************************/
378 379
379 static inline int numrank(u32 mtr) 380 static inline int numrank(u32 mtr)
380 { 381 {
381 int ranks = (1 << RANK_CNT_BITS(mtr)); 382 int ranks = (1 << RANK_CNT_BITS(mtr));
382 383
383 if (ranks > 4) { 384 if (ranks > 4) {
384 edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n", 385 edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n",
385 ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr); 386 ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
386 return -EINVAL; 387 return -EINVAL;
387 } 388 }
388 389
389 return ranks; 390 return ranks;
390 } 391 }
391 392
392 static inline int numrow(u32 mtr) 393 static inline int numrow(u32 mtr)
393 { 394 {
394 int rows = (RANK_WIDTH_BITS(mtr) + 12); 395 int rows = (RANK_WIDTH_BITS(mtr) + 12);
395 396
396 if (rows < 13 || rows > 18) { 397 if (rows < 13 || rows > 18) {
397 edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n", 398 edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
398 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); 399 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
399 return -EINVAL; 400 return -EINVAL;
400 } 401 }
401 402
402 return 1 << rows; 403 return 1 << rows;
403 } 404 }
404 405
405 static inline int numcol(u32 mtr) 406 static inline int numcol(u32 mtr)
406 { 407 {
407 int cols = (COL_WIDTH_BITS(mtr) + 10); 408 int cols = (COL_WIDTH_BITS(mtr) + 10);
408 409
409 if (cols > 12) { 410 if (cols > 12) {
410 edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n", 411 edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
411 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); 412 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
412 return -EINVAL; 413 return -EINVAL;
413 } 414 }
414 415
415 return 1 << cols; 416 return 1 << cols;
416 } 417 }
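Putting numrank()/numrow()/numcol() together, a self-contained sketch that decodes a hypothetical MTR value with the same bit fields (the value is made up; real registers come from the TAD functions):

#include <stdio.h>
#include <stdint.h>

#define GET_BITFIELD(v, lo, hi) \
	(((v) & ((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) >> (lo))
#define RANK_CNT_BITS(mtr)	GET_BITFIELD(mtr, 12, 13)
#define RANK_WIDTH_BITS(mtr)	GET_BITFIELD(mtr, 2, 4)
#define COL_WIDTH_BITS(mtr)	GET_BITFIELD(mtr, 0, 1)

int main(void)
{
	uint32_t mtr = 0x5009;	/* hypothetical: 2 ranks, 14 row bits, 11 col bits */

	printf("ranks=%d rows=%d cols=%d\n",
	       1 << (int)RANK_CNT_BITS(mtr),		/* 2 */
	       1 << (int)(RANK_WIDTH_BITS(mtr) + 12),	/* 16384 */
	       1 << (int)(COL_WIDTH_BITS(mtr) + 10));	/* 2048 */
	return 0;
}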
417 418
418 static struct sbridge_dev *get_sbridge_dev(u8 bus) 419 static struct sbridge_dev *get_sbridge_dev(u8 bus)
419 { 420 {
420 struct sbridge_dev *sbridge_dev; 421 struct sbridge_dev *sbridge_dev;
421 422
422 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 423 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
423 if (sbridge_dev->bus == bus) 424 if (sbridge_dev->bus == bus)
424 return sbridge_dev; 425 return sbridge_dev;
425 } 426 }
426 427
427 return NULL; 428 return NULL;
428 } 429 }
429 430
430 static struct sbridge_dev *alloc_sbridge_dev(u8 bus, 431 static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
431 const struct pci_id_table *table) 432 const struct pci_id_table *table)
432 { 433 {
433 struct sbridge_dev *sbridge_dev; 434 struct sbridge_dev *sbridge_dev;
434 435
435 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL); 436 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
436 if (!sbridge_dev) 437 if (!sbridge_dev)
437 return NULL; 438 return NULL;
438 439
439 sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs, 440 sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
440 GFP_KERNEL); 441 GFP_KERNEL);
441 if (!sbridge_dev->pdev) { 442 if (!sbridge_dev->pdev) {
442 kfree(sbridge_dev); 443 kfree(sbridge_dev);
443 return NULL; 444 return NULL;
444 } 445 }
445 446
446 sbridge_dev->bus = bus; 447 sbridge_dev->bus = bus;
447 sbridge_dev->n_devs = table->n_devs; 448 sbridge_dev->n_devs = table->n_devs;
448 list_add_tail(&sbridge_dev->list, &sbridge_edac_list); 449 list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
449 450
450 return sbridge_dev; 451 return sbridge_dev;
451 } 452 }
452 453
453 static void free_sbridge_dev(struct sbridge_dev *sbridge_dev) 454 static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
454 { 455 {
455 list_del(&sbridge_dev->list); 456 list_del(&sbridge_dev->list);
456 kfree(sbridge_dev->pdev); 457 kfree(sbridge_dev->pdev);
457 kfree(sbridge_dev); 458 kfree(sbridge_dev);
458 } 459 }
459 460
460 /**************************************************************************** 461 /****************************************************************************
461 Memory check routines 462 Memory check routines
462 ****************************************************************************/ 463 ****************************************************************************/
463 static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot, 464 static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
464 unsigned func) 465 unsigned func)
465 { 466 {
466 struct sbridge_dev *sbridge_dev = get_sbridge_dev(bus); 467 struct sbridge_dev *sbridge_dev = get_sbridge_dev(bus);
467 int i; 468 int i;
468 469
469 if (!sbridge_dev) 470 if (!sbridge_dev)
470 return NULL; 471 return NULL;
471 472
472 for (i = 0; i < sbridge_dev->n_devs; i++) { 473 for (i = 0; i < sbridge_dev->n_devs; i++) {
473 if (!sbridge_dev->pdev[i]) 474 if (!sbridge_dev->pdev[i])
474 continue; 475 continue;
475 476
476 if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot && 477 if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
477 PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) { 478 PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
478 edac_dbg(1, "Associated %02x.%02x.%d with %p\n", 479 edac_dbg(1, "Associated %02x.%02x.%d with %p\n",
479 bus, slot, func, sbridge_dev->pdev[i]); 480 bus, slot, func, sbridge_dev->pdev[i]);
480 return sbridge_dev->pdev[i]; 481 return sbridge_dev->pdev[i];
481 } 482 }
482 } 483 }
483 484
484 return NULL; 485 return NULL;
485 } 486 }
486 487
487 /** 488 /**
488 * check_if_ecc_is_active() - Checks if ECC is active 489 * check_if_ecc_is_active() - Checks if ECC is active
489 * @bus: Device bus 490 * @bus: Device bus
490 */ 491 */
491 static int check_if_ecc_is_active(const u8 bus) 492 static int check_if_ecc_is_active(const u8 bus)
492 { 493 {
493 struct pci_dev *pdev = NULL; 494 struct pci_dev *pdev = NULL;
494 u32 mcmtr; 495 u32 mcmtr;
495 496
496 pdev = get_pdev_slot_func(bus, 15, 0); 497 pdev = get_pdev_slot_func(bus, 15, 0);
497 if (!pdev) { 498 if (!pdev) {
498 sbridge_printk(KERN_ERR, "Couldn't find PCI device " 499 sbridge_printk(KERN_ERR, "Couldn't find PCI device "
499 "%2x.%02d.%d!!!\n", 500 "%2x.%02d.%d!!!\n",
500 bus, 15, 0); 501 bus, 15, 0);
501 return -ENODEV; 502 return -ENODEV;
502 } 503 }
503 504
504 pci_read_config_dword(pdev, MCMTR, &mcmtr); 505 pci_read_config_dword(pdev, MCMTR, &mcmtr);
505 if (!IS_ECC_ENABLED(mcmtr)) { 506 if (!IS_ECC_ENABLED(mcmtr)) {
506 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); 507 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
507 return -ENODEV; 508 return -ENODEV;
508 } 509 }
509 return 0; 510 return 0;
510 } 511 }
511 512
512 static int get_dimm_config(struct mem_ctl_info *mci) 513 static int get_dimm_config(struct mem_ctl_info *mci)
513 { 514 {
514 struct sbridge_pvt *pvt = mci->pvt_info; 515 struct sbridge_pvt *pvt = mci->pvt_info;
515 struct dimm_info *dimm; 516 struct dimm_info *dimm;
516 unsigned i, j, banks, ranks, rows, cols, npages; 517 unsigned i, j, banks, ranks, rows, cols, npages;
517 u64 size; 518 u64 size;
518 u32 reg; 519 u32 reg;
519 enum edac_type mode; 520 enum edac_type mode;
520 enum mem_type mtype; 521 enum mem_type mtype;
521 522
522 pci_read_config_dword(pvt->pci_br, SAD_TARGET, &reg); 523 pci_read_config_dword(pvt->pci_br, SAD_TARGET, &reg);
523 pvt->sbridge_dev->source_id = SOURCE_ID(reg); 524 pvt->sbridge_dev->source_id = SOURCE_ID(reg);
524 525
525 pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg); 526 pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg);
526 pvt->sbridge_dev->node_id = NODE_ID(reg); 527 pvt->sbridge_dev->node_id = NODE_ID(reg);
527 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", 528 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
528 pvt->sbridge_dev->mc, 529 pvt->sbridge_dev->mc,
529 pvt->sbridge_dev->node_id, 530 pvt->sbridge_dev->node_id,
530 pvt->sbridge_dev->source_id); 531 pvt->sbridge_dev->source_id);
531 532
532 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg); 533 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
533 if (IS_MIRROR_ENABLED(reg)) { 534 if (IS_MIRROR_ENABLED(reg)) {
534 edac_dbg(0, "Memory mirror is enabled\n"); 535 edac_dbg(0, "Memory mirror is enabled\n");
535 pvt->is_mirrored = true; 536 pvt->is_mirrored = true;
536 } else { 537 } else {
537 edac_dbg(0, "Memory mirror is disabled\n"); 538 edac_dbg(0, "Memory mirror is disabled\n");
538 pvt->is_mirrored = false; 539 pvt->is_mirrored = false;
539 } 540 }
540 541
541 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); 542 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
542 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { 543 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
543 edac_dbg(0, "Lockstep is enabled\n"); 544 edac_dbg(0, "Lockstep is enabled\n");
544 mode = EDAC_S8ECD8ED; 545 mode = EDAC_S8ECD8ED;
545 pvt->is_lockstep = true; 546 pvt->is_lockstep = true;
546 } else { 547 } else {
547 edac_dbg(0, "Lockstep is disabled\n"); 548 edac_dbg(0, "Lockstep is disabled\n");
548 mode = EDAC_S4ECD4ED; 549 mode = EDAC_S4ECD4ED;
549 pvt->is_lockstep = false; 550 pvt->is_lockstep = false;
550 } 551 }
551 if (IS_CLOSE_PG(pvt->info.mcmtr)) { 552 if (IS_CLOSE_PG(pvt->info.mcmtr)) {
552 edac_dbg(0, "address map is on closed page mode\n"); 553 edac_dbg(0, "address map is on closed page mode\n");
553 pvt->is_close_pg = true; 554 pvt->is_close_pg = true;
554 } else { 555 } else {
555 edac_dbg(0, "address map is on open page mode\n"); 556 edac_dbg(0, "address map is on open page mode\n");
556 pvt->is_close_pg = false; 557 pvt->is_close_pg = false;
557 } 558 }
558 559
559 pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg); 560 if (pvt->pci_ddrio) {
560 if (IS_RDIMM_ENABLED(reg)) { 561 pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
561 /* FIXME: Can also be LRDIMM */ 562 if (IS_RDIMM_ENABLED(reg)) {
562 edac_dbg(0, "Memory is registered\n"); 563 /* FIXME: Can also be LRDIMM */
563 mtype = MEM_RDDR3; 564 edac_dbg(0, "Memory is registered\n");
565 mtype = MEM_RDDR3;
566 } else {
567 edac_dbg(0, "Memory is unregistered\n");
568 mtype = MEM_DDR3;
569 }
564 } else { 570 } else {
565 edac_dbg(0, "Memory is unregistered\n"); 571 edac_dbg(0, "Cannot determine memory type\n");
566 mtype = MEM_DDR3; 572 mtype = MEM_UNKNOWN;
567 } 573 }
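	/* When the BIOS hides the DDRIO device (17.0), pci_ddrio stays NULL
	 * and the probe degrades to MEM_UNKNOWN instead of failing; this is
	 * exactly why descriptor 17.0 is now marked optional. */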
568 574
569 /* On all supported DDR3 DIMM types, there are 8 banks available */ 575 /* On all supported DDR3 DIMM types, there are 8 banks available */
570 banks = 8; 576 banks = 8;
571 577
572 for (i = 0; i < NUM_CHANNELS; i++) { 578 for (i = 0; i < NUM_CHANNELS; i++) {
573 u32 mtr; 579 u32 mtr;
574 580
575 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) { 581 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
576 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 582 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
577 i, j, 0); 583 i, j, 0);
578 pci_read_config_dword(pvt->pci_tad[i], 584 pci_read_config_dword(pvt->pci_tad[i],
579 mtr_regs[j], &mtr); 585 mtr_regs[j], &mtr);
580 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); 586 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
581 if (IS_DIMM_PRESENT(mtr)) { 587 if (IS_DIMM_PRESENT(mtr)) {
582 pvt->channel[i].dimms++; 588 pvt->channel[i].dimms++;
583 589
584 ranks = numrank(mtr); 590 ranks = numrank(mtr);
585 rows = numrow(mtr); 591 rows = numrow(mtr);
586 cols = numcol(mtr); 592 cols = numcol(mtr);
587 593
588 /* DDR3 has 8 I/O banks */ 594 /* DDR3 has 8 I/O banks */
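				/* rows * cols * banks * ranks counts 64-bit
				 * (8-byte) words, so >> (20 - 3) converts
				 * that word count straight to MiB */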
589 size = ((u64)rows * cols * banks * ranks) >> (20 - 3); 595 size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
590 npages = MiB_TO_PAGES(size); 596 npages = MiB_TO_PAGES(size);
591 597
592 edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", 598 edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
593 pvt->sbridge_dev->mc, i, j, 599 pvt->sbridge_dev->mc, i, j,
594 size, npages, 600 size, npages,
595 banks, ranks, rows, cols); 601 banks, ranks, rows, cols);
596 602
597 dimm->nr_pages = npages; 603 dimm->nr_pages = npages;
598 dimm->grain = 32; 604 dimm->grain = 32;
599 dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4; 605 dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
600 dimm->mtype = mtype; 606 dimm->mtype = mtype;
601 dimm->edac_mode = mode; 607 dimm->edac_mode = mode;
602 snprintf(dimm->label, sizeof(dimm->label), 608 snprintf(dimm->label, sizeof(dimm->label),
603 "CPU_SrcID#%u_Channel#%u_DIMM#%u", 609 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
604 pvt->sbridge_dev->source_id, i, j); 610 pvt->sbridge_dev->source_id, i, j);
605 } 611 }
606 } 612 }
607 } 613 }
608 614
609 return 0; 615 return 0;
610 } 616 }
611 617
612 static void get_memory_layout(const struct mem_ctl_info *mci) 618 static void get_memory_layout(const struct mem_ctl_info *mci)
613 { 619 {
614 struct sbridge_pvt *pvt = mci->pvt_info; 620 struct sbridge_pvt *pvt = mci->pvt_info;
615 int i, j, k, n_sads, n_tads, sad_interl; 621 int i, j, k, n_sads, n_tads, sad_interl;
616 u32 reg; 622 u32 reg;
617 u64 limit, prv = 0; 623 u64 limit, prv = 0;
618 u64 tmp_mb; 624 u64 tmp_mb;
619 u32 mb, kb; 625 u32 mb, kb;
620 u32 rir_way; 626 u32 rir_way;
621 627
622 /* 628 /*
623 * Step 1) Get TOLM/TOHM ranges 629 * Step 1) Get TOLM/TOHM ranges
624 */ 630 */
625 631
626 /* Address range is 32:28 */ 632 /* Address range is 32:28 */
627 pci_read_config_dword(pvt->pci_sad1, TOLM, 633 pci_read_config_dword(pvt->pci_sad1, TOLM,
628 &reg); 634 &reg);
629 pvt->tolm = GET_TOLM(reg); 635 pvt->tolm = GET_TOLM(reg);
630 tmp_mb = (1 + pvt->tolm) >> 20; 636 tmp_mb = (1 + pvt->tolm) >> 20;
631 637
632 mb = div_u64_rem(tmp_mb, 1000, &kb); 638 mb = div_u64_rem(tmp_mb, 1000, &kb);
633 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm); 639 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
634 640
635 /* Address range is already 45:25 */ 641 /* Address range is already 45:25 */
636 pci_read_config_dword(pvt->pci_sad1, TOHM, 642 pci_read_config_dword(pvt->pci_sad1, TOHM,
637 &reg); 643 &reg);
638 pvt->tohm = GET_TOHM(reg); 644 pvt->tohm = GET_TOHM(reg);
639 tmp_mb = (1 + pvt->tohm) >> 20; 645 tmp_mb = (1 + pvt->tohm) >> 20;
640 646
641 mb = div_u64_rem(tmp_mb, 1000, &kb); 647 mb = div_u64_rem(tmp_mb, 1000, &kb);
642 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm); 648 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
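/*
 * Note on the "%u.%03u GB" prints above (illustrative): tmp_mb holds
 * megabytes and div_u64_rem() splits it decimally; e.g. tmp_mb = 16384
 * yields mb = 16, kb = 384, printed as "16.384 GB".
 */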
643 649
644 /* 650 /*
645 * Step 2) Get SAD range and SAD Interleave list 651 * Step 2) Get SAD range and SAD Interleave list
646 * TAD registers contain the interleave wayness. However, it 652 * TAD registers contain the interleave wayness. However, it
647 * seems simpler to just discover it indirectly, with the 653 * seems simpler to just discover it indirectly, with the
648 * algorithm below. 654 * algorithm below.
649 */ 655 */
650 prv = 0; 656 prv = 0;
651 for (n_sads = 0; n_sads < MAX_SAD; n_sads++) { 657 for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
652 /* SAD_LIMIT Address range is 45:26 */ 658 /* SAD_LIMIT Address range is 45:26 */
653 pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads], 659 pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
654 &reg); 660 &reg);
655 limit = SAD_LIMIT(reg); 661 limit = SAD_LIMIT(reg);
656 662
657 if (!DRAM_RULE_ENABLE(reg)) 663 if (!DRAM_RULE_ENABLE(reg))
658 continue; 664 continue;
659 665
660 if (limit <= prv) 666 if (limit <= prv)
661 break; 667 break;
662 668
663 tmp_mb = (limit + 1) >> 20; 669 tmp_mb = (limit + 1) >> 20;
664 mb = div_u64_rem(tmp_mb, 1000, &kb); 670 mb = div_u64_rem(tmp_mb, 1000, &kb);
665 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n", 671 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
666 n_sads, 672 n_sads,
667 get_dram_attr(reg), 673 get_dram_attr(reg),
668 mb, kb, 674 mb, kb,
669 ((u64)tmp_mb) << 20L, 675 ((u64)tmp_mb) << 20L,
670 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]", 676 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
671 reg); 677 reg);
672 prv = limit; 678 prv = limit;
673 679
674 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], 680 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
675 &reg); 681 &reg);
676 sad_interl = sad_pkg(reg, 0); 682 sad_interl = sad_pkg(reg, 0);
677 for (j = 0; j < 8; j++) { 683 for (j = 0; j < 8; j++) {
678 if (j > 0 && sad_interl == sad_pkg(reg, j)) 684 if (j > 0 && sad_interl == sad_pkg(reg, j))
679 break; 685 break;
680 686
681 edac_dbg(0, "SAD#%d, interleave #%d: %d\n", 687 edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
682 n_sads, j, sad_pkg(reg, j)); 688 n_sads, j, sad_pkg(reg, j));
683 } 689 }
684 } 690 }
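/*
 * Example (illustrative): a 2-way socket interleave repeats its targets
 * across the 8 package slots, e.g. [0, 1, 0, 1, 0, 1, 0, 1]. The inner
 * loop above prints slots 0 and 1 only, stopping at j = 2, where
 * sad_pkg(reg, 2) == sad_pkg(reg, 0) again.
 */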
685 691
686 /* 692 /*
687 * Step 3) Get TAD range 693 * Step 3) Get TAD range
688 */ 694 */
689 prv = 0; 695 prv = 0;
690 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { 696 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
691 pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads], 697 pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
692 &reg); 698 &reg);
693 limit = TAD_LIMIT(reg); 699 limit = TAD_LIMIT(reg);
694 if (limit <= prv) 700 if (limit <= prv)
695 break; 701 break;
696 tmp_mb = (limit + 1) >> 20; 702 tmp_mb = (limit + 1) >> 20;
697 703
698 mb = div_u64_rem(tmp_mb, 1000, &kb); 704 mb = div_u64_rem(tmp_mb, 1000, &kb);
699 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", 705 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
700 n_tads, mb, kb, 706 n_tads, mb, kb,
701 ((u64)tmp_mb) << 20L, 707 ((u64)tmp_mb) << 20L,
702 (u32)TAD_SOCK(reg), 708 (u32)TAD_SOCK(reg),
703 (u32)TAD_CH(reg), 709 (u32)TAD_CH(reg),
704 (u32)TAD_TGT0(reg), 710 (u32)TAD_TGT0(reg),
705 (u32)TAD_TGT1(reg), 711 (u32)TAD_TGT1(reg),
706 (u32)TAD_TGT2(reg), 712 (u32)TAD_TGT2(reg),
707 (u32)TAD_TGT3(reg), 713 (u32)TAD_TGT3(reg),
708 reg); 714 reg);
709 prv = limit; 715 prv = limit;
710 } 716 }
711 717
712 /* 718 /*
713 * Step 4) Get TAD offsets for each channel 719 * Step 4) Get TAD offsets for each channel
714 */ 720 */
715 for (i = 0; i < NUM_CHANNELS; i++) { 721 for (i = 0; i < NUM_CHANNELS; i++) {
716 if (!pvt->channel[i].dimms) 722 if (!pvt->channel[i].dimms)
717 continue; 723 continue;
718 for (j = 0; j < n_tads; j++) { 724 for (j = 0; j < n_tads; j++) {
719 pci_read_config_dword(pvt->pci_tad[i], 725 pci_read_config_dword(pvt->pci_tad[i],
720 tad_ch_nilv_offset[j], 726 tad_ch_nilv_offset[j],
721 &reg); 727 &reg);
722 tmp_mb = TAD_OFFSET(reg) >> 20; 728 tmp_mb = TAD_OFFSET(reg) >> 20;
723 mb = div_u64_rem(tmp_mb, 1000, &kb); 729 mb = div_u64_rem(tmp_mb, 1000, &kb);
724 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", 730 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
725 i, j, 731 i, j,
726 mb, kb, 732 mb, kb,
727 ((u64)tmp_mb) << 20L, 733 ((u64)tmp_mb) << 20L,
728 reg); 734 reg);
729 } 735 }
730 } 736 }
731 737
732 /* 738 /*
733 * Step 6) Get RIR Wayness/Limit for each channel 739 * Step 6) Get RIR Wayness/Limit for each channel
734 */ 740 */
735 for (i = 0; i < NUM_CHANNELS; i++) { 741 for (i = 0; i < NUM_CHANNELS; i++) {
736 if (!pvt->channel[i].dimms) 742 if (!pvt->channel[i].dimms)
737 continue; 743 continue;
738 for (j = 0; j < MAX_RIR_RANGES; j++) { 744 for (j = 0; j < MAX_RIR_RANGES; j++) {
739 pci_read_config_dword(pvt->pci_tad[i], 745 pci_read_config_dword(pvt->pci_tad[i],
740 rir_way_limit[j], 746 rir_way_limit[j],
741 &reg); 747 &reg);
742 748
743 if (!IS_RIR_VALID(reg)) 749 if (!IS_RIR_VALID(reg))
744 continue; 750 continue;
745 751
746 tmp_mb = RIR_LIMIT(reg) >> 20; 752 tmp_mb = RIR_LIMIT(reg) >> 20;
747 rir_way = 1 << RIR_WAY(reg); 753 rir_way = 1 << RIR_WAY(reg);
748 mb = div_u64_rem(tmp_mb, 1000, &kb); 754 mb = div_u64_rem(tmp_mb, 1000, &kb);
749 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", 755 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
750 i, j, 756 i, j,
751 mb, kb, 757 mb, kb,
752 ((u64)tmp_mb) << 20L, 758 ((u64)tmp_mb) << 20L,
753 rir_way, 759 rir_way,
754 reg); 760 reg);
755 761
756 for (k = 0; k < rir_way; k++) { 762 for (k = 0; k < rir_way; k++) {
757 pci_read_config_dword(pvt->pci_tad[i], 763 pci_read_config_dword(pvt->pci_tad[i],
758 rir_offset[j][k], 764 rir_offset[j][k],
759 &reg); 765 &reg);
760 tmp_mb = RIR_OFFSET(reg) << 6; 766 tmp_mb = RIR_OFFSET(reg) << 6;
761 767
762 mb = div_u64_rem(tmp_mb, 1000, &kb); 768 mb = div_u64_rem(tmp_mb, 1000, &kb);
763 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", 769 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
764 i, j, k, 770 i, j, k,
765 mb, kb, 771 mb, kb,
766 ((u64)tmp_mb) << 20L, 772 ((u64)tmp_mb) << 20L,
767 (u32)RIR_RNK_TGT(reg), 773 (u32)RIR_RNK_TGT(reg),
768 reg); 774 reg);
769 } 775 }
770 } 776 }
771 } 777 }
772 } 778 }
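/*
 * Summary (illustrative): the registers dumped above form a three-level
 * decode hierarchy,
 *
 *   system address --SAD--> socket --TAD--> channel --RIR--> rank
 *
 * get_memory_error_data() below walks the same tables to translate a
 * single faulting address.
 */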
773 779
774 struct mem_ctl_info *get_mci_for_node_id(u8 node_id) 780 struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
775 { 781 {
776 struct sbridge_dev *sbridge_dev; 782 struct sbridge_dev *sbridge_dev;
777 783
778 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { 784 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
779 if (sbridge_dev->node_id == node_id) 785 if (sbridge_dev->node_id == node_id)
780 return sbridge_dev->mci; 786 return sbridge_dev->mci;
781 } 787 }
782 return NULL; 788 return NULL;
783 } 789 }
784 790
785 static int get_memory_error_data(struct mem_ctl_info *mci, 791 static int get_memory_error_data(struct mem_ctl_info *mci,
786 u64 addr, 792 u64 addr,
787 u8 *socket, 793 u8 *socket,
788 long *channel_mask, 794 long *channel_mask,
789 u8 *rank, 795 u8 *rank,
790 char **area_type, char *msg) 796 char **area_type, char *msg)
791 { 797 {
792 struct mem_ctl_info *new_mci; 798 struct mem_ctl_info *new_mci;
793 struct sbridge_pvt *pvt = mci->pvt_info; 799 struct sbridge_pvt *pvt = mci->pvt_info;
794 int n_rir, n_sads, n_tads, sad_way, sck_xch; 800 int n_rir, n_sads, n_tads, sad_way, sck_xch;
795 int sad_interl, idx, base_ch; 801 int sad_interl, idx, base_ch;
796 int interleave_mode; 802 int interleave_mode;
797 unsigned sad_interleave[MAX_INTERLEAVE]; 803 unsigned sad_interleave[MAX_INTERLEAVE];
798 u32 reg; 804 u32 reg;
799 u8 ch_way, sck_way; 805 u8 ch_way, sck_way;
800 u32 tad_offset; 806 u32 tad_offset;
801 u32 rir_way; 807 u32 rir_way;
802 u32 mb, kb; 808 u32 mb, kb;
803 u64 ch_addr, offset, limit, prv = 0; 809 u64 ch_addr, offset, limit, prv = 0;
804 810
805 811
806 /* 812 /*
807 * Step 0) Check if the address is at special memory ranges 813 * Step 0) Check if the address is at special memory ranges
808 * The check below is probably enough to cover all cases where 814 * The check below is probably enough to cover all cases where
809 * the error is not inside memory, except for the legacy 815 * the error is not inside memory, except for the legacy
810 * range (e.g. VGA addresses). It is unlikely, however, that the 816 * range (e.g. VGA addresses). It is unlikely, however, that the
811 * memory controller would generate an error on that range. 817 * memory controller would generate an error on that range.
812 */ 818 */
813 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) { 819 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
814 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr); 820 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
815 return -EINVAL; 821 return -EINVAL;
816 } 822 }
817 if (addr >= (u64)pvt->tohm) { 823 if (addr >= (u64)pvt->tohm) {
818 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr); 824 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
819 return -EINVAL; 825 return -EINVAL;
820 } 826 }
821 827
822 /* 828 /*
823 * Step 1) Get socket 829 * Step 1) Get socket
824 */ 830 */
825 for (n_sads = 0; n_sads < MAX_SAD; n_sads++) { 831 for (n_sads = 0; n_sads < MAX_SAD; n_sads++) {
826 pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads], 832 pci_read_config_dword(pvt->pci_sad0, dram_rule[n_sads],
827 &reg); 833 &reg);
828 834
829 if (!DRAM_RULE_ENABLE(reg)) 835 if (!DRAM_RULE_ENABLE(reg))
830 continue; 836 continue;
831 837
832 limit = SAD_LIMIT(reg); 838 limit = SAD_LIMIT(reg);
833 if (limit <= prv) { 839 if (limit <= prv) {
834 sprintf(msg, "Can't discover the memory socket"); 840 sprintf(msg, "Can't discover the memory socket");
835 return -EINVAL; 841 return -EINVAL;
836 } 842 }
837 if (addr <= limit) 843 if (addr <= limit)
838 break; 844 break;
839 prv = limit; 845 prv = limit;
840 } 846 }
841 if (n_sads == MAX_SAD) { 847 if (n_sads == MAX_SAD) {
842 sprintf(msg, "Can't discover the memory socket"); 848 sprintf(msg, "Can't discover the memory socket");
843 return -EINVAL; 849 return -EINVAL;
844 } 850 }
845 *area_type = get_dram_attr(reg); 851 *area_type = get_dram_attr(reg);
846 interleave_mode = INTERLEAVE_MODE(reg); 852 interleave_mode = INTERLEAVE_MODE(reg);
847 853
848 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], 854 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
849 &reg); 855 &reg);
850 sad_interl = sad_pkg(reg, 0); 856 sad_interl = sad_pkg(reg, 0);
851 for (sad_way = 0; sad_way < 8; sad_way++) { 857 for (sad_way = 0; sad_way < 8; sad_way++) {
852 if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way)) 858 if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
853 break; 859 break;
854 sad_interleave[sad_way] = sad_pkg(reg, sad_way); 860 sad_interleave[sad_way] = sad_pkg(reg, sad_way);
855 edac_dbg(0, "SAD interleave #%d: %d\n", 861 edac_dbg(0, "SAD interleave #%d: %d\n",
856 sad_way, sad_interleave[sad_way]); 862 sad_way, sad_interleave[sad_way]);
857 } 863 }
858 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", 864 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
859 pvt->sbridge_dev->mc, 865 pvt->sbridge_dev->mc,
860 n_sads, 866 n_sads,
861 addr, 867 addr,
862 limit, 868 limit,
863 sad_way + 7, 869 sad_way + 7,
864 interleave_mode ? "" : "XOR[18:16]"); 870 interleave_mode ? "" : "XOR[18:16]");
865 if (interleave_mode) 871 if (interleave_mode)
866 idx = ((addr >> 6) ^ (addr >> 16)) & 7; 872 idx = ((addr >> 6) ^ (addr >> 16)) & 7;
867 else 873 else
868 idx = (addr >> 6) & 7; 874 idx = (addr >> 6) & 7;
869 switch (sad_way) { 875 switch (sad_way) {
870 case 1: 876 case 1:
871 idx = 0; 877 idx = 0;
872 break; 878 break;
873 case 2: 879 case 2:
874 idx = idx & 1; 880 idx = idx & 1;
875 break; 881 break;
876 case 4: 882 case 4:
877 idx = idx & 3; 883 idx = idx & 3;
878 break; 884 break;
879 case 8: 885 case 8:
880 break; 886 break;
881 default: 887 default:
882 sprintf(msg, "Can't discover socket interleave"); 888 sprintf(msg, "Can't discover socket interleave");
883 return -EINVAL; 889 return -EINVAL;
884 } 890 }
885 *socket = sad_interleave[idx]; 891 *socket = sad_interleave[idx];
886 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", 892 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
887 idx, sad_way, *socket); 893 idx, sad_way, *socket);
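/*
 * Worked example (illustrative): taking the non-XOR branch above with
 * addr = 0x12345678 and sad_way = 4:
 *   idx = (0x12345678 >> 6) & 7 = 0x48d159 & 7 = 1, then idx &= 3 -> 1,
 * so the error is attributed to sad_interleave[1].
 */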
888 894
889 /* 895 /*
890 * Move to the proper node structure, in order to access the 896 * Move to the proper node structure, in order to access the
891 * right PCI registers 897 * right PCI registers
892 */ 898 */
893 new_mci = get_mci_for_node_id(*socket); 899 new_mci = get_mci_for_node_id(*socket);
894 if (!new_mci) { 900 if (!new_mci) {
895 sprintf(msg, "Struct for socket #%u wasn't initialized", 901 sprintf(msg, "Struct for socket #%u wasn't initialized",
896 *socket); 902 *socket);
897 return -EINVAL; 903 return -EINVAL;
898 } 904 }
899 mci = new_mci; 905 mci = new_mci;
900 pvt = mci->pvt_info; 906 pvt = mci->pvt_info;
901 907
902 /* 908 /*
903 * Step 2) Get memory channel 909 * Step 2) Get memory channel
904 */ 910 */
905 prv = 0; 911 prv = 0;
906 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) { 912 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
907 pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads], 913 pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
908 &reg); 914 &reg);
909 limit = TAD_LIMIT(reg); 915 limit = TAD_LIMIT(reg);
910 if (limit <= prv) { 916 if (limit <= prv) {
911 sprintf(msg, "Can't discover the memory channel"); 917 sprintf(msg, "Can't discover the memory channel");
912 return -EINVAL; 918 return -EINVAL;
913 } 919 }
914 if (addr <= limit) 920 if (addr <= limit)
915 break; 921 break;
916 prv = limit; 922 prv = limit;
917 } 923 }
918 ch_way = TAD_CH(reg) + 1; 924 ch_way = TAD_CH(reg) + 1;
919 sck_way = TAD_SOCK(reg) + 1; 925 sck_way = TAD_SOCK(reg) + 1;
920 /* 926 /*
921 * FIXME: Is it right to always use channel 0 for offsets? 927 * FIXME: Is it right to always use channel 0 for offsets?
922 */ 928 */
923 pci_read_config_dword(pvt->pci_tad[0], 929 pci_read_config_dword(pvt->pci_tad[0],
924 tad_ch_nilv_offset[n_tads], 930 tad_ch_nilv_offset[n_tads],
925 &tad_offset); 931 &tad_offset);
926 932
927 if (ch_way == 3) 933 if (ch_way == 3)
928 idx = addr >> 6; 934 idx = addr >> 6;
929 else 935 else
930 idx = addr >> (6 + sck_way); 936 idx = addr >> (6 + sck_way);
931 idx = idx % ch_way; 937 idx = idx % ch_way;
932 938
933 /* 939 /*
934 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ??? 940 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
935 */ 941 */
936 switch (idx) { 942 switch (idx) {
937 case 0: 943 case 0:
938 base_ch = TAD_TGT0(reg); 944 base_ch = TAD_TGT0(reg);
939 break; 945 break;
940 case 1: 946 case 1:
941 base_ch = TAD_TGT1(reg); 947 base_ch = TAD_TGT1(reg);
942 break; 948 break;
943 case 2: 949 case 2:
944 base_ch = TAD_TGT2(reg); 950 base_ch = TAD_TGT2(reg);
945 break; 951 break;
946 case 3: 952 case 3:
947 base_ch = TAD_TGT3(reg); 953 base_ch = TAD_TGT3(reg);
948 break; 954 break;
949 default: 955 default:
950 sprintf(msg, "Can't discover the TAD target"); 956 sprintf(msg, "Can't discover the TAD target");
951 return -EINVAL; 957 return -EINVAL;
952 } 958 }
953 *channel_mask = 1 << base_ch; 959 *channel_mask = 1 << base_ch;
954 960
955 if (pvt->is_mirrored) { 961 if (pvt->is_mirrored) {
956 *channel_mask |= 1 << ((base_ch + 2) % 4); 962 *channel_mask |= 1 << ((base_ch + 2) % 4);
957 switch(ch_way) { 963 switch(ch_way) {
958 case 2: 964 case 2:
959 case 4: 965 case 4:
960 sck_xch = 1 << sck_way * (ch_way >> 1); 966 sck_xch = 1 << sck_way * (ch_way >> 1);
961 break; 967 break;
962 default: 968 default:
963 sprintf(msg, "Invalid mirror set. Can't decode addr"); 969 sprintf(msg, "Invalid mirror set. Can't decode addr");
964 return -EINVAL; 970 return -EINVAL;
965 } 971 }
966 } else 972 } else
967 sck_xch = (1 << sck_way) * ch_way; 973 sck_xch = (1 << sck_way) * ch_way;
968 974
969 if (pvt->is_lockstep) 975 if (pvt->is_lockstep)
970 *channel_mask |= 1 << ((base_ch + 1) % 4); 976 *channel_mask |= 1 << ((base_ch + 1) % 4);
971 977
972 offset = TAD_OFFSET(tad_offset); 978 offset = TAD_OFFSET(tad_offset);
973 979
974 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n", 980 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
975 n_tads, 981 n_tads,
976 addr, 982 addr,
977 limit, 983 limit,
978 (u32)TAD_SOCK(reg), 984 (u32)TAD_SOCK(reg),
979 ch_way, 985 ch_way,
980 offset, 986 offset,
981 idx, 987 idx,
982 base_ch, 988 base_ch,
983 *channel_mask); 989 *channel_mask);
984 990
985 /* Calculate channel address */ 991 /* Calculate channel address */
986 /* Remove the TAD offset */ 992 /* Remove the TAD offset */
987 993
988 if (offset > addr) { 994 if (offset > addr) {
989 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!", 995 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
990 offset, addr); 996 offset, addr);
991 return -EINVAL; 997 return -EINVAL;
992 } 998 }
993 addr -= offset; 999 addr -= offset;
994 /* Store the low bits [0:6] of the addr */ 1000 /* Store the low bits [0:6] of the addr */
995 ch_addr = addr & 0x7f; 1001 ch_addr = addr & 0x7f;
996 /* Remove socket wayness and remove 6 bits */ 1002 /* Remove socket wayness and remove 6 bits */
997 addr >>= 6; 1003 addr >>= 6;
998 addr = div_u64(addr, sck_xch); 1004 addr = div_u64(addr, sck_xch);
999 #if 0 1005 #if 0
1000 /* Divide by channel way */ 1006 /* Divide by channel way */
1001 addr = addr / ch_way; 1007 addr = addr / ch_way;
1002 #endif 1008 #endif
1003 /* Recover the last 6 bits */ 1009 /* Recover the last 6 bits */
1004 ch_addr |= addr << 6; 1010 ch_addr |= addr << 6;
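/*
 * Worked example (illustrative): with sck_way = 1 and ch_way = 2,
 * sck_xch = (1 << 1) * 2 = 4. For addr = 0x1000040 after the TAD
 * offset was subtracted:
 *   ch_addr = addr & 0x7f   -> 0x40
 *   addr >>= 6              -> 0x40001
 *   addr = div_u64(addr, 4) -> 0x10000
 *   ch_addr |= addr << 6    -> 0x400040
 */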
1005 1011
1006 /* 1012 /*
1007 * Step 3) Decode rank 1013 * Step 3) Decode rank
1008 */ 1014 */
1009 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) { 1015 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
1010 pci_read_config_dword(pvt->pci_tad[base_ch], 1016 pci_read_config_dword(pvt->pci_tad[base_ch],
1011 rir_way_limit[n_rir], 1017 rir_way_limit[n_rir],
1012 &reg); 1018 &reg);
1013 1019
1014 if (!IS_RIR_VALID(reg)) 1020 if (!IS_RIR_VALID(reg))
1015 continue; 1021 continue;
1016 1022
1017 limit = RIR_LIMIT(reg); 1023 limit = RIR_LIMIT(reg);
1018 mb = div_u64_rem(limit >> 20, 1000, &kb); 1024 mb = div_u64_rem(limit >> 20, 1000, &kb);
1019 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", 1025 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
1020 n_rir, 1026 n_rir,
1021 mb, kb, 1027 mb, kb,
1022 limit, 1028 limit,
1023 1 << RIR_WAY(reg)); 1029 1 << RIR_WAY(reg));
1024 if (ch_addr <= limit) 1030 if (ch_addr <= limit)
1025 break; 1031 break;
1026 } 1032 }
1027 if (n_rir == MAX_RIR_RANGES) { 1033 if (n_rir == MAX_RIR_RANGES) {
1028 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx", 1034 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
1029 ch_addr); 1035 ch_addr);
1030 return -EINVAL; 1036 return -EINVAL;
1031 } 1037 }
1032 rir_way = RIR_WAY(reg); 1038 rir_way = RIR_WAY(reg);
1033 if (pvt->is_close_pg) 1039 if (pvt->is_close_pg)
1034 idx = (ch_addr >> 6); 1040 idx = (ch_addr >> 6);
1035 else 1041 else
1036 idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */ 1042 idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */
1037 idx %= 1 << rir_way; 1043 idx %= 1 << rir_way;
1038 1044
1039 pci_read_config_dword(pvt->pci_tad[base_ch], 1045 pci_read_config_dword(pvt->pci_tad[base_ch],
1040 rir_offset[n_rir][idx], 1046 rir_offset[n_rir][idx],
1041 &reg); 1047 &reg);
1042 *rank = RIR_RNK_TGT(reg); 1048 *rank = RIR_RNK_TGT(reg);
1043 1049
1044 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", 1050 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
1045 n_rir, 1051 n_rir,
1046 ch_addr, 1052 ch_addr,
1047 limit, 1053 limit,
1048 rir_way, 1054 rir_way,
1049 idx); 1055 idx);
1050 1056
1051 return 0; 1057 return 0;
1052 } 1058 }
1053 1059
1054 /**************************************************************************** 1060 /****************************************************************************
1055 Device initialization routines: put/get, init/exit 1061 Device initialization routines: put/get, init/exit
1056 ****************************************************************************/ 1062 ****************************************************************************/
1057 1063
1058 /* 1064 /*
1059 * sbridge_put_all_devices 'puts' all the devices that we have 1065 * sbridge_put_all_devices 'puts' all the devices that we have
1060 * reserved via 'get' 1066 * reserved via 'get'
1061 */ 1067 */
1062 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev) 1068 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
1063 { 1069 {
1064 int i; 1070 int i;
1065 1071
1066 edac_dbg(0, "\n"); 1072 edac_dbg(0, "\n");
1067 for (i = 0; i < sbridge_dev->n_devs; i++) { 1073 for (i = 0; i < sbridge_dev->n_devs; i++) {
1068 struct pci_dev *pdev = sbridge_dev->pdev[i]; 1074 struct pci_dev *pdev = sbridge_dev->pdev[i];
1069 if (!pdev) 1075 if (!pdev)
1070 continue; 1076 continue;
1071 edac_dbg(0, "Removing dev %02x:%02x.%d\n", 1077 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
1072 pdev->bus->number, 1078 pdev->bus->number,
1073 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1079 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1074 pci_dev_put(pdev); 1080 pci_dev_put(pdev);
1075 } 1081 }
1076 } 1082 }
1077 1083
1078 static void sbridge_put_all_devices(void) 1084 static void sbridge_put_all_devices(void)
1079 { 1085 {
1080 struct sbridge_dev *sbridge_dev, *tmp; 1086 struct sbridge_dev *sbridge_dev, *tmp;
1081 1087
1082 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) { 1088 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
1083 sbridge_put_devices(sbridge_dev); 1089 sbridge_put_devices(sbridge_dev);
1084 free_sbridge_dev(sbridge_dev); 1090 free_sbridge_dev(sbridge_dev);
1085 } 1091 }
1086 } 1092 }
1087 1093
1088 /* 1094 /*
1089 * sbridge_get_all_devices Find and perform 'get' operation on the MCH's 1095 * sbridge_get_all_devices Find and perform 'get' operation on the MCH's
1090 * device/functions we want to reference for this driver 1096 * device/functions we want to reference for this driver
1091 * 1097 *
1092 * Need to 'get' device 16 func 1 and func 2 1098 * Need to 'get' device 16 func 1 and func 2
1093 */ 1099 */
1094 static int sbridge_get_onedevice(struct pci_dev **prev, 1100 static int sbridge_get_onedevice(struct pci_dev **prev,
1095 u8 *num_mc, 1101 u8 *num_mc,
1096 const struct pci_id_table *table, 1102 const struct pci_id_table *table,
1097 const unsigned devno) 1103 const unsigned devno)
1098 { 1104 {
1099 struct sbridge_dev *sbridge_dev; 1105 struct sbridge_dev *sbridge_dev;
1100 const struct pci_id_descr *dev_descr = &table->descr[devno]; 1106 const struct pci_id_descr *dev_descr = &table->descr[devno];
1101 1107
1102 struct pci_dev *pdev = NULL; 1108 struct pci_dev *pdev = NULL;
1103 u8 bus = 0; 1109 u8 bus = 0;
1104 1110
1105 sbridge_printk(KERN_INFO, 1111 sbridge_printk(KERN_INFO,
1106 "Seeking for: dev %02x.%d PCI ID %04x:%04x\n", 1112 "Seeking for: dev %02x.%d PCI ID %04x:%04x\n",
1107 dev_descr->dev, dev_descr->func, 1113 dev_descr->dev, dev_descr->func,
1108 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1114 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1109 1115
1110 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 1116 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1111 dev_descr->dev_id, *prev); 1117 dev_descr->dev_id, *prev);
1112 1118
1113 if (!pdev) { 1119 if (!pdev) {
1114 if (*prev) { 1120 if (*prev) {
1115 *prev = pdev; 1121 *prev = pdev;
1116 return 0; 1122 return 0;
1117 } 1123 }
1118 1124
1119 if (dev_descr->optional) 1125 if (dev_descr->optional)
1120 return 0; 1126 return 0;
1121 1127
1122 if (devno == 0) 1128 if (devno == 0)
1123 return -ENODEV; 1129 return -ENODEV;
1124 1130
1125 sbridge_printk(KERN_INFO, 1131 sbridge_printk(KERN_INFO,
1126 "Device not found: dev %02x.%d PCI ID %04x:%04x\n", 1132 "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
1127 dev_descr->dev, dev_descr->func, 1133 dev_descr->dev, dev_descr->func,
1128 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1134 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1129 1135
1130 /* End of list, leave */ 1136 /* End of list, leave */
1131 return -ENODEV; 1137 return -ENODEV;
1132 } 1138 }
1133 bus = pdev->bus->number; 1139 bus = pdev->bus->number;
1134 1140
1135 sbridge_dev = get_sbridge_dev(bus); 1141 sbridge_dev = get_sbridge_dev(bus);
1136 if (!sbridge_dev) { 1142 if (!sbridge_dev) {
1137 sbridge_dev = alloc_sbridge_dev(bus, table); 1143 sbridge_dev = alloc_sbridge_dev(bus, table);
1138 if (!sbridge_dev) { 1144 if (!sbridge_dev) {
1139 pci_dev_put(pdev); 1145 pci_dev_put(pdev);
1140 return -ENOMEM; 1146 return -ENOMEM;
1141 } 1147 }
1142 (*num_mc)++; 1148 (*num_mc)++;
1143 } 1149 }
1144 1150
1145 if (sbridge_dev->pdev[devno]) { 1151 if (sbridge_dev->pdev[devno]) {
1146 sbridge_printk(KERN_ERR, 1152 sbridge_printk(KERN_ERR,
1147 "Duplicated device for " 1153 "Duplicated device for "
1148 "dev %02x:%d.%d PCI ID %04x:%04x\n", 1154 "dev %02x:%d.%d PCI ID %04x:%04x\n",
1149 bus, dev_descr->dev, dev_descr->func, 1155 bus, dev_descr->dev, dev_descr->func,
1150 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1156 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1151 pci_dev_put(pdev); 1157 pci_dev_put(pdev);
1152 return -ENODEV; 1158 return -ENODEV;
1153 } 1159 }
1154 1160
1155 sbridge_dev->pdev[devno] = pdev; 1161 sbridge_dev->pdev[devno] = pdev;
1156 1162
1157 /* Sanity check */ 1163 /* Sanity check */
1158 if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev || 1164 if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
1159 PCI_FUNC(pdev->devfn) != dev_descr->func)) { 1165 PCI_FUNC(pdev->devfn) != dev_descr->func)) {
1160 sbridge_printk(KERN_ERR, 1166 sbridge_printk(KERN_ERR,
1161 "Device PCI ID %04x:%04x " 1167 "Device PCI ID %04x:%04x "
1162 "has dev %02x:%d.%d instead of dev %02x:%02x.%d\n", 1168 "has dev %02x:%d.%d instead of dev %02x:%02x.%d\n",
1163 PCI_VENDOR_ID_INTEL, dev_descr->dev_id, 1169 PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
1164 bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1170 bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1165 bus, dev_descr->dev, dev_descr->func); 1171 bus, dev_descr->dev, dev_descr->func);
1166 return -ENODEV; 1172 return -ENODEV;
1167 } 1173 }
1168 1174
1169 /* Be sure that the device is enabled */ 1175 /* Be sure that the device is enabled */
1170 if (unlikely(pci_enable_device(pdev) < 0)) { 1176 if (unlikely(pci_enable_device(pdev) < 0)) {
1171 sbridge_printk(KERN_ERR, 1177 sbridge_printk(KERN_ERR,
1172 "Couldn't enable " 1178 "Couldn't enable "
1173 "dev %02x:%d.%d PCI ID %04x:%04x\n", 1179 "dev %02x:%d.%d PCI ID %04x:%04x\n",
1174 bus, dev_descr->dev, dev_descr->func, 1180 bus, dev_descr->dev, dev_descr->func,
1175 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1181 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1176 return -ENODEV; 1182 return -ENODEV;
1177 } 1183 }
1178 1184
1179 edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n", 1185 edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
1180 bus, dev_descr->dev, dev_descr->func, 1186 bus, dev_descr->dev, dev_descr->func,
1181 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1187 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1182 1188
1183 /* 1189 /*
1184 * As stated in drivers/pci/search.c, the reference count for 1190 * As stated in drivers/pci/search.c, the reference count for
1185 * @from is always decremented if it is not %NULL. So, as we need 1191 * @from is always decremented if it is not %NULL. So, as we need
1186 * to get all devices up to NULL, we need to do a get for the device 1192 * to get all devices up to NULL, we need to do a get for the device
1187 */ 1193 */
1188 pci_dev_get(pdev); 1194 pci_dev_get(pdev);
1189 1195
1190 *prev = pdev; 1196 *prev = pdev;
1191 1197
1192 return 0; 1198 return 0;
1193 } 1199 }
1194 1200
1195 static int sbridge_get_all_devices(u8 *num_mc) 1201 static int sbridge_get_all_devices(u8 *num_mc)
1196 { 1202 {
1197 int i, rc; 1203 int i, rc;
1198 struct pci_dev *pdev = NULL; 1204 struct pci_dev *pdev = NULL;
1199 const struct pci_id_table *table = pci_dev_descr_sbridge_table; 1205 const struct pci_id_table *table = pci_dev_descr_sbridge_table;
1200 1206
1201 while (table && table->descr) { 1207 while (table && table->descr) {
1202 for (i = 0; i < table->n_devs; i++) { 1208 for (i = 0; i < table->n_devs; i++) {
1203 pdev = NULL; 1209 pdev = NULL;
1204 do { 1210 do {
1205 rc = sbridge_get_onedevice(&pdev, num_mc, 1211 rc = sbridge_get_onedevice(&pdev, num_mc,
1206 table, i); 1212 table, i);
1207 if (rc < 0) { 1213 if (rc < 0) {
1208 if (i == 0) { 1214 if (i == 0) {
1209 i = table->n_devs; 1215 i = table->n_devs;
1210 break; 1216 break;
1211 } 1217 }
1212 sbridge_put_all_devices(); 1218 sbridge_put_all_devices();
1213 return -ENODEV; 1219 return -ENODEV;
1214 } 1220 }
1215 } while (pdev); 1221 } while (pdev);
1216 } 1222 }
1217 table++; 1223 table++;
1218 } 1224 }
1219 1225
1220 return 0; 1226 return 0;
1221 } 1227 }
1222 1228
1223 static int mci_bind_devs(struct mem_ctl_info *mci, 1229 static int mci_bind_devs(struct mem_ctl_info *mci,
1224 struct sbridge_dev *sbridge_dev) 1230 struct sbridge_dev *sbridge_dev)
1225 { 1231 {
1226 struct sbridge_pvt *pvt = mci->pvt_info; 1232 struct sbridge_pvt *pvt = mci->pvt_info;
1227 struct pci_dev *pdev; 1233 struct pci_dev *pdev;
1228 int i, func, slot; 1234 int i, func, slot;
1229 1235
1230 for (i = 0; i < sbridge_dev->n_devs; i++) { 1236 for (i = 0; i < sbridge_dev->n_devs; i++) {
1231 pdev = sbridge_dev->pdev[i]; 1237 pdev = sbridge_dev->pdev[i];
1232 if (!pdev) 1238 if (!pdev)
1233 continue; 1239 continue;
1234 slot = PCI_SLOT(pdev->devfn); 1240 slot = PCI_SLOT(pdev->devfn);
1235 func = PCI_FUNC(pdev->devfn); 1241 func = PCI_FUNC(pdev->devfn);
1236 switch (slot) { 1242 switch (slot) {
1237 case 12: 1243 case 12:
1238 switch (func) { 1244 switch (func) {
1239 case 6: 1245 case 6:
1240 pvt->pci_sad0 = pdev; 1246 pvt->pci_sad0 = pdev;
1241 break; 1247 break;
1242 case 7: 1248 case 7:
1243 pvt->pci_sad1 = pdev; 1249 pvt->pci_sad1 = pdev;
1244 break; 1250 break;
1245 default: 1251 default:
1246 goto error; 1252 goto error;
1247 } 1253 }
1248 break; 1254 break;
1249 case 13: 1255 case 13:
1250 switch (func) { 1256 switch (func) {
1251 case 6: 1257 case 6:
1252 pvt->pci_br = pdev; 1258 pvt->pci_br = pdev;
1253 break; 1259 break;
1254 default: 1260 default:
1255 goto error; 1261 goto error;
1256 } 1262 }
1257 break; 1263 break;
1258 case 14: 1264 case 14:
1259 switch (func) { 1265 switch (func) {
1260 case 0: 1266 case 0:
1261 pvt->pci_ha0 = pdev; 1267 pvt->pci_ha0 = pdev;
1262 break; 1268 break;
1263 default: 1269 default:
1264 goto error; 1270 goto error;
1265 } 1271 }
1266 break; 1272 break;
1267 case 15: 1273 case 15:
1268 switch (func) { 1274 switch (func) {
1269 case 0: 1275 case 0:
1270 pvt->pci_ta = pdev; 1276 pvt->pci_ta = pdev;
1271 break; 1277 break;
1272 case 1: 1278 case 1:
1273 pvt->pci_ras = pdev; 1279 pvt->pci_ras = pdev;
1274 break; 1280 break;
1275 case 2: 1281 case 2:
1276 case 3: 1282 case 3:
1277 case 4: 1283 case 4:
1278 case 5: 1284 case 5:
1279 pvt->pci_tad[func - 2] = pdev; 1285 pvt->pci_tad[func - 2] = pdev;
1280 break; 1286 break;
1281 default: 1287 default:
1282 goto error; 1288 goto error;
1283 } 1289 }
1284 break; 1290 break;
1285 case 17: 1291 case 17:
1286 switch (func) { 1292 switch (func) {
1287 case 0: 1293 case 0:
1288 pvt->pci_ddrio = pdev; 1294 pvt->pci_ddrio = pdev;
1289 break; 1295 break;
1290 default: 1296 default:
1291 goto error; 1297 goto error;
1292 } 1298 }
1293 break; 1299 break;
1294 default: 1300 default:
1295 goto error; 1301 goto error;
1296 } 1302 }
1297 1303
1298 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", 1304 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1299 sbridge_dev->bus, 1305 sbridge_dev->bus,
1300 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), 1306 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1301 pdev); 1307 pdev);
1302 } 1308 }
1303 1309
1304 /* Check if everything was registered */ 1310 /* Check if everything was registered */
1305 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 || 1311 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
1306 !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta || 1312 !pvt-> pci_tad || !pvt->pci_ras || !pvt->pci_ta)
1307 !pvt->pci_ddrio)
1308 goto enodev; 1313 goto enodev;
1309 1314
1310 for (i = 0; i < NUM_CHANNELS; i++) { 1315 for (i = 0; i < NUM_CHANNELS; i++) {
1311 if (!pvt->pci_tad[i]) 1316 if (!pvt->pci_tad[i])
1312 goto enodev; 1317 goto enodev;
1313 } 1318 }
1314 return 0; 1319 return 0;
1315 1320
1316 enodev: 1321 enodev:
1317 sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); 1322 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1318 return -ENODEV; 1323 return -ENODEV;
1319 1324
1320 error: 1325 error:
1321 sbridge_printk(KERN_ERR, "Device %d, function %d " 1326 sbridge_printk(KERN_ERR, "Device %d, function %d "
1322 "is out of the expected range\n", 1327 "is out of the expected range\n",
1323 slot, func); 1328 slot, func);
1324 return -EINVAL; 1329 return -EINVAL;
1325 } 1330 }
1326 1331
1327 /**************************************************************************** 1332 /****************************************************************************
1328 Error check routines 1333 Error check routines
1329 ****************************************************************************/ 1334 ****************************************************************************/
1330 1335
1331 /* 1336 /*
1332 * While Sandy Bridge has error count registers, the SMI BIOS reads 1337 * While Sandy Bridge has error count registers, the SMI BIOS reads
1333 * and resets the counters, so they are not reliable for the OS to 1338 * and resets the counters, so they are not reliable for the OS to
1334 * read. Thus, we have no option but to trust whatever MCE is 1339 * read. Thus, we have no option but to trust whatever MCE is
1335 * telling us about the errors. 1340 * telling us about the errors.
1336 */ 1341 */
1337 static void sbridge_mce_output_error(struct mem_ctl_info *mci, 1342 static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1338 const struct mce *m) 1343 const struct mce *m)
1339 { 1344 {
1340 struct mem_ctl_info *new_mci; 1345 struct mem_ctl_info *new_mci;
1341 struct sbridge_pvt *pvt = mci->pvt_info; 1346 struct sbridge_pvt *pvt = mci->pvt_info;
1342 enum hw_event_mc_err_type tp_event; 1347 enum hw_event_mc_err_type tp_event;
1343 char *type, *optype, msg[256]; 1348 char *type, *optype, msg[256];
1344 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); 1349 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
1345 bool overflow = GET_BITFIELD(m->status, 62, 62); 1350 bool overflow = GET_BITFIELD(m->status, 62, 62);
1346 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); 1351 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
1347 bool recoverable = GET_BITFIELD(m->status, 56, 56); 1352 bool recoverable = GET_BITFIELD(m->status, 56, 56);
1348 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); 1353 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1349 u32 mscod = GET_BITFIELD(m->status, 16, 31); 1354 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1350 u32 errcode = GET_BITFIELD(m->status, 0, 15); 1355 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1351 u32 channel = GET_BITFIELD(m->status, 0, 3); 1356 u32 channel = GET_BITFIELD(m->status, 0, 3);
1352 u32 optypenum = GET_BITFIELD(m->status, 4, 6); 1357 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1353 long channel_mask, first_channel; 1358 long channel_mask, first_channel;
1354 u8 rank, socket; 1359 u8 rank, socket;
1355 int rc, dimm; 1360 int rc, dimm;
1356 char *area_type = NULL; 1361 char *area_type = NULL;
1357 1362
1358 if (uncorrected_error) { 1363 if (uncorrected_error) {
1359 if (ripv) { 1364 if (ripv) {
1360 type = "FATAL"; 1365 type = "FATAL";
1361 tp_event = HW_EVENT_ERR_FATAL; 1366 tp_event = HW_EVENT_ERR_FATAL;
1362 } else { 1367 } else {
1363 type = "NON_FATAL"; 1368 type = "NON_FATAL";
1364 tp_event = HW_EVENT_ERR_UNCORRECTED; 1369 tp_event = HW_EVENT_ERR_UNCORRECTED;
1365 } 1370 }
1366 } else { 1371 } else {
1367 type = "CORRECTED"; 1372 type = "CORRECTED";
1368 tp_event = HW_EVENT_ERR_CORRECTED; 1373 tp_event = HW_EVENT_ERR_CORRECTED;
1369 } 1374 }
1370 1375
1371 /* 1376 /*
1372 * According to Table 15-9 of the Intel Architecture spec vol 3A, 1377 * According to Table 15-9 of the Intel Architecture spec vol 3A,
1373 * memory errors should fit in this mask: 1378 * memory errors should fit in this mask:
1374 * 000f 0000 1mmm cccc (binary) 1379 * 000f 0000 1mmm cccc (binary)
1375 * where: 1380 * where:
1376 * f = Correction Report Filtering Bit. If 1, subsequent errors 1381 * f = Correction Report Filtering Bit. If 1, subsequent errors
1377 * won't be shown 1382 * won't be shown
1378 * mmm = error type 1383 * mmm = error type
1379 * cccc = channel 1384 * cccc = channel
1380 * If the mask doesn't match, report an error to the parsing logic 1385 * If the mask doesn't match, report an error to the parsing logic
1381 */ 1386 */
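/*
 * Example (illustrative): errcode = 0x0091 is 0000 0000 1001 0001b, so
 * (0x0091 & 0xef80) == 0x0080 matches: a memory error with mmm = 001
 * ("memory read error") on channel cccc = 0001.
 */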
1382 if (!((errcode & 0xef80) == 0x80)) { 1387 if (!((errcode & 0xef80) == 0x80)) {
1383 optype = "Can't parse: it is not a mem"; 1388 optype = "Can't parse: it is not a mem";
1384 } else { 1389 } else {
1385 switch (optypenum) { 1390 switch (optypenum) {
1386 case 0: 1391 case 0:
1387 optype = "generic undef request error"; 1392 optype = "generic undef request error";
1388 break; 1393 break;
1389 case 1: 1394 case 1:
1390 optype = "memory read error"; 1395 optype = "memory read error";
1391 break; 1396 break;
1392 case 2: 1397 case 2:
1393 optype = "memory write error"; 1398 optype = "memory write error";
1394 break; 1399 break;
1395 case 3: 1400 case 3:
1396 optype = "addr/cmd error"; 1401 optype = "addr/cmd error";
1397 break; 1402 break;
1398 case 4: 1403 case 4:
1399 optype = "memory scrubbing error"; 1404 optype = "memory scrubbing error";
1400 break; 1405 break;
1401 default: 1406 default:
1402 optype = "reserved"; 1407 optype = "reserved";
1403 break; 1408 break;
1404 } 1409 }
1405 } 1410 }
1406 1411
1407 rc = get_memory_error_data(mci, m->addr, &socket, 1412 rc = get_memory_error_data(mci, m->addr, &socket,
1408 &channel_mask, &rank, &area_type, msg); 1413 &channel_mask, &rank, &area_type, msg);
1409 if (rc < 0) 1414 if (rc < 0)
1410 goto err_parsing; 1415 goto err_parsing;
1411 new_mci = get_mci_for_node_id(socket); 1416 new_mci = get_mci_for_node_id(socket);
1412 if (!new_mci) { 1417 if (!new_mci) {
1413 strcpy(msg, "Error: socket got corrupted!"); 1418 strcpy(msg, "Error: socket got corrupted!");
1414 goto err_parsing; 1419 goto err_parsing;
1415 } 1420 }
1416 mci = new_mci; 1421 mci = new_mci;
1417 pvt = mci->pvt_info; 1422 pvt = mci->pvt_info;
1418 1423
1419 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS); 1424 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
1420 1425
1421 if (rank < 4) 1426 if (rank < 4)
1422 dimm = 0; 1427 dimm = 0;
1423 else if (rank < 8) 1428 else if (rank < 8)
1424 dimm = 1; 1429 dimm = 1;
1425 else 1430 else
1426 dimm = 2; 1431 dimm = 2;
1427 1432
1428 1433
1429 /* 1434 /*
1430 * FIXME: On some memory configurations (mirror, lockstep), the 1435 * FIXME: On some memory configurations (mirror, lockstep), the
1431 * Memory Controller can't point the error to a single DIMM. The 1436 * Memory Controller can't point the error to a single DIMM. The
1432 * EDAC core should be handling the channel mask, in order to point 1437 * EDAC core should be handling the channel mask, in order to point
1433 * to the group of DIMMs where the error may be happening. 1438 * to the group of DIMMs where the error may be happening.
1434 */ 1439 */
1435 snprintf(msg, sizeof(msg), 1440 snprintf(msg, sizeof(msg),
1436 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d", 1441 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
1437 overflow ? " OVERFLOW" : "", 1442 overflow ? " OVERFLOW" : "",
1438 (uncorrected_error && recoverable) ? " recoverable" : "", 1443 (uncorrected_error && recoverable) ? " recoverable" : "",
1439 area_type, 1444 area_type,
1440 mscod, errcode, 1445 mscod, errcode,
1441 socket, 1446 socket,
1442 channel_mask, 1447 channel_mask,
1443 rank); 1448 rank);
1444 1449
1445 edac_dbg(0, "%s\n", msg); 1450 edac_dbg(0, "%s\n", msg);
1446 1451
1447 /* FIXME: need support for channel mask */ 1452 /* FIXME: need support for channel mask */
1448 1453
1449 /* Call the helper to output message */ 1454 /* Call the helper to output message */
1450 edac_mc_handle_error(tp_event, mci, core_err_cnt, 1455 edac_mc_handle_error(tp_event, mci, core_err_cnt,
1451 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, 1456 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
1452 channel, dimm, -1, 1457 channel, dimm, -1,
1453 optype, msg); 1458 optype, msg);
1454 return; 1459 return;
1455 err_parsing: 1460 err_parsing:
1456 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, 1461 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
1457 -1, -1, -1, 1462 -1, -1, -1,
1458 msg, ""); 1463 msg, "");
1459 1464
1460 } 1465 }
1461 1466
1462 /* 1467 /*
1463 * sbridge_check_error Retrieve and process errors reported by the 1468 * sbridge_check_error Retrieve and process errors reported by the
1464 * hardware. Called by the Core module. 1469 * hardware. Called by the Core module.
1465 */ 1470 */
1466 static void sbridge_check_error(struct mem_ctl_info *mci) 1471 static void sbridge_check_error(struct mem_ctl_info *mci)
1467 { 1472 {
1468 struct sbridge_pvt *pvt = mci->pvt_info; 1473 struct sbridge_pvt *pvt = mci->pvt_info;
1469 int i; 1474 int i;
1470 unsigned count = 0; 1475 unsigned count = 0;
1471 struct mce *m; 1476 struct mce *m;
1472 1477
1473 /* 1478 /*
1474 * MCE first step: Copy all mce errors into a temporary buffer 1479 * MCE first step: Copy all mce errors into a temporary buffer
1475 * We use double buffering here to reduce the risk of 1480 * We use double buffering here to reduce the risk of
1476 * losing an error. 1481 * losing an error.
1477 */ 1482 */
1478 smp_rmb(); 1483 smp_rmb();
1479 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in) 1484 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
1480 % MCE_LOG_LEN; 1485 % MCE_LOG_LEN;
1481 if (!count) 1486 if (!count)
1482 return; 1487 return;
1483 1488
1484 m = pvt->mce_outentry; 1489 m = pvt->mce_outentry;
1485 if (pvt->mce_in + count > MCE_LOG_LEN) { 1490 if (pvt->mce_in + count > MCE_LOG_LEN) {
1486 unsigned l = MCE_LOG_LEN - pvt->mce_in; 1491 unsigned l = MCE_LOG_LEN - pvt->mce_in;
1487 1492
1488 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l); 1493 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
1489 smp_wmb(); 1494 smp_wmb();
1490 pvt->mce_in = 0; 1495 pvt->mce_in = 0;
1491 count -= l; 1496 count -= l;
1492 m += l; 1497 m += l;
1493 } 1498 }
1494 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count); 1499 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
1495 smp_wmb(); 1500 smp_wmb();
1496 pvt->mce_in += count; 1501 pvt->mce_in += count;
1497 1502
1498 smp_rmb(); 1503 smp_rmb();
1499 if (pvt->mce_overrun) { 1504 if (pvt->mce_overrun) {
1500 sbridge_printk(KERN_ERR, "Lost %d memory errors\n", 1505 sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
1501 pvt->mce_overrun); 1506 pvt->mce_overrun);
1502 smp_wmb(); 1507 smp_wmb();
1503 pvt->mce_overrun = 0; 1508 pvt->mce_overrun = 0;
1504 } 1509 }
1505 1510
1506 /* 1511 /*
1507 * MCE second step: parse errors and display 1512 * MCE second step: parse errors and display
1508 */ 1513 */
1509 for (i = 0; i < count; i++) 1514 for (i = 0; i < count; i++)
1510 sbridge_mce_output_error(mci, &pvt->mce_outentry[i]); 1515 sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
1511 } 1516 }
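/*
 * Ring-buffer arithmetic (illustrative, assuming MCE_LOG_LEN = 32): with
 * mce_in = 30 and mce_out = 2, count = (2 + 32 - 30) % 32 = 4; the first
 * memcpy above drains entries 30..31, the second entries 0..1.
 */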
1512 1517
1513 /* 1518 /*
1514 * sbridge_mce_check_error Replicates mcelog routine to get errors 1519 * sbridge_mce_check_error Replicates mcelog routine to get errors
1515 * This routine simply queues mcelog errors, and 1520 * This routine simply queues mcelog errors, and
1516 * returns. The error itself should be handled later 1521 * returns. The error itself should be handled later
1517 * by sbridge_check_error. 1522 * by sbridge_check_error.
1518 * WARNING: As this routine should be called at NMI time, extra care should 1523 * WARNING: As this routine should be called at NMI time, extra care should
1519 * be taken to avoid deadlocks, and to be as fast as possible. 1524 * be taken to avoid deadlocks, and to be as fast as possible.
1520 */ 1525 */
1521 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, 1526 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
1522 void *data) 1527 void *data)
1523 { 1528 {
1524 struct mce *mce = (struct mce *)data; 1529 struct mce *mce = (struct mce *)data;
1525 struct mem_ctl_info *mci; 1530 struct mem_ctl_info *mci;
1526 struct sbridge_pvt *pvt; 1531 struct sbridge_pvt *pvt;
1527 1532
1528 mci = get_mci_for_node_id(mce->socketid); 1533 mci = get_mci_for_node_id(mce->socketid);
1529 if (!mci) 1534 if (!mci)
1530 return NOTIFY_BAD; 1535 return NOTIFY_BAD;
1531 pvt = mci->pvt_info; 1536 pvt = mci->pvt_info;
1532 1537
1533 /* 1538 /*
1534 * Just let mcelog handle it if the error is 1539 * Just let mcelog handle it if the error is
1535 * outside the memory controller. A memory error 1540 * outside the memory controller. A memory error
1536 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0. 1541 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
1537 * Bit 12 has a special meaning. 1542 * Bit 12 has a special meaning.
1538 */ 1543 */
1539 if ((mce->status & 0xefff) >> 7 != 1) 1544 if ((mce->status & 0xefff) >> 7 != 1)
1540 return NOTIFY_DONE; 1545 return NOTIFY_DONE;
1541 1546
1542 printk("sbridge: HANDLING MCE MEMORY ERROR\n"); 1547 printk("sbridge: HANDLING MCE MEMORY ERROR\n");
1543 1548
1544 printk("CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n", 1549 printk("CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
1545 mce->extcpu, mce->mcgstatus, mce->bank, mce->status); 1550 mce->extcpu, mce->mcgstatus, mce->bank, mce->status);
1546 printk("TSC %llx ", mce->tsc); 1551 printk("TSC %llx ", mce->tsc);
1547 printk("ADDR %llx ", mce->addr); 1552 printk("ADDR %llx ", mce->addr);
1548 printk("MISC %llx ", mce->misc); 1553 printk("MISC %llx ", mce->misc);
1549 1554
1550 printk("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", 1555 printk("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1551 mce->cpuvendor, mce->cpuid, mce->time, 1556 mce->cpuvendor, mce->cpuid, mce->time,
1552 mce->socketid, mce->apicid); 1557 mce->socketid, mce->apicid);
1553 1558
1554 /* Only handle if it is the right mc controller */ 1559 /* Only handle if it is the right mc controller */
1555 if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc) 1560 if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
1556 return NOTIFY_DONE; 1561 return NOTIFY_DONE;
1557 1562
1558 smp_rmb(); 1563 smp_rmb();
1559 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { 1564 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1560 smp_wmb(); 1565 smp_wmb();
1561 pvt->mce_overrun++; 1566 pvt->mce_overrun++;
1562 return NOTIFY_DONE; 1567 return NOTIFY_DONE;
1563 } 1568 }
1564 1569
1565 /* Copy the memory error into the ring buffer */ 1570 /* Copy the memory error into the ring buffer */
1566 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce)); 1571 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
1567 smp_wmb(); 1572 smp_wmb();
1568 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN; 1573 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
1569 1574
1570 /* Handle fatal errors immediately */ 1575 /* Handle fatal errors immediately */
1571 if (mce->mcgstatus & 1) 1576 if (mce->mcgstatus & 1)
1572 sbridge_check_error(mci); 1577 sbridge_check_error(mci);
1573 1578
1574 /* Advise mcelog that the error was handled */ 1579 /* Advise mcelog that the error was handled */
1575 return NOTIFY_STOP; 1580 return NOTIFY_STOP;
1576 } 1581 }
1577 1582
1578 static struct notifier_block sbridge_mce_dec = { 1583 static struct notifier_block sbridge_mce_dec = {
1579 .notifier_call = sbridge_mce_check_error, 1584 .notifier_call = sbridge_mce_check_error,
1580 }; 1585 };
1581 1586
1582 /**************************************************************************** 1587 /****************************************************************************
1583 EDAC register/unregister logic 1588 EDAC register/unregister logic
1584 ****************************************************************************/ 1589 ****************************************************************************/
1585 1590
1586 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) 1591 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1587 { 1592 {
1588 struct mem_ctl_info *mci = sbridge_dev->mci; 1593 struct mem_ctl_info *mci = sbridge_dev->mci;
1589 struct sbridge_pvt *pvt; 1594 struct sbridge_pvt *pvt;
1590 1595
1591 if (unlikely(!mci || !mci->pvt_info)) { 1596 if (unlikely(!mci || !mci->pvt_info)) {
1592 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); 1597 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
1593 1598
1594 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n"); 1599 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
1595 return; 1600 return;
1596 } 1601 }
1597 1602
1598 pvt = mci->pvt_info; 1603 pvt = mci->pvt_info;
1599 1604
1600 edac_dbg(0, "MC: mci = %p, dev = %p\n", 1605 edac_dbg(0, "MC: mci = %p, dev = %p\n",
1601 mci, &sbridge_dev->pdev[0]->dev); 1606 mci, &sbridge_dev->pdev[0]->dev);
1602 1607
1603 /* Remove MC sysfs nodes */ 1608 /* Remove MC sysfs nodes */
1604 edac_mc_del_mc(mci->pdev); 1609 edac_mc_del_mc(mci->pdev);
1605 1610
1606 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); 1611 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1607 kfree(mci->ctl_name); 1612 kfree(mci->ctl_name);
1608 edac_mc_free(mci); 1613 edac_mc_free(mci);
1609 sbridge_dev->mci = NULL; 1614 sbridge_dev->mci = NULL;
1610 } 1615 }
1611 1616
1612 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) 1617 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
1613 { 1618 {
1614 struct mem_ctl_info *mci; 1619 struct mem_ctl_info *mci;
1615 struct edac_mc_layer layers[2]; 1620 struct edac_mc_layer layers[2];
1616 struct sbridge_pvt *pvt; 1621 struct sbridge_pvt *pvt;
1617 int rc; 1622 int rc;
1618 1623
1619 /* Check the number of active and not disabled channels */ 1624 /* Check the number of active and not disabled channels */
1620 rc = check_if_ecc_is_active(sbridge_dev->bus); 1625 rc = check_if_ecc_is_active(sbridge_dev->bus);
1621 if (unlikely(rc < 0)) 1626 if (unlikely(rc < 0))
1622 return rc; 1627 return rc;
1623 1628
1624 /* allocate a new MC control structure */ 1629 /* allocate a new MC control structure */
1625 layers[0].type = EDAC_MC_LAYER_CHANNEL; 1630 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1626 layers[0].size = NUM_CHANNELS; 1631 layers[0].size = NUM_CHANNELS;
1627 layers[0].is_virt_csrow = false; 1632 layers[0].is_virt_csrow = false;
1628 layers[1].type = EDAC_MC_LAYER_SLOT; 1633 layers[1].type = EDAC_MC_LAYER_SLOT;
1629 layers[1].size = MAX_DIMMS; 1634 layers[1].size = MAX_DIMMS;
1630 layers[1].is_virt_csrow = true; 1635 layers[1].is_virt_csrow = true;
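/*
 * (Illustrative, assuming NUM_CHANNELS = 4 and MAX_DIMMS = 3 as defined
 * earlier in this file) The two layers describe a 4 x 3 grid, so
 * edac_mc_alloc() below creates 12 dimm entries, addressed as
 * (channel, slot) pairs via EDAC_DIMM_PTR().
 */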
1631 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers, 1636 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
1632 sizeof(*pvt)); 1637 sizeof(*pvt));
1633 1638
1634 if (unlikely(!mci)) 1639 if (unlikely(!mci))
1635 return -ENOMEM; 1640 return -ENOMEM;
1636 1641
1637 edac_dbg(0, "MC: mci = %p, dev = %p\n", 1642 edac_dbg(0, "MC: mci = %p, dev = %p\n",
1638 mci, &sbridge_dev->pdev[0]->dev); 1643 mci, &sbridge_dev->pdev[0]->dev);
1639 1644
1640 pvt = mci->pvt_info; 1645 pvt = mci->pvt_info;
1641 memset(pvt, 0, sizeof(*pvt)); 1646 memset(pvt, 0, sizeof(*pvt));
1642 1647
1643 /* Associate sbridge_dev and mci for future usage */ 1648 /* Associate sbridge_dev and mci for future usage */
1644 pvt->sbridge_dev = sbridge_dev; 1649 pvt->sbridge_dev = sbridge_dev;
1645 sbridge_dev->mci = mci; 1650 sbridge_dev->mci = mci;
1646 1651
1647 mci->mtype_cap = MEM_FLAG_DDR3; 1652 mci->mtype_cap = MEM_FLAG_DDR3;
1648 mci->edac_ctl_cap = EDAC_FLAG_NONE; 1653 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1649 mci->edac_cap = EDAC_FLAG_NONE; 1654 mci->edac_cap = EDAC_FLAG_NONE;
1650 mci->mod_name = "sbridge_edac.c"; 1655 mci->mod_name = "sbridge_edac.c";
1651 mci->mod_ver = SBRIDGE_REVISION; 1656 mci->mod_ver = SBRIDGE_REVISION;
1652 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); 1657 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
1653 mci->dev_name = pci_name(sbridge_dev->pdev[0]); 1658 mci->dev_name = pci_name(sbridge_dev->pdev[0]);
1654 mci->ctl_page_to_phys = NULL; 1659 mci->ctl_page_to_phys = NULL;
1655 1660
1656 /* Set the function pointer to an actual operation function */ 1661 /* Set the function pointer to an actual operation function */
1657 mci->edac_check = sbridge_check_error; 1662 mci->edac_check = sbridge_check_error;
1658 1663
1659 /* Store pci devices at mci for faster access */ 1664 /* Store pci devices at mci for faster access */
1660 rc = mci_bind_devs(mci, sbridge_dev); 1665 rc = mci_bind_devs(mci, sbridge_dev);
1661 if (unlikely(rc < 0)) 1666 if (unlikely(rc < 0))
1662 goto fail0; 1667 goto fail0;
1663 1668
1664 /* Get dimm basic config and the memory layout */ 1669 /* Get dimm basic config and the memory layout */
1665 get_dimm_config(mci); 1670 get_dimm_config(mci);
1666 get_memory_layout(mci); 1671 get_memory_layout(mci);
1667 1672
1668 /* record ptr to the generic device */ 1673 /* record ptr to the generic device */
1669 mci->pdev = &sbridge_dev->pdev[0]->dev; 1674 mci->pdev = &sbridge_dev->pdev[0]->dev;
1670 1675
1671 /* add this new MC control structure to EDAC's list of MCs */ 1676 /* add this new MC control structure to EDAC's list of MCs */
1672 if (unlikely(edac_mc_add_mc(mci))) { 1677 if (unlikely(edac_mc_add_mc(mci))) {
1673 edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); 1678 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1674 rc = -EINVAL; 1679 rc = -EINVAL;
1675 goto fail0; 1680 goto fail0;
1676 } 1681 }
1677 1682
1678 return 0; 1683 return 0;
1679 1684
1680 fail0: 1685 fail0:
1681 kfree(mci->ctl_name); 1686 kfree(mci->ctl_name);
1682 edac_mc_free(mci); 1687 edac_mc_free(mci);
1683 sbridge_dev->mci = NULL; 1688 sbridge_dev->mci = NULL;
1684 return rc; 1689 return rc;
1685 } 1690 }
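The two layers registered above make the EDAC core treat the controller's DIMMs as a row-major channel-by-slot grid. The following userspace sketch is only an illustration of that layout, not code from the EDAC core; the sizes are hypothetical stand-ins for NUM_CHANNELS and MAX_DIMMS:

/*
 * Illustrative only: how a (channel, slot) pair maps onto the flat
 * array of DIMMs when two layers are allocated as above.
 */
#include <stdio.h>

#define NUM_CHANNELS	4	/* stand-in for layers[0].size */
#define MAX_DIMMS	3	/* stand-in for layers[1].size */

static int dimm_index(int chan, int slot)
{
	/* row-major: outer layer (channel) first, inner layer (slot) second */
	return chan * MAX_DIMMS + slot;
}

int main(void)
{
	int chan, slot;

	for (chan = 0; chan < NUM_CHANNELS; chan++)
		for (slot = 0; slot < MAX_DIMMS; slot++)
			printf("channel %d, slot %d -> dimm %d\n",
			       chan, slot, dimm_index(chan, slot));
	return 0;
}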

/*
 * sbridge_probe	Probe for ONE instance of the device to see if it is
 *			present.
 * return:
 *	0 when a device is found
 *	< 0 for an error code
 */

static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;
	u8 mc, num_mc = 0;
	struct sbridge_dev *sbridge_dev;

	/* get the pci devices we want to reserve for our use */
	mutex_lock(&sbridge_edac_lock);

	/*
	 * All memory controllers are allocated at the first pass.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&sbridge_edac_lock);
		return -ENODEV;
	}
	probed++;

	rc = sbridge_get_all_devices(&num_mc);
	if (unlikely(rc < 0))
		goto fail0;
	mc = 0;

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
			 mc, mc + 1, num_mc);
		sbridge_dev->mc = mc++;
		rc = sbridge_register_mci(sbridge_dev);
		if (unlikely(rc < 0))
			goto fail1;
	}

	sbridge_printk(KERN_INFO, "Driver loaded.\n");

	mutex_unlock(&sbridge_edac_lock);
	return 0;

fail1:
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	sbridge_put_all_devices();
fail0:
	mutex_unlock(&sbridge_edac_lock);
	return rc;
}
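Note the probed guard above: the PCI core may invoke sbridge_probe() once per matching device, but every memory controller is claimed on the first pass, so later calls bail out with -ENODEV. A minimal userspace sketch of that one-shot pattern, with a pthread mutex and hypothetical names standing in for sbridge_edac_lock and the enumeration step:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;
static int probed;

static int probe_once(void (*setup_all)(void))
{
	int rc = 0;

	pthread_mutex_lock(&probe_lock);
	if (probed >= 1) {
		rc = -ENODEV;	/* everything was claimed on the first pass */
	} else {
		probed++;
		setup_all();	/* enumerate all controllers exactly once */
	}
	pthread_mutex_unlock(&probe_lock);
	return rc;
}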

/*
 * sbridge_remove	destructor for one instance of the device
 *
 */
static void sbridge_remove(struct pci_dev *pdev)
{
	struct sbridge_dev *sbridge_dev;

	edac_dbg(0, "\n");

	/*
	 * There is a problem here: the pdev value used at removal will be
	 * wrong, since it will point to the X58 register used to detect that
	 * the machine is a Nehalem or newer design. However, due to the way
	 * several PCI devices are grouped together to provide MC
	 * functionality, we need to use a different method for releasing the
	 * devices.
	 */

	mutex_lock(&sbridge_edac_lock);

	if (unlikely(!probed)) {
		mutex_unlock(&sbridge_edac_lock);
		return;
	}

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	/* Release PCI resources */
	sbridge_put_all_devices();

	probed--;

	mutex_unlock(&sbridge_edac_lock);
}
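Because the pdev handed to .remove() cannot be mapped back to a single MC, teardown walks the driver's global list instead, and only drops the PCI references after the MCIs are gone. Condensed to its essence (the wrapper function name is hypothetical; the calls are the driver's own):

static void sbridge_teardown_all(void)
{
	struct sbridge_dev *sbridge_dev;

	/* first stop error reporting: unregister every MCI... */
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	/* ...then release the PCI devices those MCIs referenced */
	sbridge_put_all_devices();
}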

MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);

/*
 * sbridge_driver	pci_driver structure for this module
 *
 */
static struct pci_driver sbridge_driver = {
	.name     = "sbridge_edac",
	.probe    = sbridge_probe,
	.remove   = sbridge_remove,
	.id_table = sbridge_pci_tbl,
};
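For context, sbridge_pci_tbl (defined earlier in this file) is a standard zero-terminated PCI ID table. A representative entry would look roughly like the following sketch; the 0x0000 device ID is a placeholder, not one of the driver's real Sandy Bridge IMC IDs:

static const struct pci_device_id example_pci_tbl[] = {
	/* placeholder ID - the real table lists the Sandy Bridge IMC devices */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0000) },
	{ 0, }	/* zero-terminated sentinel */
};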

/*
 * sbridge_init		Module entry function
 *			Try to initialize this module for its devices
 */
static int __init sbridge_init(void)
{
	int pci_rc;

	edac_dbg(2, "\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&sbridge_driver);

	if (pci_rc >= 0) {
		mce_register_decode_chain(&sbridge_mce_dec);
		return 0;
	}

	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
		       pci_rc);

	return pci_rc;
}
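sbridge_mce_dec, hooked into the machine-check decode chain above, is a standard notifier_block whose callback decodes MCE records into EDAC error reports. Its shape is roughly the following (illustrative; the real callback is defined earlier in this file, and the names here are stand-ins):

static int example_mce_check_error(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	/* 'data' points to the struct mce record to decode */
	return NOTIFY_DONE;
}

static struct notifier_block example_mce_dec = {
	.notifier_call = example_mce_check_error,
};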

/*
 * sbridge_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit sbridge_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&sbridge_driver);
	mce_unregister_decode_chain(&sbridge_mce_dec);
}

module_init(sbridge_init);
module_exit(sbridge_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge memory controllers - "
		   SBRIDGE_REVISION);
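Assuming this source builds as sb_edac.ko (the usual module name for the file), the reporting mode declared above can be selected at load time, e.g. "modprobe sb_edac edac_op_state=0" for poll mode. The 0444 permission exposes the chosen value read-only under /sys/module/sb_edac/parameters/edac_op_state.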