Commit 1318952514d5651c453d89989595a9df3b37267b
Committed by: Linus Torvalds
Parent: 2d7bbb91c8
Exists in master and in 7 other branches

[PATCH] EDAC: probe1 cleanup 1-of-2

- Add lower-level functions that handle various parts of the initialization
  done by the xxx_probe1() functions. Some of the xxx_probe1() functions are
  much too long and complicated (see "Chapter 5: Functions" in
  Documentation/CodingStyle).
- Cleanup of probe1() functions in EDAC

Signed-off-by: Doug Thompson <norsk5@xmission.com>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 6 changed files with 518 additions and 444 deletions
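The core of the cleanup is a single pattern: hoist the per-csrow initialization loop out of xxx_probe1() into a dedicated helper, so that probe1() shrinks to allocation, mci field setup, and registration. Below is a condensed sketch of that shape, not the driver code itself; example_init_csrows() and example_probe1() are illustrative names, and the real helper introduced by this patch (amd76x_init_csrows() in the diff that follows) does the full chip-select register decoding.

static void example_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
				enum edac_type edac_mode)
{
	int index;

	/* per-csrow setup that used to live inline in xxx_probe1() */
	for (index = 0; index < mci->nr_csrows; index++) {
		struct csrow_info *csrow = &mci->csrows[index];

		/* ... read the chip-select base/mask registers here and
		 *     fill in first_page, nr_pages, last_page, etc. ... */
		csrow->edac_mode = edac_mode;
	}
}

static int example_probe1(struct pci_dev *pdev, int dev_idx)
{
	struct mem_ctl_info *mci;

	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
	if (mci == NULL)
		return -ENOMEM;

	/* probe1() now only wires up mci fields and delegates the loop */
	example_init_csrows(mci, pdev, EDAC_SECDED);

	if (edac_mc_add_mc(mci, 0)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	return 0;
}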
drivers/edac/amd76x_edac.c
1 | /* | 1 | /* |
2 | * AMD 76x Memory Controller kernel module | 2 | * AMD 76x Memory Controller kernel module |
3 | * (C) 2003 Linux Networx (http://lnxi.com) | 3 | * (C) 2003 Linux Networx (http://lnxi.com) |
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * Written by Thayne Harbaugh | 7 | * Written by Thayne Harbaugh |
8 | * Based on work by Dan Hollis <goemon at anime dot net> and others. | 8 | * Based on work by Dan Hollis <goemon at anime dot net> and others. |
9 | * http://www.anime.net/~goemon/linux-ecc/ | 9 | * http://www.anime.net/~goemon/linux-ecc/ |
10 | * | 10 | * |
11 | * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $ | 11 | * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $ |
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/config.h> | 15 | #include <linux/config.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/pci_ids.h> | 19 | #include <linux/pci_ids.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include "edac_mc.h" | 21 | #include "edac_mc.h" |
22 | 22 | ||
23 | #define AMD76X_REVISION " Ver: 2.0.0 " __DATE__ | 23 | #define AMD76X_REVISION " Ver: 2.0.0 " __DATE__ |
24 | 24 | ||
25 | 25 | ||
26 | #define amd76x_printk(level, fmt, arg...) \ | 26 | #define amd76x_printk(level, fmt, arg...) \ |
27 | edac_printk(level, "amd76x", fmt, ##arg) | 27 | edac_printk(level, "amd76x", fmt, ##arg) |
28 | 28 | ||
29 | #define amd76x_mc_printk(mci, level, fmt, arg...) \ | 29 | #define amd76x_mc_printk(mci, level, fmt, arg...) \ |
30 | edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg) | 30 | edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg) |
31 | 31 | ||
32 | #define AMD76X_NR_CSROWS 8 | 32 | #define AMD76X_NR_CSROWS 8 |
33 | #define AMD76X_NR_CHANS 1 | 33 | #define AMD76X_NR_CHANS 1 |
34 | #define AMD76X_NR_DIMMS 4 | 34 | #define AMD76X_NR_DIMMS 4 |
35 | 35 | ||
36 | /* AMD 76x register addresses - device 0 function 0 - PCI bridge */ | 36 | /* AMD 76x register addresses - device 0 function 0 - PCI bridge */ |
37 | 37 | ||
38 | #define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b) | 38 | #define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b) |
39 | * | 39 | * |
40 | * 31:16 reserved | 40 | * 31:16 reserved |
41 | * 15:14 SERR enabled: x1=ue 1x=ce | 41 | * 15:14 SERR enabled: x1=ue 1x=ce |
42 | * 13 reserved | 42 | * 13 reserved |
43 | * 12 diag: disabled, enabled | 43 | * 12 diag: disabled, enabled |
44 | * 11:10 mode: dis, EC, ECC, ECC+scrub | 44 | * 11:10 mode: dis, EC, ECC, ECC+scrub |
45 | * 9:8 status: x1=ue 1x=ce | 45 | * 9:8 status: x1=ue 1x=ce |
46 | * 7:4 UE cs row | 46 | * 7:4 UE cs row |
47 | * 3:0 CE cs row | 47 | * 3:0 CE cs row |
48 | */ | 48 | */ |
49 | 49 | ||
50 | #define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b) | 50 | #define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b) |
51 | * | 51 | * |
52 | * 31:26 clock disable 5 - 0 | 52 | * 31:26 clock disable 5 - 0 |
53 | * 25 SDRAM init | 53 | * 25 SDRAM init |
54 | * 24 reserved | 54 | * 24 reserved |
55 | * 23 mode register service | 55 | * 23 mode register service |
56 | * 22:21 suspend to RAM | 56 | * 22:21 suspend to RAM |
57 | * 20 burst refresh enable | 57 | * 20 burst refresh enable |
58 | * 19 refresh disable | 58 | * 19 refresh disable |
59 | * 18 reserved | 59 | * 18 reserved |
60 | * 17:16 cycles-per-refresh | 60 | * 17:16 cycles-per-refresh |
61 | * 15:8 reserved | 61 | * 15:8 reserved |
62 | * 7:0 x4 mode enable 7 - 0 | 62 | * 7:0 x4 mode enable 7 - 0 |
63 | */ | 63 | */ |
64 | 64 | ||
65 | #define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b) | 65 | #define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b) |
66 | * | 66 | * |
67 | * 31:23 chip-select base | 67 | * 31:23 chip-select base |
68 | * 22:16 reserved | 68 | * 22:16 reserved |
69 | * 15:7 chip-select mask | 69 | * 15:7 chip-select mask |
70 | * 6:3 reserved | 70 | * 6:3 reserved |
71 | * 2:1 address mode | 71 | * 2:1 address mode |
72 | * 0 chip-select enable | 72 | * 0 chip-select enable |
73 | */ | 73 | */ |
74 | 74 | ||
75 | struct amd76x_error_info { | 75 | struct amd76x_error_info { |
76 | u32 ecc_mode_status; | 76 | u32 ecc_mode_status; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | enum amd76x_chips { | 79 | enum amd76x_chips { |
80 | AMD761 = 0, | 80 | AMD761 = 0, |
81 | AMD762 | 81 | AMD762 |
82 | }; | 82 | }; |
83 | 83 | ||
84 | struct amd76x_dev_info { | 84 | struct amd76x_dev_info { |
85 | const char *ctl_name; | 85 | const char *ctl_name; |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static const struct amd76x_dev_info amd76x_devs[] = { | 88 | static const struct amd76x_dev_info amd76x_devs[] = { |
89 | [AMD761] = { | 89 | [AMD761] = { |
90 | .ctl_name = "AMD761" | 90 | .ctl_name = "AMD761" |
91 | }, | 91 | }, |
92 | [AMD762] = { | 92 | [AMD762] = { |
93 | .ctl_name = "AMD762" | 93 | .ctl_name = "AMD762" |
94 | }, | 94 | }, |
95 | }; | 95 | }; |
96 | 96 | ||
97 | /** | 97 | /** |
98 | * amd76x_get_error_info - fetch error information | 98 | * amd76x_get_error_info - fetch error information |
99 | * @mci: Memory controller | 99 | * @mci: Memory controller |
100 | * @info: Info to fill in | 100 | * @info: Info to fill in |
101 | * | 101 | * |
102 | * Fetch and store the AMD76x ECC status. Clear pending status | 102 | * Fetch and store the AMD76x ECC status. Clear pending status |
103 | * on the chip so that further errors will be reported | 103 | * on the chip so that further errors will be reported |
104 | */ | 104 | */ |
105 | static void amd76x_get_error_info(struct mem_ctl_info *mci, | 105 | static void amd76x_get_error_info(struct mem_ctl_info *mci, |
106 | struct amd76x_error_info *info) | 106 | struct amd76x_error_info *info) |
107 | { | 107 | { |
108 | struct pci_dev *pdev; | 108 | struct pci_dev *pdev; |
109 | 109 | ||
110 | pdev = to_pci_dev(mci->dev); | 110 | pdev = to_pci_dev(mci->dev); |
111 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, | 111 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, |
112 | &info->ecc_mode_status); | 112 | &info->ecc_mode_status); |
113 | 113 | ||
114 | if (info->ecc_mode_status & BIT(8)) | 114 | if (info->ecc_mode_status & BIT(8)) |
115 | pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS, | 115 | pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS, |
116 | (u32) BIT(8), (u32) BIT(8)); | 116 | (u32) BIT(8), (u32) BIT(8)); |
117 | 117 | ||
118 | if (info->ecc_mode_status & BIT(9)) | 118 | if (info->ecc_mode_status & BIT(9)) |
119 | pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS, | 119 | pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS, |
120 | (u32) BIT(9), (u32) BIT(9)); | 120 | (u32) BIT(9), (u32) BIT(9)); |
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * amd76x_process_error_info - Error check | 124 | * amd76x_process_error_info - Error check |
125 | * @mci: Memory controller | 125 | * @mci: Memory controller |
126 | * @info: Previously fetched information from chip | 126 | * @info: Previously fetched information from chip |
127 | * @handle_errors: 1 if we should do recovery | 127 | * @handle_errors: 1 if we should do recovery |
128 | * | 128 | * |
129 | * Process the chip state and decide if an error has occurred. | 129 | * Process the chip state and decide if an error has occurred. |
130 | * A return of 1 indicates an error. Also if handle_errors is true | 130 | * A return of 1 indicates an error. Also if handle_errors is true |
131 | * then attempt to handle and clean up after the error | 131 | * then attempt to handle and clean up after the error |
132 | */ | 132 | */ |
133 | static int amd76x_process_error_info(struct mem_ctl_info *mci, | 133 | static int amd76x_process_error_info(struct mem_ctl_info *mci, |
134 | struct amd76x_error_info *info, int handle_errors) | 134 | struct amd76x_error_info *info, int handle_errors) |
135 | { | 135 | { |
136 | int error_found; | 136 | int error_found; |
137 | u32 row; | 137 | u32 row; |
138 | 138 | ||
139 | error_found = 0; | 139 | error_found = 0; |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * Check for an uncorrectable error | 142 | * Check for an uncorrectable error |
143 | */ | 143 | */ |
144 | if (info->ecc_mode_status & BIT(8)) { | 144 | if (info->ecc_mode_status & BIT(8)) { |
145 | error_found = 1; | 145 | error_found = 1; |
146 | 146 | ||
147 | if (handle_errors) { | 147 | if (handle_errors) { |
148 | row = (info->ecc_mode_status >> 4) & 0xf; | 148 | row = (info->ecc_mode_status >> 4) & 0xf; |
149 | edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0, | 149 | edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0, |
150 | row, mci->ctl_name); | 150 | row, mci->ctl_name); |
151 | } | 151 | } |
152 | } | 152 | } |
153 | 153 | ||
154 | /* | 154 | /* |
155 | * Check for a correctable error | 155 | * Check for a correctable error |
156 | */ | 156 | */ |
157 | if (info->ecc_mode_status & BIT(9)) { | 157 | if (info->ecc_mode_status & BIT(9)) { |
158 | error_found = 1; | 158 | error_found = 1; |
159 | 159 | ||
160 | if (handle_errors) { | 160 | if (handle_errors) { |
161 | row = info->ecc_mode_status & 0xf; | 161 | row = info->ecc_mode_status & 0xf; |
162 | edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0, | 162 | edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0, |
163 | 0, row, 0, mci->ctl_name); | 163 | 0, row, 0, mci->ctl_name); |
164 | } | 164 | } |
165 | } | 165 | } |
166 | 166 | ||
167 | return error_found; | 167 | return error_found; |
168 | } | 168 | } |
169 | 169 | ||
170 | /** | 170 | /** |
171 | * amd76x_check - Poll the controller | 171 | * amd76x_check - Poll the controller |
172 | * @mci: Memory controller | 172 | * @mci: Memory controller |
173 | * | 173 | * |
174 | * Called by the poll handlers this function reads the status | 174 | * Called by the poll handlers this function reads the status |
175 | * from the controller and checks for errors. | 175 | * from the controller and checks for errors. |
176 | */ | 176 | */ |
177 | static void amd76x_check(struct mem_ctl_info *mci) | 177 | static void amd76x_check(struct mem_ctl_info *mci) |
178 | { | 178 | { |
179 | struct amd76x_error_info info; | 179 | struct amd76x_error_info info; |
180 | debugf3("%s()\n", __func__); | 180 | debugf3("%s()\n", __func__); |
181 | amd76x_get_error_info(mci, &info); | 181 | amd76x_get_error_info(mci, &info); |
182 | amd76x_process_error_info(mci, &info, 1); | 182 | amd76x_process_error_info(mci, &info, 1); |
183 | } | 183 | } |
184 | 184 | ||
185 | static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | ||
186 | enum edac_type edac_mode) | ||
187 | { | ||
188 | struct csrow_info *csrow; | ||
189 | u32 mba, mba_base, mba_mask, dms; | ||
190 | int index; | ||
191 | |||
192 | for (index = 0; index < mci->nr_csrows; index++) { | ||
193 | csrow = &mci->csrows[index]; | ||
194 | |||
195 | /* find the DRAM Chip Select Base address and mask */ | ||
196 | pci_read_config_dword(pdev, | ||
197 | AMD76X_MEM_BASE_ADDR + (index * 4), | ||
198 | &mba); | ||
199 | |||
200 | if (!(mba & BIT(0))) | ||
201 | continue; | ||
202 | |||
203 | mba_base = mba & 0xff800000UL; | ||
204 | mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; | ||
205 | pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); | ||
206 | csrow->first_page = mba_base >> PAGE_SHIFT; | ||
207 | csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; | ||
208 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; | ||
209 | csrow->page_mask = mba_mask >> PAGE_SHIFT; | ||
210 | csrow->grain = csrow->nr_pages << PAGE_SHIFT; | ||
211 | csrow->mtype = MEM_RDDR; | ||
212 | csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; | ||
213 | csrow->edac_mode = edac_mode; | ||
214 | } | ||
215 | } | ||
216 | |||
185 | /** | 217 | /** |
186 | * amd76x_probe1 - Perform set up for detected device | 218 | * amd76x_probe1 - Perform set up for detected device |
187 | * @pdev; PCI device detected | 219 | * @pdev; PCI device detected |
188 | * @dev_idx: Device type index | 220 | * @dev_idx: Device type index |
189 | * | 221 | * |
190 | * We have found an AMD76x and now need to set up the memory | 222 | * We have found an AMD76x and now need to set up the memory |
191 | * controller status reporting. We configure and set up the | 223 | * controller status reporting. We configure and set up the |
192 | * memory controller reporting and claim the device. | 224 | * memory controller reporting and claim the device. |
193 | */ | 225 | */ |
194 | static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | 226 | static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) |
195 | { | 227 | { |
196 | int rc = -ENODEV; | 228 | static const enum edac_type ems_modes[] = { |
197 | int index; | ||
198 | struct mem_ctl_info *mci = NULL; | ||
199 | enum edac_type ems_modes[] = { | ||
200 | EDAC_NONE, | 229 | EDAC_NONE, |
201 | EDAC_EC, | 230 | EDAC_EC, |
202 | EDAC_SECDED, | 231 | EDAC_SECDED, |
203 | EDAC_SECDED | 232 | EDAC_SECDED |
204 | }; | 233 | }; |
234 | struct mem_ctl_info *mci = NULL; | ||
205 | u32 ems; | 235 | u32 ems; |
206 | u32 ems_mode; | 236 | u32 ems_mode; |
207 | struct amd76x_error_info discard; | 237 | struct amd76x_error_info discard; |
208 | 238 | ||
209 | debugf0("%s()\n", __func__); | 239 | debugf0("%s()\n", __func__); |
210 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); | 240 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); |
211 | ems_mode = (ems >> 10) & 0x3; | 241 | ems_mode = (ems >> 10) & 0x3; |
212 | mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); | 242 | mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); |
213 | 243 | ||
214 | if (mci == NULL) { | 244 | if (mci == NULL) { |
215 | rc = -ENOMEM; | 245 | return -ENOMEM; |
216 | goto fail; | ||
217 | } | 246 | } |
218 | 247 | ||
219 | debugf0("%s(): mci = %p\n", __func__, mci); | 248 | debugf0("%s(): mci = %p\n", __func__, mci); |
220 | mci->dev = &pdev->dev; | 249 | mci->dev = &pdev->dev; |
221 | mci->mtype_cap = MEM_FLAG_RDDR; | 250 | mci->mtype_cap = MEM_FLAG_RDDR; |
222 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 251 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
223 | mci->edac_cap = ems_mode ? | 252 | mci->edac_cap = ems_mode ? |
224 | (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; | 253 | (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; |
225 | mci->mod_name = EDAC_MOD_STR; | 254 | mci->mod_name = EDAC_MOD_STR; |
226 | mci->mod_ver = AMD76X_REVISION; | 255 | mci->mod_ver = AMD76X_REVISION; |
227 | mci->ctl_name = amd76x_devs[dev_idx].ctl_name; | 256 | mci->ctl_name = amd76x_devs[dev_idx].ctl_name; |
228 | mci->edac_check = amd76x_check; | 257 | mci->edac_check = amd76x_check; |
229 | mci->ctl_page_to_phys = NULL; | 258 | mci->ctl_page_to_phys = NULL; |
230 | 259 | ||
231 | for (index = 0; index < mci->nr_csrows; index++) { | 260 | amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]); |
232 | struct csrow_info *csrow = &mci->csrows[index]; | ||
233 | u32 mba; | ||
234 | u32 mba_base; | ||
235 | u32 mba_mask; | ||
236 | u32 dms; | ||
237 | |||
238 | /* find the DRAM Chip Select Base address and mask */ | ||
239 | pci_read_config_dword(pdev, | ||
240 | AMD76X_MEM_BASE_ADDR + (index * 4), &mba); | ||
241 | |||
242 | if (!(mba & BIT(0))) | ||
243 | continue; | ||
244 | |||
245 | mba_base = mba & 0xff800000UL; | ||
246 | mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; | ||
247 | pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); | ||
248 | csrow->first_page = mba_base >> PAGE_SHIFT; | ||
249 | csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; | ||
250 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; | ||
251 | csrow->page_mask = mba_mask >> PAGE_SHIFT; | ||
252 | csrow->grain = csrow->nr_pages << PAGE_SHIFT; | ||
253 | csrow->mtype = MEM_RDDR; | ||
254 | csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; | ||
255 | csrow->edac_mode = ems_modes[ems_mode]; | ||
256 | } | ||
257 | |||
258 | amd76x_get_error_info(mci, &discard); /* clear counters */ | 261 | amd76x_get_error_info(mci, &discard); /* clear counters */ |
259 | 262 | ||
260 | /* Here we assume that we will never see multiple instances of this | 263 | /* Here we assume that we will never see multiple instances of this |
261 | * type of memory controller. The ID is therefore hardcoded to 0. | 264 | * type of memory controller. The ID is therefore hardcoded to 0. |
262 | */ | 265 | */ |
263 | if (edac_mc_add_mc(mci,0)) { | 266 | if (edac_mc_add_mc(mci,0)) { |
264 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); | 267 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
265 | goto fail; | 268 | goto fail; |
266 | } | 269 | } |
267 | 270 | ||
268 | /* get this far and it's successful */ | 271 | /* get this far and it's successful */ |
269 | debugf3("%s(): success\n", __func__); | 272 | debugf3("%s(): success\n", __func__); |
270 | return 0; | 273 | return 0; |
271 | 274 | ||
272 | fail: | 275 | fail: |
273 | if (mci != NULL) | 276 | edac_mc_free(mci); |
274 | edac_mc_free(mci); | 277 | return -ENODEV; |
275 | return rc; | ||
276 | } | 278 | } |
277 | 279 | ||
278 | /* returns count (>= 0), or negative on error */ | 280 | /* returns count (>= 0), or negative on error */ |
279 | static int __devinit amd76x_init_one(struct pci_dev *pdev, | 281 | static int __devinit amd76x_init_one(struct pci_dev *pdev, |
280 | const struct pci_device_id *ent) | 282 | const struct pci_device_id *ent) |
281 | { | 283 | { |
282 | debugf0("%s()\n", __func__); | 284 | debugf0("%s()\n", __func__); |
283 | 285 | ||
284 | /* don't need to call pci_device_enable() */ | 286 | /* don't need to call pci_device_enable() */ |
285 | return amd76x_probe1(pdev, ent->driver_data); | 287 | return amd76x_probe1(pdev, ent->driver_data); |
286 | } | 288 | } |
287 | 289 | ||
288 | /** | 290 | /** |
289 | * amd76x_remove_one - driver shutdown | 291 | * amd76x_remove_one - driver shutdown |
290 | * @pdev: PCI device being handed back | 292 | * @pdev: PCI device being handed back |
291 | * | 293 | * |
292 | * Called when the driver is unloaded. Find the matching mci | 294 | * Called when the driver is unloaded. Find the matching mci |
293 | * structure for the device then delete the mci and free the | 295 | * structure for the device then delete the mci and free the |
294 | * resources. | 296 | * resources. |
295 | */ | 297 | */ |
296 | static void __devexit amd76x_remove_one(struct pci_dev *pdev) | 298 | static void __devexit amd76x_remove_one(struct pci_dev *pdev) |
297 | { | 299 | { |
298 | struct mem_ctl_info *mci; | 300 | struct mem_ctl_info *mci; |
299 | 301 | ||
300 | debugf0("%s()\n", __func__); | 302 | debugf0("%s()\n", __func__); |
301 | 303 | ||
302 | if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) | 304 | if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) |
303 | return; | 305 | return; |
304 | 306 | ||
305 | edac_mc_free(mci); | 307 | edac_mc_free(mci); |
306 | } | 308 | } |
307 | 309 | ||
308 | static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { | 310 | static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { |
309 | { | 311 | { |
310 | PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 312 | PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
311 | AMD762 | 313 | AMD762 |
312 | }, | 314 | }, |
313 | { | 315 | { |
314 | PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 316 | PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
315 | AMD761 | 317 | AMD761 |
316 | }, | 318 | }, |
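Before moving on to the e752x changes, it may help to see the arithmetic that amd76x_init_csrows() (and the old inline loop it replaces) performs on each AMD76X_MEM_BASE_ADDR register. The register value below is hypothetical, chosen only to illustrate the decode; PAGE_SHIFT is assumed to be 12 (4 KiB pages).

/*
 * Hypothetical chip-select register decode, following the code above:
 *
 *   mba        = 0x10000001                        bit 0 set -> csrow enabled
 *   mba_base   = mba & 0xff800000      = 0x10000000   (256 MiB base address)
 *   mba_mask   = ((mba & 0xff80) << 16) | 0x7fffff
 *                                       = 0x007fffff   (8 MiB row)
 *   first_page = mba_base >> 12        = 0x10000
 *   nr_pages   = (mba_mask + 1) >> 12  = 0x800         (2048 pages = 8 MiB)
 *   last_page  = first_page + nr_pages - 1 = 0x107ff
 */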
drivers/edac/e752x_edac.c
1 | /* | 1 | /* |
2 | * Intel e752x Memory Controller kernel module | 2 | * Intel e752x Memory Controller kernel module |
3 | * (C) 2004 Linux Networx (http://lnxi.com) | 3 | * (C) 2004 Linux Networx (http://lnxi.com) |
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * See "enum e752x_chips" below for supported chipsets | 7 | * See "enum e752x_chips" below for supported chipsets |
8 | * | 8 | * |
9 | * Written by Tom Zimmerman | 9 | * Written by Tom Zimmerman |
10 | * | 10 | * |
11 | * Contributors: | 11 | * Contributors: |
12 | * Thayne Harbaugh at realmsys.com (?) | 12 | * Thayne Harbaugh at realmsys.com (?) |
13 | * Wang Zhenyu at intel.com | 13 | * Wang Zhenyu at intel.com |
14 | * Dave Jiang at mvista.com | 14 | * Dave Jiang at mvista.com |
15 | * | 15 | * |
16 | * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ | 16 | * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ |
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/config.h> | 20 | #include <linux/config.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/pci_ids.h> | 24 | #include <linux/pci_ids.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include "edac_mc.h" | 26 | #include "edac_mc.h" |
27 | 27 | ||
28 | #define E752X_REVISION " Ver: 2.0.0 " __DATE__ | 28 | #define E752X_REVISION " Ver: 2.0.0 " __DATE__ |
29 | 29 | ||
30 | static int force_function_unhide; | 30 | static int force_function_unhide; |
31 | 31 | ||
32 | #define e752x_printk(level, fmt, arg...) \ | 32 | #define e752x_printk(level, fmt, arg...) \ |
33 | edac_printk(level, "e752x", fmt, ##arg) | 33 | edac_printk(level, "e752x", fmt, ##arg) |
34 | 34 | ||
35 | #define e752x_mc_printk(mci, level, fmt, arg...) \ | 35 | #define e752x_mc_printk(mci, level, fmt, arg...) \ |
36 | edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg) | 36 | edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg) |
37 | 37 | ||
38 | #ifndef PCI_DEVICE_ID_INTEL_7520_0 | 38 | #ifndef PCI_DEVICE_ID_INTEL_7520_0 |
39 | #define PCI_DEVICE_ID_INTEL_7520_0 0x3590 | 39 | #define PCI_DEVICE_ID_INTEL_7520_0 0x3590 |
40 | #endif /* PCI_DEVICE_ID_INTEL_7520_0 */ | 40 | #endif /* PCI_DEVICE_ID_INTEL_7520_0 */ |
41 | 41 | ||
42 | #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR | 42 | #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR |
43 | #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591 | 43 | #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591 |
44 | #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */ | 44 | #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */ |
45 | 45 | ||
46 | #ifndef PCI_DEVICE_ID_INTEL_7525_0 | 46 | #ifndef PCI_DEVICE_ID_INTEL_7525_0 |
47 | #define PCI_DEVICE_ID_INTEL_7525_0 0x359E | 47 | #define PCI_DEVICE_ID_INTEL_7525_0 0x359E |
48 | #endif /* PCI_DEVICE_ID_INTEL_7525_0 */ | 48 | #endif /* PCI_DEVICE_ID_INTEL_7525_0 */ |
49 | 49 | ||
50 | #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR | 50 | #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR |
51 | #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593 | 51 | #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593 |
52 | #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */ | 52 | #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */ |
53 | 53 | ||
54 | #ifndef PCI_DEVICE_ID_INTEL_7320_0 | 54 | #ifndef PCI_DEVICE_ID_INTEL_7320_0 |
55 | #define PCI_DEVICE_ID_INTEL_7320_0 0x3592 | 55 | #define PCI_DEVICE_ID_INTEL_7320_0 0x3592 |
56 | #endif /* PCI_DEVICE_ID_INTEL_7320_0 */ | 56 | #endif /* PCI_DEVICE_ID_INTEL_7320_0 */ |
57 | 57 | ||
58 | #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR | 58 | #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR |
59 | #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 | 59 | #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 |
60 | #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ | 60 | #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ |
61 | 61 | ||
62 | #define E752X_NR_CSROWS 8 /* number of csrows */ | 62 | #define E752X_NR_CSROWS 8 /* number of csrows */ |
63 | 63 | ||
64 | /* E752X register addresses - device 0 function 0 */ | 64 | /* E752X register addresses - device 0 function 0 */ |
65 | #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ | 65 | #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ |
66 | #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ | 66 | #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ |
67 | /* | 67 | /* |
68 | * 31:30 Device width row 7 | 68 | * 31:30 Device width row 7 |
69 | * 01=x8 10=x4 11=x8 DDR2 | 69 | * 01=x8 10=x4 11=x8 DDR2 |
70 | * 27:26 Device width row 6 | 70 | * 27:26 Device width row 6 |
71 | * 23:22 Device width row 5 | 71 | * 23:22 Device width row 5 |
72 | * 19:20 Device width row 4 | 72 | * 19:20 Device width row 4 |
73 | * 15:14 Device width row 3 | 73 | * 15:14 Device width row 3 |
74 | * 11:10 Device width row 2 | 74 | * 11:10 Device width row 2 |
75 | * 7:6 Device width row 1 | 75 | * 7:6 Device width row 1 |
76 | * 3:2 Device width row 0 | 76 | * 3:2 Device width row 0 |
77 | */ | 77 | */ |
78 | #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */ | 78 | #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */ |
79 | /* FIXME:IS THIS RIGHT? */ | 79 | /* FIXME:IS THIS RIGHT? */ |
80 | /* | 80 | /* |
81 | * 22 Number channels 0=1,1=2 | 81 | * 22 Number channels 0=1,1=2 |
82 | * 19:18 DRB Granularity 32/64MB | 82 | * 19:18 DRB Granularity 32/64MB |
83 | */ | 83 | */ |
84 | #define E752X_DRM 0x80 /* Dimm mapping register */ | 84 | #define E752X_DRM 0x80 /* Dimm mapping register */ |
85 | #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */ | 85 | #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */ |
86 | /* | 86 | /* |
87 | * 14:12 1 single A, 2 single B, 3 dual | 87 | * 14:12 1 single A, 2 single B, 3 dual |
88 | */ | 88 | */ |
89 | #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ | 89 | #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ |
90 | #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ | 90 | #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ |
91 | #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ | 91 | #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ |
92 | #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */ | 92 | #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */ |
93 | 93 | ||
94 | /* E752X register addresses - device 0 function 1 */ | 94 | /* E752X register addresses - device 0 function 1 */ |
95 | #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */ | 95 | #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */ |
96 | #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */ | 96 | #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */ |
97 | #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */ | 97 | #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */ |
98 | #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */ | 98 | #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */ |
99 | #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */ | 99 | #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */ |
100 | #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */ | 100 | #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */ |
101 | #define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */ | 101 | #define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */ |
102 | #define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */ | 102 | #define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */ |
103 | #define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */ | 103 | #define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */ |
104 | #define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */ | 104 | #define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */ |
105 | #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */ | 105 | #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */ |
106 | #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */ | 106 | #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */ |
107 | #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */ | 107 | #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */ |
108 | #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI command reg (8b) */ | 108 | #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI command reg (8b) */ |
109 | #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */ | 109 | #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */ |
110 | #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */ | 110 | #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */ |
111 | #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */ | 111 | #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */ |
112 | #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */ | 112 | #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */ |
113 | #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */ | 113 | #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */ |
114 | #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */ | 114 | #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */ |
115 | /* error address register (32b) */ | 115 | /* error address register (32b) */ |
116 | /* | 116 | /* |
117 | * 31 Reserved | 117 | * 31 Reserved |
118 | * 30:2 CE address (64 byte block 34:6) | 118 | * 30:2 CE address (64 byte block 34:6) |
119 | * 1 Reserved | 119 | * 1 Reserved |
120 | * 0 HiLoCS | 120 | * 0 HiLoCS |
121 | */ | 121 | */ |
122 | #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */ | 122 | #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */ |
123 | /* error address register (32b) */ | 123 | /* error address register (32b) */ |
124 | /* | 124 | /* |
125 | * 31 Reserved | 125 | * 31 Reserved |
126 | * 30:2 CE address (64 byte block 34:6) | 126 | * 30:2 CE address (64 byte block 34:6) |
127 | * 1 Reserved | 127 | * 1 Reserved |
128 | * 0 HiLoCS | 128 | * 0 HiLoCS |
129 | */ | 129 | */ |
130 | #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */ | 130 | #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */ |
131 | /* error address register (32b) */ | 131 | /* error address register (32b) */ |
132 | /* | 132 | /* |
133 | * 31 Reserved | 133 | * 31 Reserved |
134 | * 30:2 CE address (64 byte block 34:6) | 134 | * 30:2 CE address (64 byte block 34:6) |
135 | * 1 Reserved | 135 | * 1 Reserved |
136 | * 0 HiLoCS | 136 | * 0 HiLoCS |
137 | */ | 137 | */ |
138 | #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM first uncorrectable scrub memory */ | 138 | #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM first uncorrectable scrub memory */ |
139 | /* error address register (32b) */ | 139 | /* error address register (32b) */ |
140 | /* | 140 | /* |
141 | * 31 Reserved | 141 | * 31 Reserved |
142 | * 30:2 CE address (64 byte block 34:6) | 142 | * 30:2 CE address (64 byte block 34:6) |
143 | * 1 Reserved | 143 | * 1 Reserved |
144 | * 0 HiLoCS | 144 | * 0 HiLoCS |
145 | */ | 145 | */ |
146 | #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */ | 146 | #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */ |
147 | /* error syndrome register (16b) */ | 147 | /* error syndrome register (16b) */ |
148 | #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */ | 148 | #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */ |
149 | /* error syndrome register (16b) */ | 149 | /* error syndrome register (16b) */ |
150 | #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ | 150 | #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ |
151 | 151 | ||
152 | /* ICH5R register addresses - device 30 function 0 */ | 152 | /* ICH5R register addresses - device 30 function 0 */ |
153 | #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ | 153 | #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ |
154 | #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ | 154 | #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ |
155 | #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */ | 155 | #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */ |
156 | 156 | ||
157 | enum e752x_chips { | 157 | enum e752x_chips { |
158 | E7520 = 0, | 158 | E7520 = 0, |
159 | E7525 = 1, | 159 | E7525 = 1, |
160 | E7320 = 2 | 160 | E7320 = 2 |
161 | }; | 161 | }; |
162 | 162 | ||
163 | struct e752x_pvt { | 163 | struct e752x_pvt { |
164 | struct pci_dev *bridge_ck; | 164 | struct pci_dev *bridge_ck; |
165 | struct pci_dev *dev_d0f0; | 165 | struct pci_dev *dev_d0f0; |
166 | struct pci_dev *dev_d0f1; | 166 | struct pci_dev *dev_d0f1; |
167 | u32 tolm; | 167 | u32 tolm; |
168 | u32 remapbase; | 168 | u32 remapbase; |
169 | u32 remaplimit; | 169 | u32 remaplimit; |
170 | int mc_symmetric; | 170 | int mc_symmetric; |
171 | u8 map[8]; | 171 | u8 map[8]; |
172 | int map_type; | 172 | int map_type; |
173 | const struct e752x_dev_info *dev_info; | 173 | const struct e752x_dev_info *dev_info; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | struct e752x_dev_info { | 176 | struct e752x_dev_info { |
177 | u16 err_dev; | 177 | u16 err_dev; |
178 | u16 ctl_dev; | 178 | u16 ctl_dev; |
179 | const char *ctl_name; | 179 | const char *ctl_name; |
180 | }; | 180 | }; |
181 | 181 | ||
182 | struct e752x_error_info { | 182 | struct e752x_error_info { |
183 | u32 ferr_global; | 183 | u32 ferr_global; |
184 | u32 nerr_global; | 184 | u32 nerr_global; |
185 | u8 hi_ferr; | 185 | u8 hi_ferr; |
186 | u8 hi_nerr; | 186 | u8 hi_nerr; |
187 | u16 sysbus_ferr; | 187 | u16 sysbus_ferr; |
188 | u16 sysbus_nerr; | 188 | u16 sysbus_nerr; |
189 | u8 buf_ferr; | 189 | u8 buf_ferr; |
190 | u8 buf_nerr; | 190 | u8 buf_nerr; |
191 | u16 dram_ferr; | 191 | u16 dram_ferr; |
192 | u16 dram_nerr; | 192 | u16 dram_nerr; |
193 | u32 dram_sec1_add; | 193 | u32 dram_sec1_add; |
194 | u32 dram_sec2_add; | 194 | u32 dram_sec2_add; |
195 | u16 dram_sec1_syndrome; | 195 | u16 dram_sec1_syndrome; |
196 | u16 dram_sec2_syndrome; | 196 | u16 dram_sec2_syndrome; |
197 | u32 dram_ded_add; | 197 | u32 dram_ded_add; |
198 | u32 dram_scrb_add; | 198 | u32 dram_scrb_add; |
199 | u32 dram_retr_add; | 199 | u32 dram_retr_add; |
200 | }; | 200 | }; |
201 | 201 | ||
202 | static const struct e752x_dev_info e752x_devs[] = { | 202 | static const struct e752x_dev_info e752x_devs[] = { |
203 | [E7520] = { | 203 | [E7520] = { |
204 | .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, | 204 | .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, |
205 | .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0, | 205 | .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0, |
206 | .ctl_name = "E7520" | 206 | .ctl_name = "E7520" |
207 | }, | 207 | }, |
208 | [E7525] = { | 208 | [E7525] = { |
209 | .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, | 209 | .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, |
210 | .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0, | 210 | .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0, |
211 | .ctl_name = "E7525" | 211 | .ctl_name = "E7525" |
212 | }, | 212 | }, |
213 | [E7320] = { | 213 | [E7320] = { |
214 | .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, | 214 | .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, |
215 | .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, | 215 | .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, |
216 | .ctl_name = "E7320" | 216 | .ctl_name = "E7320" |
217 | }, | 217 | }, |
218 | }; | 218 | }; |
219 | 219 | ||
220 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, | 220 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, |
221 | unsigned long page) | 221 | unsigned long page) |
222 | { | 222 | { |
223 | u32 remap; | 223 | u32 remap; |
224 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 224 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
225 | 225 | ||
226 | debugf3("%s()\n", __func__); | 226 | debugf3("%s()\n", __func__); |
227 | 227 | ||
228 | if (page < pvt->tolm) | 228 | if (page < pvt->tolm) |
229 | return page; | 229 | return page; |
230 | 230 | ||
231 | if ((page >= 0x100000) && (page < pvt->remapbase)) | 231 | if ((page >= 0x100000) && (page < pvt->remapbase)) |
232 | return page; | 232 | return page; |
233 | 233 | ||
234 | remap = (page - pvt->tolm) + pvt->remapbase; | 234 | remap = (page - pvt->tolm) + pvt->remapbase; |
235 | 235 | ||
236 | if (remap < pvt->remaplimit) | 236 | if (remap < pvt->remaplimit) |
237 | return remap; | 237 | return remap; |
238 | 238 | ||
239 | e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); | 239 | e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); |
240 | return pvt->tolm - 1; | 240 | return pvt->tolm - 1; |
241 | } | 241 | } |
242 | 242 | ||
243 | static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, | 243 | static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, |
244 | u32 sec1_add, u16 sec1_syndrome) | 244 | u32 sec1_add, u16 sec1_syndrome) |
245 | { | 245 | { |
246 | u32 page; | 246 | u32 page; |
247 | int row; | 247 | int row; |
248 | int channel; | 248 | int channel; |
249 | int i; | 249 | int i; |
250 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 250 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
251 | 251 | ||
252 | debugf3("%s()\n", __func__); | 252 | debugf3("%s()\n", __func__); |
253 | 253 | ||
254 | /* convert the addr to 4k page */ | 254 | /* convert the addr to 4k page */ |
255 | page = sec1_add >> (PAGE_SHIFT - 4); | 255 | page = sec1_add >> (PAGE_SHIFT - 4); |
256 | 256 | ||
257 | /* FIXME - check for -1 */ | 257 | /* FIXME - check for -1 */ |
258 | if (pvt->mc_symmetric) { | 258 | if (pvt->mc_symmetric) { |
259 | /* chip select are bits 14 & 13 */ | 259 | /* chip select are bits 14 & 13 */ |
260 | row = ((page >> 1) & 3); | 260 | row = ((page >> 1) & 3); |
261 | e752x_printk(KERN_WARNING, | 261 | e752x_printk(KERN_WARNING, |
262 | "Test row %d Table %d %d %d %d %d %d %d %d\n", row, | 262 | "Test row %d Table %d %d %d %d %d %d %d %d\n", row, |
263 | pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], | 263 | pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], |
264 | pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]); | 264 | pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]); |
265 | 265 | ||
266 | /* test for channel remapping */ | 266 | /* test for channel remapping */ |
267 | for (i = 0; i < 8; i++) { | 267 | for (i = 0; i < 8; i++) { |
268 | if (pvt->map[i] == row) | 268 | if (pvt->map[i] == row) |
269 | break; | 269 | break; |
270 | } | 270 | } |
271 | 271 | ||
272 | e752x_printk(KERN_WARNING, "Test computed row %d\n", i); | 272 | e752x_printk(KERN_WARNING, "Test computed row %d\n", i); |
273 | 273 | ||
274 | if (i < 8) | 274 | if (i < 8) |
275 | row = i; | 275 | row = i; |
276 | else | 276 | else |
277 | e752x_mc_printk(mci, KERN_WARNING, | 277 | e752x_mc_printk(mci, KERN_WARNING, |
278 | "row %d not found in remap table\n", row); | 278 | "row %d not found in remap table\n", row); |
279 | } else | 279 | } else |
280 | row = edac_mc_find_csrow_by_page(mci, page); | 280 | row = edac_mc_find_csrow_by_page(mci, page); |
281 | 281 | ||
282 | /* 0 = channel A, 1 = channel B */ | 282 | /* 0 = channel A, 1 = channel B */ |
283 | channel = !(error_one & 1); | 283 | channel = !(error_one & 1); |
284 | 284 | ||
285 | if (!pvt->map_type) | 285 | if (!pvt->map_type) |
286 | row = 7 - row; | 286 | row = 7 - row; |
287 | 287 | ||
288 | edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel, | 288 | edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel, |
289 | "e752x CE"); | 289 | "e752x CE"); |
290 | } | 290 | } |
291 | 291 | ||
292 | static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, | 292 | static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, |
293 | u32 sec1_add, u16 sec1_syndrome, int *error_found, | 293 | u32 sec1_add, u16 sec1_syndrome, int *error_found, |
294 | int handle_error) | 294 | int handle_error) |
295 | { | 295 | { |
296 | *error_found = 1; | 296 | *error_found = 1; |
297 | 297 | ||
298 | if (handle_error) | 298 | if (handle_error) |
299 | do_process_ce(mci, error_one, sec1_add, sec1_syndrome); | 299 | do_process_ce(mci, error_one, sec1_add, sec1_syndrome); |
300 | } | 300 | } |
301 | 301 | ||
302 | static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, | 302 | static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, |
303 | u32 ded_add, u32 scrb_add) | 303 | u32 ded_add, u32 scrb_add) |
304 | { | 304 | { |
305 | u32 error_2b, block_page; | 305 | u32 error_2b, block_page; |
306 | int row; | 306 | int row; |
307 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 307 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
308 | 308 | ||
309 | debugf3("%s()\n", __func__); | 309 | debugf3("%s()\n", __func__); |
310 | 310 | ||
311 | if (error_one & 0x0202) { | 311 | if (error_one & 0x0202) { |
312 | error_2b = ded_add; | 312 | error_2b = ded_add; |
313 | 313 | ||
314 | /* convert to 4k address */ | 314 | /* convert to 4k address */ |
315 | block_page = error_2b >> (PAGE_SHIFT - 4); | 315 | block_page = error_2b >> (PAGE_SHIFT - 4); |
316 | 316 | ||
317 | row = pvt->mc_symmetric ? | 317 | row = pvt->mc_symmetric ? |
318 | /* chip select are bits 14 & 13 */ | 318 | /* chip select are bits 14 & 13 */ |
319 | ((block_page >> 1) & 3) : | 319 | ((block_page >> 1) & 3) : |
320 | edac_mc_find_csrow_by_page(mci, block_page); | 320 | edac_mc_find_csrow_by_page(mci, block_page); |
321 | 321 | ||
322 | edac_mc_handle_ue(mci, block_page, 0, row, | 322 | edac_mc_handle_ue(mci, block_page, 0, row, |
323 | "e752x UE from Read"); | 323 | "e752x UE from Read"); |
324 | } | 324 | } |
325 | if (error_one & 0x0404) { | 325 | if (error_one & 0x0404) { |
326 | error_2b = scrb_add; | 326 | error_2b = scrb_add; |
327 | 327 | ||
328 | /* convert to 4k address */ | 328 | /* convert to 4k address */ |
329 | block_page = error_2b >> (PAGE_SHIFT - 4); | 329 | block_page = error_2b >> (PAGE_SHIFT - 4); |
330 | 330 | ||
331 | row = pvt->mc_symmetric ? | 331 | row = pvt->mc_symmetric ? |
332 | /* chip select are bits 14 & 13 */ | 332 | /* chip select are bits 14 & 13 */ |
333 | ((block_page >> 1) & 3) : | 333 | ((block_page >> 1) & 3) : |
334 | edac_mc_find_csrow_by_page(mci, block_page); | 334 | edac_mc_find_csrow_by_page(mci, block_page); |
335 | 335 | ||
336 | edac_mc_handle_ue(mci, block_page, 0, row, | 336 | edac_mc_handle_ue(mci, block_page, 0, row, |
337 | "e752x UE from Scruber"); | 337 | "e752x UE from Scruber"); |
338 | } | 338 | } |
339 | } | 339 | } |
340 | 340 | ||
341 | static inline void process_ue(struct mem_ctl_info *mci, u16 error_one, | 341 | static inline void process_ue(struct mem_ctl_info *mci, u16 error_one, |
342 | u32 ded_add, u32 scrb_add, int *error_found, int handle_error) | 342 | u32 ded_add, u32 scrb_add, int *error_found, int handle_error) |
343 | { | 343 | { |
344 | *error_found = 1; | 344 | *error_found = 1; |
345 | 345 | ||
346 | if (handle_error) | 346 | if (handle_error) |
347 | do_process_ue(mci, error_one, ded_add, scrb_add); | 347 | do_process_ue(mci, error_one, ded_add, scrb_add); |
348 | } | 348 | } |
349 | 349 | ||
350 | static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, | 350 | static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, |
351 | int *error_found, int handle_error) | 351 | int *error_found, int handle_error) |
352 | { | 352 | { |
353 | *error_found = 1; | 353 | *error_found = 1; |
354 | 354 | ||
355 | if (!handle_error) | 355 | if (!handle_error) |
356 | return; | 356 | return; |
357 | 357 | ||
358 | debugf3("%s()\n", __func__); | 358 | debugf3("%s()\n", __func__); |
359 | edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); | 359 | edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); |
360 | } | 360 | } |
361 | 361 | ||
362 | static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, | 362 | static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, |
363 | u32 retry_add) | 363 | u32 retry_add) |
364 | { | 364 | { |
365 | u32 error_1b, page; | 365 | u32 error_1b, page; |
366 | int row; | 366 | int row; |
367 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 367 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
368 | 368 | ||
369 | error_1b = retry_add; | 369 | error_1b = retry_add; |
370 | page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ | 370 | page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ |
371 | row = pvt->mc_symmetric ? | 371 | row = pvt->mc_symmetric ? |
372 | ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ | 372 | ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ |
373 | edac_mc_find_csrow_by_page(mci, page); | 373 | edac_mc_find_csrow_by_page(mci, page); |
374 | e752x_mc_printk(mci, KERN_WARNING, | 374 | e752x_mc_printk(mci, KERN_WARNING, |
375 | "CE page 0x%lx, row %d : Memory read retry\n", | 375 | "CE page 0x%lx, row %d : Memory read retry\n", |
376 | (long unsigned int) page, row); | 376 | (long unsigned int) page, row); |
377 | } | 377 | } |
378 | 378 | ||
379 | static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, | 379 | static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, |
380 | u32 retry_add, int *error_found, int handle_error) | 380 | u32 retry_add, int *error_found, int handle_error) |
381 | { | 381 | { |
382 | *error_found = 1; | 382 | *error_found = 1; |
383 | 383 | ||
384 | if (handle_error) | 384 | if (handle_error) |
385 | do_process_ded_retry(mci, error, retry_add); | 385 | do_process_ded_retry(mci, error, retry_add); |
386 | } | 386 | } |
387 | 387 | ||
388 | static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, | 388 | static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, |
389 | int *error_found, int handle_error) | 389 | int *error_found, int handle_error) |
390 | { | 390 | { |
391 | *error_found = 1; | 391 | *error_found = 1; |
392 | 392 | ||
393 | if (handle_error) | 393 | if (handle_error) |
394 | e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n"); | 394 | e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n"); |
395 | } | 395 | } |
396 | 396 | ||
397 | static char *global_message[11] = { | 397 | static char *global_message[11] = { |
398 | "PCI Express C1", "PCI Express C", "PCI Express B1", | 398 | "PCI Express C1", "PCI Express C", "PCI Express B1", |
399 | "PCI Express B", "PCI Express A1", "PCI Express A", | 399 | "PCI Express B", "PCI Express A1", "PCI Express A", |
400 | "DMA Controler", "HUB Interface", "System Bus", | 400 | "DMA Controler", "HUB Interface", "System Bus", |
401 | "DRAM Controler", "Internal Buffer" | 401 | "DRAM Controler", "Internal Buffer" |
402 | }; | 402 | }; |
403 | 403 | ||
404 | static char *fatal_message[2] = { "Non-Fatal ", "Fatal " }; | 404 | static char *fatal_message[2] = { "Non-Fatal ", "Fatal " }; |
405 | 405 | ||
406 | static void do_global_error(int fatal, u32 errors) | 406 | static void do_global_error(int fatal, u32 errors) |
407 | { | 407 | { |
408 | int i; | 408 | int i; |
409 | 409 | ||
410 | for (i = 0; i < 11; i++) { | 410 | for (i = 0; i < 11; i++) { |
411 | if (errors & (1 << i)) | 411 | if (errors & (1 << i)) |
412 | e752x_printk(KERN_WARNING, "%sError %s\n", | 412 | e752x_printk(KERN_WARNING, "%sError %s\n", |
413 | fatal_message[fatal], global_message[i]); | 413 | fatal_message[fatal], global_message[i]); |
414 | } | 414 | } |
415 | } | 415 | } |
416 | 416 | ||
417 | static inline void global_error(int fatal, u32 errors, int *error_found, | 417 | static inline void global_error(int fatal, u32 errors, int *error_found, |
418 | int handle_error) | 418 | int handle_error) |
419 | { | 419 | { |
420 | *error_found = 1; | 420 | *error_found = 1; |
421 | 421 | ||
422 | if (handle_error) | 422 | if (handle_error) |
423 | do_global_error(fatal, errors); | 423 | do_global_error(fatal, errors); |
424 | } | 424 | } |
425 | 425 | ||
426 | static char *hub_message[7] = { | 426 | static char *hub_message[7] = { |
427 | "HI Address or Command Parity", "HI Illegal Access", | 427 | "HI Address or Command Parity", "HI Illegal Access", |
428 | "HI Internal Parity", "Out of Range Access", | 428 | "HI Internal Parity", "Out of Range Access", |
429 | "HI Data Parity", "Enhanced Config Access", | 429 | "HI Data Parity", "Enhanced Config Access", |
430 | "Hub Interface Target Abort" | 430 | "Hub Interface Target Abort" |
431 | }; | 431 | }; |
432 | 432 | ||
433 | static void do_hub_error(int fatal, u8 errors) | 433 | static void do_hub_error(int fatal, u8 errors) |
434 | { | 434 | { |
435 | int i; | 435 | int i; |
436 | 436 | ||
437 | for (i = 0; i < 7; i++) { | 437 | for (i = 0; i < 7; i++) { |
438 | if (errors & (1 << i)) | 438 | if (errors & (1 << i)) |
439 | e752x_printk(KERN_WARNING, "%sError %s\n", | 439 | e752x_printk(KERN_WARNING, "%sError %s\n", |
440 | fatal_message[fatal], hub_message[i]); | 440 | fatal_message[fatal], hub_message[i]); |
441 | } | 441 | } |
442 | } | 442 | } |
443 | 443 | ||
444 | static inline void hub_error(int fatal, u8 errors, int *error_found, | 444 | static inline void hub_error(int fatal, u8 errors, int *error_found, |
445 | int handle_error) | 445 | int handle_error) |
446 | { | 446 | { |
447 | *error_found = 1; | 447 | *error_found = 1; |
448 | 448 | ||
449 | if (handle_error) | 449 | if (handle_error) |
450 | do_hub_error(fatal, errors); | 450 | do_hub_error(fatal, errors); |
451 | } | 451 | } |
452 | 452 | ||
453 | static char *membuf_message[4] = { | 453 | static char *membuf_message[4] = { |
454 | "Internal PMWB to DRAM parity", | 454 | "Internal PMWB to DRAM parity", |
455 | "Internal PMWB to System Bus Parity", | 455 | "Internal PMWB to System Bus Parity", |
456 | "Internal System Bus or IO to PMWB Parity", | 456 | "Internal System Bus or IO to PMWB Parity", |
457 | "Internal DRAM to PMWB Parity" | 457 | "Internal DRAM to PMWB Parity" |
458 | }; | 458 | }; |
459 | 459 | ||
460 | static void do_membuf_error(u8 errors) | 460 | static void do_membuf_error(u8 errors) |
461 | { | 461 | { |
462 | int i; | 462 | int i; |
463 | 463 | ||
464 | for (i = 0; i < 4; i++) { | 464 | for (i = 0; i < 4; i++) { |
465 | if (errors & (1 << i)) | 465 | if (errors & (1 << i)) |
466 | e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n", | 466 | e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n", |
467 | membuf_message[i]); | 467 | membuf_message[i]); |
468 | } | 468 | } |
469 | } | 469 | } |
470 | 470 | ||
471 | static inline void membuf_error(u8 errors, int *error_found, int handle_error) | 471 | static inline void membuf_error(u8 errors, int *error_found, int handle_error) |
472 | { | 472 | { |
473 | *error_found = 1; | 473 | *error_found = 1; |
474 | 474 | ||
475 | if (handle_error) | 475 | if (handle_error) |
476 | do_membuf_error(errors); | 476 | do_membuf_error(errors); |
477 | } | 477 | } |
478 | 478 | ||
479 | static char *sysbus_message[10] = { | 479 | static char *sysbus_message[10] = { |
480 | "Addr or Request Parity", | 480 | "Addr or Request Parity", |
481 | "Data Strobe Glitch", | 481 | "Data Strobe Glitch", |
482 | "Addr Strobe Glitch", | 482 | "Addr Strobe Glitch", |
483 | "Data Parity", | 483 | "Data Parity", |
484 | "Addr Above TOM", | 484 | "Addr Above TOM", |
485 | "Non DRAM Lock Error", | 485 | "Non DRAM Lock Error", |
486 | "MCERR", "BINIT", | 486 | "MCERR", "BINIT", |
487 | "Memory Parity", | 487 | "Memory Parity", |
488 | "IO Subsystem Parity" | 488 | "IO Subsystem Parity" |
489 | }; | 489 | }; |
490 | 490 | ||
491 | static void do_sysbus_error(int fatal, u32 errors) | 491 | static void do_sysbus_error(int fatal, u32 errors) |
492 | { | 492 | { |
493 | int i; | 493 | int i; |
494 | 494 | ||
495 | for (i = 0; i < 10; i++) { | 495 | for (i = 0; i < 10; i++) { |
496 | if (errors & (1 << i)) | 496 | if (errors & (1 << i)) |
497 | e752x_printk(KERN_WARNING, "%sError System Bus %s\n", | 497 | e752x_printk(KERN_WARNING, "%sError System Bus %s\n", |
498 | fatal_message[fatal], sysbus_message[i]); | 498 | fatal_message[fatal], sysbus_message[i]); |
499 | } | 499 | } |
500 | } | 500 | } |
501 | 501 | ||
502 | static inline void sysbus_error(int fatal, u32 errors, int *error_found, | 502 | static inline void sysbus_error(int fatal, u32 errors, int *error_found, |
503 | int handle_error) | 503 | int handle_error) |
504 | { | 504 | { |
505 | *error_found = 1; | 505 | *error_found = 1; |
506 | 506 | ||
507 | if (handle_error) | 507 | if (handle_error) |
508 | do_sysbus_error(fatal, errors); | 508 | do_sysbus_error(fatal, errors); |
509 | } | 509 | } |
510 | 510 | ||
511 | static void e752x_check_hub_interface(struct e752x_error_info *info, | 511 | static void e752x_check_hub_interface(struct e752x_error_info *info, |
512 | int *error_found, int handle_error) | 512 | int *error_found, int handle_error) |
513 | { | 513 | { |
514 | u8 stat8; | 514 | u8 stat8; |
515 | 515 | ||
516 | //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); | 516 | //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); |
517 | 517 | ||
518 | stat8 = info->hi_ferr; | 518 | stat8 = info->hi_ferr; |
519 | 519 | ||
520 | if(stat8 & 0x7f) { /* Error, so process */ | 520 | if(stat8 & 0x7f) { /* Error, so process */ |
521 | stat8 &= 0x7f; | 521 | stat8 &= 0x7f; |
522 | 522 | ||
523 | if(stat8 & 0x2b) | 523 | if(stat8 & 0x2b) |
524 | hub_error(1, stat8 & 0x2b, error_found, handle_error); | 524 | hub_error(1, stat8 & 0x2b, error_found, handle_error); |
525 | 525 | ||
526 | if(stat8 & 0x54) | 526 | if(stat8 & 0x54) |
527 | hub_error(0, stat8 & 0x54, error_found, handle_error); | 527 | hub_error(0, stat8 & 0x54, error_found, handle_error); |
528 | } | 528 | } |
529 | 529 | ||
530 | //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); | 530 | //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); |
531 | 531 | ||
532 | stat8 = info->hi_nerr; | 532 | stat8 = info->hi_nerr; |
533 | 533 | ||
534 | if(stat8 & 0x7f) { /* Error, so process */ | 534 | if(stat8 & 0x7f) { /* Error, so process */ |
535 | stat8 &= 0x7f; | 535 | stat8 &= 0x7f; |
536 | 536 | ||
537 | if (stat8 & 0x2b) | 537 | if (stat8 & 0x2b) |
538 | hub_error(1, stat8 & 0x2b, error_found, handle_error); | 538 | hub_error(1, stat8 & 0x2b, error_found, handle_error); |
539 | 539 | ||
540 | if(stat8 & 0x54) | 540 | if(stat8 & 0x54) |
541 | hub_error(0, stat8 & 0x54, error_found, handle_error); | 541 | hub_error(0, stat8 & 0x54, error_found, handle_error); |
542 | } | 542 | } |
543 | } | 543 | } |
544 | 544 | ||
545 | static void e752x_check_sysbus(struct e752x_error_info *info, | 545 | static void e752x_check_sysbus(struct e752x_error_info *info, |
546 | int *error_found, int handle_error) | 546 | int *error_found, int handle_error) |
547 | { | 547 | { |
548 | u32 stat32, error32; | 548 | u32 stat32, error32; |
549 | 549 | ||
550 | //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32); | 550 | //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32); |
551 | stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16); | 551 | stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16); |
552 | 552 | ||
553 | if (stat32 == 0) | 553 | if (stat32 == 0) |
554 | return; /* no errors */ | 554 | return; /* no errors */ |
555 | 555 | ||
556 | error32 = (stat32 >> 16) & 0x3ff; | 556 | error32 = (stat32 >> 16) & 0x3ff; |
557 | stat32 = stat32 & 0x3ff; | 557 | stat32 = stat32 & 0x3ff; |
558 | 558 | ||
559 | if(stat32 & 0x083) | 559 | if(stat32 & 0x083) |
560 | sysbus_error(1, stat32 & 0x083, error_found, handle_error); | 560 | sysbus_error(1, stat32 & 0x083, error_found, handle_error); |
561 | 561 | ||
562 | if(stat32 & 0x37c) | 562 | if(stat32 & 0x37c) |
563 | sysbus_error(0, stat32 & 0x37c, error_found, handle_error); | 563 | sysbus_error(0, stat32 & 0x37c, error_found, handle_error); |
564 | 564 | ||
565 | if(error32 & 0x083) | 565 | if(error32 & 0x083) |
566 | sysbus_error(1, error32 & 0x083, error_found, handle_error); | 566 | sysbus_error(1, error32 & 0x083, error_found, handle_error); |
567 | 567 | ||
568 | if(error32 & 0x37c) | 568 | if(error32 & 0x37c) |
569 | sysbus_error(0, error32 & 0x37c, error_found, handle_error); | 569 | sysbus_error(0, error32 & 0x37c, error_found, handle_error); |
570 | } | 570 | } |
571 | 571 | ||
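Editor note: e752x_check_sysbus() above packs the first-error word into the low 16 bits and the next-error word into the high 16 bits, then splits fatal (mask 0x083) from non-fatal (mask 0x37c) sources. A minimal sketch with hypothetical register contents:

    /* Illustrative only: hypothetical SYSBUS_FERR/NERR values. */
    u32 stat32  = 0x0002 + (0x0100 << 16); /* ferr = 0x0002, nerr = 0x0100 */
    u32 error32 = (stat32 >> 16) & 0x3ff;  /* 0x100 & 0x37c -> non-fatal   */
    stat32 &= 0x3ff;                       /* 0x002 & 0x083 -> fatal       */
    /* results in sysbus_error(1, 0x002, ...) and sysbus_error(0, 0x100, ...) */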
572 | static void e752x_check_membuf (struct e752x_error_info *info, | 572 | static void e752x_check_membuf (struct e752x_error_info *info, |
573 | int *error_found, int handle_error) | 573 | int *error_found, int handle_error) |
574 | { | 574 | { |
575 | u8 stat8; | 575 | u8 stat8; |
576 | 576 | ||
577 | stat8 = info->buf_ferr; | 577 | stat8 = info->buf_ferr; |
578 | 578 | ||
579 | if (stat8 & 0x0f) { /* Error, so process */ | 579 | if (stat8 & 0x0f) { /* Error, so process */ |
580 | stat8 &= 0x0f; | 580 | stat8 &= 0x0f; |
581 | membuf_error(stat8, error_found, handle_error); | 581 | membuf_error(stat8, error_found, handle_error); |
582 | } | 582 | } |
583 | 583 | ||
584 | stat8 = info->buf_nerr; | 584 | stat8 = info->buf_nerr; |
585 | 585 | ||
586 | if (stat8 & 0x0f) { /* Error, so process */ | 586 | if (stat8 & 0x0f) { /* Error, so process */ |
587 | stat8 &= 0x0f; | 587 | stat8 &= 0x0f; |
588 | membuf_error(stat8, error_found, handle_error); | 588 | membuf_error(stat8, error_found, handle_error); |
589 | } | 589 | } |
590 | } | 590 | } |
591 | 591 | ||
592 | static void e752x_check_dram (struct mem_ctl_info *mci, | 592 | static void e752x_check_dram (struct mem_ctl_info *mci, |
593 | struct e752x_error_info *info, int *error_found, | 593 | struct e752x_error_info *info, int *error_found, |
594 | int handle_error) | 594 | int handle_error) |
595 | { | 595 | { |
596 | u16 error_one, error_next; | 596 | u16 error_one, error_next; |
597 | 597 | ||
598 | error_one = info->dram_ferr; | 598 | error_one = info->dram_ferr; |
599 | error_next = info->dram_nerr; | 599 | error_next = info->dram_nerr; |
600 | 600 | ||
601 | /* decode and report errors */ | 601 | /* decode and report errors */ |
602 | if(error_one & 0x0101) /* check first error correctable */ | 602 | if(error_one & 0x0101) /* check first error correctable */ |
603 | process_ce(mci, error_one, info->dram_sec1_add, | 603 | process_ce(mci, error_one, info->dram_sec1_add, |
604 | info->dram_sec1_syndrome, error_found, | 604 | info->dram_sec1_syndrome, error_found, |
605 | handle_error); | 605 | handle_error); |
606 | 606 | ||
607 | if(error_next & 0x0101) /* check next error correctable */ | 607 | if(error_next & 0x0101) /* check next error correctable */ |
608 | process_ce(mci, error_next, info->dram_sec2_add, | 608 | process_ce(mci, error_next, info->dram_sec2_add, |
609 | info->dram_sec2_syndrome, error_found, | 609 | info->dram_sec2_syndrome, error_found, |
610 | handle_error); | 610 | handle_error); |
611 | 611 | ||
612 | if(error_one & 0x4040) | 612 | if(error_one & 0x4040) |
613 | process_ue_no_info_wr(mci, error_found, handle_error); | 613 | process_ue_no_info_wr(mci, error_found, handle_error); |
614 | 614 | ||
615 | if(error_next & 0x4040) | 615 | if(error_next & 0x4040) |
616 | process_ue_no_info_wr(mci, error_found, handle_error); | 616 | process_ue_no_info_wr(mci, error_found, handle_error); |
617 | 617 | ||
618 | if(error_one & 0x2020) | 618 | if(error_one & 0x2020) |
619 | process_ded_retry(mci, error_one, info->dram_retr_add, | 619 | process_ded_retry(mci, error_one, info->dram_retr_add, |
620 | error_found, handle_error); | 620 | error_found, handle_error); |
621 | 621 | ||
622 | if(error_next & 0x2020) | 622 | if(error_next & 0x2020) |
623 | process_ded_retry(mci, error_next, info->dram_retr_add, | 623 | process_ded_retry(mci, error_next, info->dram_retr_add, |
624 | error_found, handle_error); | 624 | error_found, handle_error); |
625 | 625 | ||
626 | if(error_one & 0x0808) | 626 | if(error_one & 0x0808) |
627 | process_threshold_ce(mci, error_one, error_found, | 627 | process_threshold_ce(mci, error_one, error_found, |
628 | handle_error); | 628 | handle_error); |
629 | 629 | ||
630 | if(error_next & 0x0808) | 630 | if(error_next & 0x0808) |
631 | process_threshold_ce(mci, error_next, error_found, | 631 | process_threshold_ce(mci, error_next, error_found, |
632 | handle_error); | 632 | handle_error); |
633 | 633 | ||
634 | if(error_one & 0x0606) | 634 | if(error_one & 0x0606) |
635 | process_ue(mci, error_one, info->dram_ded_add, | 635 | process_ue(mci, error_one, info->dram_ded_add, |
636 | info->dram_scrb_add, error_found, handle_error); | 636 | info->dram_scrb_add, error_found, handle_error); |
637 | 637 | ||
638 | if(error_next & 0x0606) | 638 | if(error_next & 0x0606) |
639 | process_ue(mci, error_next, info->dram_ded_add, | 639 | process_ue(mci, error_next, info->dram_ded_add, |
640 | info->dram_scrb_add, error_found, handle_error); | 640 | info->dram_scrb_add, error_found, handle_error); |
641 | } | 641 | } |
642 | 642 | ||
643 | static void e752x_get_error_info (struct mem_ctl_info *mci, | 643 | static void e752x_get_error_info (struct mem_ctl_info *mci, |
644 | struct e752x_error_info *info) | 644 | struct e752x_error_info *info) |
645 | { | 645 | { |
646 | struct pci_dev *dev; | 646 | struct pci_dev *dev; |
647 | struct e752x_pvt *pvt; | 647 | struct e752x_pvt *pvt; |
648 | 648 | ||
649 | memset(info, 0, sizeof(*info)); | 649 | memset(info, 0, sizeof(*info)); |
650 | pvt = (struct e752x_pvt *) mci->pvt_info; | 650 | pvt = (struct e752x_pvt *) mci->pvt_info; |
651 | dev = pvt->dev_d0f1; | 651 | dev = pvt->dev_d0f1; |
652 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); | 652 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); |
653 | 653 | ||
654 | if (info->ferr_global) { | 654 | if (info->ferr_global) { |
655 | pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr); | 655 | pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr); |
656 | pci_read_config_word(dev, E752X_SYSBUS_FERR, | 656 | pci_read_config_word(dev, E752X_SYSBUS_FERR, |
657 | &info->sysbus_ferr); | 657 | &info->sysbus_ferr); |
658 | pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); | 658 | pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); |
659 | pci_read_config_word(dev, E752X_DRAM_FERR, | 659 | pci_read_config_word(dev, E752X_DRAM_FERR, |
660 | &info->dram_ferr); | 660 | &info->dram_ferr); |
661 | pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD, | 661 | pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD, |
662 | &info->dram_sec1_add); | 662 | &info->dram_sec1_add); |
663 | pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME, | 663 | pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME, |
664 | &info->dram_sec1_syndrome); | 664 | &info->dram_sec1_syndrome); |
665 | pci_read_config_dword(dev, E752X_DRAM_DED_ADD, | 665 | pci_read_config_dword(dev, E752X_DRAM_DED_ADD, |
666 | &info->dram_ded_add); | 666 | &info->dram_ded_add); |
667 | pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD, | 667 | pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD, |
668 | &info->dram_scrb_add); | 668 | &info->dram_scrb_add); |
669 | pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, | 669 | pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, |
670 | &info->dram_retr_add); | 670 | &info->dram_retr_add); |
671 | 671 | ||
672 | if (info->hi_ferr & 0x7f) | 672 | if (info->hi_ferr & 0x7f) |
673 | pci_write_config_byte(dev, E752X_HI_FERR, | 673 | pci_write_config_byte(dev, E752X_HI_FERR, |
674 | info->hi_ferr); | 674 | info->hi_ferr); |
675 | 675 | ||
676 | if (info->sysbus_ferr) | 676 | if (info->sysbus_ferr) |
677 | pci_write_config_word(dev, E752X_SYSBUS_FERR, | 677 | pci_write_config_word(dev, E752X_SYSBUS_FERR, |
678 | info->sysbus_ferr); | 678 | info->sysbus_ferr); |
679 | 679 | ||
680 | if (info->buf_ferr & 0x0f) | 680 | if (info->buf_ferr & 0x0f) |
681 | pci_write_config_byte(dev, E752X_BUF_FERR, | 681 | pci_write_config_byte(dev, E752X_BUF_FERR, |
682 | info->buf_ferr); | 682 | info->buf_ferr); |
683 | 683 | ||
684 | if (info->dram_ferr) | 684 | if (info->dram_ferr) |
685 | pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR, | 685 | pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR, |
686 | info->dram_ferr, info->dram_ferr); | 686 | info->dram_ferr, info->dram_ferr); |
687 | 687 | ||
688 | pci_write_config_dword(dev, E752X_FERR_GLOBAL, | 688 | pci_write_config_dword(dev, E752X_FERR_GLOBAL, |
689 | info->ferr_global); | 689 | info->ferr_global); |
690 | } | 690 | } |
691 | 691 | ||
692 | pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); | 692 | pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); |
693 | 693 | ||
694 | if (info->nerr_global) { | 694 | if (info->nerr_global) { |
695 | pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr); | 695 | pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr); |
696 | pci_read_config_word(dev, E752X_SYSBUS_NERR, | 696 | pci_read_config_word(dev, E752X_SYSBUS_NERR, |
697 | &info->sysbus_nerr); | 697 | &info->sysbus_nerr); |
698 | pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); | 698 | pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); |
699 | pci_read_config_word(dev, E752X_DRAM_NERR, | 699 | pci_read_config_word(dev, E752X_DRAM_NERR, |
700 | &info->dram_nerr); | 700 | &info->dram_nerr); |
701 | pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD, | 701 | pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD, |
702 | &info->dram_sec2_add); | 702 | &info->dram_sec2_add); |
703 | pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME, | 703 | pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME, |
704 | &info->dram_sec2_syndrome); | 704 | &info->dram_sec2_syndrome); |
705 | 705 | ||
706 | if (info->hi_nerr & 0x7f) | 706 | if (info->hi_nerr & 0x7f) |
707 | pci_write_config_byte(dev, E752X_HI_NERR, | 707 | pci_write_config_byte(dev, E752X_HI_NERR, |
708 | info->hi_nerr); | 708 | info->hi_nerr); |
709 | 709 | ||
710 | if (info->sysbus_nerr) | 710 | if (info->sysbus_nerr) |
711 | pci_write_config_word(dev, E752X_SYSBUS_NERR, | 711 | pci_write_config_word(dev, E752X_SYSBUS_NERR, |
712 | info->sysbus_nerr); | 712 | info->sysbus_nerr); |
713 | 713 | ||
714 | if (info->buf_nerr & 0x0f) | 714 | if (info->buf_nerr & 0x0f) |
715 | pci_write_config_byte(dev, E752X_BUF_NERR, | 715 | pci_write_config_byte(dev, E752X_BUF_NERR, |
716 | info->buf_nerr); | 716 | info->buf_nerr); |
717 | 717 | ||
718 | if (info->dram_nerr) | 718 | if (info->dram_nerr) |
719 | pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR, | 719 | pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR, |
720 | info->dram_nerr, info->dram_nerr); | 720 | info->dram_nerr, info->dram_nerr); |
721 | 721 | ||
722 | pci_write_config_dword(dev, E752X_NERR_GLOBAL, | 722 | pci_write_config_dword(dev, E752X_NERR_GLOBAL, |
723 | info->nerr_global); | 723 | info->nerr_global); |
724 | } | 724 | } |
725 | } | 725 | } |
726 | 726 | ||
727 | static int e752x_process_error_info (struct mem_ctl_info *mci, | 727 | static int e752x_process_error_info (struct mem_ctl_info *mci, |
728 | struct e752x_error_info *info, int handle_errors) | 728 | struct e752x_error_info *info, int handle_errors) |
729 | { | 729 | { |
730 | u32 error32, stat32; | 730 | u32 error32, stat32; |
731 | int error_found; | 731 | int error_found; |
732 | 732 | ||
733 | error_found = 0; | 733 | error_found = 0; |
734 | error32 = (info->ferr_global >> 18) & 0x3ff; | 734 | error32 = (info->ferr_global >> 18) & 0x3ff; |
735 | stat32 = (info->ferr_global >> 4) & 0x7ff; | 735 | stat32 = (info->ferr_global >> 4) & 0x7ff; |
736 | 736 | ||
737 | if (error32) | 737 | if (error32) |
738 | global_error(1, error32, &error_found, handle_errors); | 738 | global_error(1, error32, &error_found, handle_errors); |
739 | 739 | ||
740 | if (stat32) | 740 | if (stat32) |
741 | global_error(0, stat32, &error_found, handle_errors); | 741 | global_error(0, stat32, &error_found, handle_errors); |
742 | 742 | ||
743 | error32 = (info->nerr_global >> 18) & 0x3ff; | 743 | error32 = (info->nerr_global >> 18) & 0x3ff; |
744 | stat32 = (info->nerr_global >> 4) & 0x7ff; | 744 | stat32 = (info->nerr_global >> 4) & 0x7ff; |
745 | 745 | ||
746 | if (error32) | 746 | if (error32) |
747 | global_error(1, error32, &error_found, handle_errors); | 747 | global_error(1, error32, &error_found, handle_errors); |
748 | 748 | ||
749 | if (stat32) | 749 | if (stat32) |
750 | global_error(0, stat32, &error_found, handle_errors); | 750 | global_error(0, stat32, &error_found, handle_errors); |
751 | 751 | ||
752 | e752x_check_hub_interface(info, &error_found, handle_errors); | 752 | e752x_check_hub_interface(info, &error_found, handle_errors); |
753 | e752x_check_sysbus(info, &error_found, handle_errors); | 753 | e752x_check_sysbus(info, &error_found, handle_errors); |
754 | e752x_check_membuf(info, &error_found, handle_errors); | 754 | e752x_check_membuf(info, &error_found, handle_errors); |
755 | e752x_check_dram(mci, info, &error_found, handle_errors); | 755 | e752x_check_dram(mci, info, &error_found, handle_errors); |
756 | return error_found; | 756 | return error_found; |
757 | } | 757 | } |
758 | 758 | ||
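Editor note: the FERR_GLOBAL/NERR_GLOBAL decode in e752x_process_error_info() splits each register into the field passed as fatal (bits 27:18) and the field passed as non-fatal (bits 14:4). A worked example with a hypothetical value:

    /* Illustrative only: hypothetical FERR_GLOBAL contents. */
    u32 ferr_global = 0x00040020;
    u32 error32 = (ferr_global >> 18) & 0x3ff; /* 0x001 -> global_error(1, ...) */
    u32 stat32  = (ferr_global >> 4)  & 0x7ff; /* 0x002 -> global_error(0, ...) */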
759 | static void e752x_check(struct mem_ctl_info *mci) | 759 | static void e752x_check(struct mem_ctl_info *mci) |
760 | { | 760 | { |
761 | struct e752x_error_info info; | 761 | struct e752x_error_info info; |
762 | 762 | ||
763 | debugf3("%s()\n", __func__); | 763 | debugf3("%s()\n", __func__); |
764 | e752x_get_error_info(mci, &info); | 764 | e752x_get_error_info(mci, &info); |
765 | e752x_process_error_info(mci, &info, 1); | 765 | e752x_process_error_info(mci, &info, 1); |
766 | } | 766 | } |
767 | 767 | ||
768 | static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | 768 | /* Return 1 if dual channel mode is active. Else return 0. */ |
769 | static inline int dual_channel_active(u16 ddrcsr) | ||
769 | { | 770 | { |
770 | int rc = -ENODEV; | 771 | return (((ddrcsr >> 12) & 3) == 3); |
772 | } | ||
773 | |||
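Editor note: the new dual_channel_active() helper reports whether both of DDRCSR bits 13:12 are set, which is the same test probe1 previously did inline. A minimal usage sketch (the ddrcsr value is hypothetical):

    u16 ddrcsr   = 0x3000;                          /* bits 13:12 set */
    int nr_chans = dual_channel_active(ddrcsr) + 1; /* 2 channels     */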
774 | static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | ||
775 | u16 ddrcsr) | ||
776 | { | ||
777 | struct csrow_info *csrow; | ||
778 | unsigned long last_cumul_size; | ||
779 | int index, mem_dev, drc_chan; | ||
780 | int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ | ||
781 | int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ | ||
782 | u8 value; | ||
783 | u32 dra, drc, cumul_size; | ||
784 | |||
785 | pci_read_config_dword(pdev, E752X_DRA, &dra); | ||
786 | pci_read_config_dword(pdev, E752X_DRC, &drc); | ||
787 | drc_chan = dual_channel_active(ddrcsr); | ||
788 | drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ | ||
789 | drc_ddim = (drc >> 20) & 0x3; | ||
790 | |||
791 | /* The dram row boundary (DRB) reg values are boundary address for | ||
792 | * each DRAM row with a granularity of 64 or 128MB (single/dual | ||
793 | * channel operation). DRB regs are cumulative; therefore DRB7 will | ||
794 | * contain the total memory contained in all eight rows. | ||
795 | */ | ||
796 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | ||
797 | /* mem_dev 0=x8, 1=x4 */ | ||
798 | mem_dev = (dra >> (index * 4 + 2)) & 0x3; | ||
799 | csrow = &mci->csrows[index]; | ||
800 | |||
801 | mem_dev = (mem_dev == 2); | ||
802 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | ||
803 | /* convert a 128 or 64 MiB DRB to a page size. */ | ||
804 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | ||
805 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | ||
806 | cumul_size); | ||
807 | if (cumul_size == last_cumul_size) | ||
808 | continue; /* not populated */ | ||
809 | |||
810 | csrow->first_page = last_cumul_size; | ||
811 | csrow->last_page = cumul_size - 1; | ||
812 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
813 | last_cumul_size = cumul_size; | ||
814 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | ||
815 | csrow->mtype = MEM_RDDR; /* only one type supported */ | ||
816 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | ||
817 | |||
818 | /* | ||
819 | * if single channel or x8 devices then SECDED | ||
820 | * if dual channel and x4 then S4ECD4ED | ||
821 | */ | ||
822 | if (drc_ddim) { | ||
823 | if (drc_chan && mem_dev) { | ||
824 | csrow->edac_mode = EDAC_S4ECD4ED; | ||
825 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | ||
826 | } else { | ||
827 | csrow->edac_mode = EDAC_SECDED; | ||
828 | mci->edac_cap |= EDAC_FLAG_SECDED; | ||
829 | } | ||
830 | } else | ||
831 | csrow->edac_mode = EDAC_NONE; | ||
832 | } | ||
833 | } | ||
834 | |||
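Editor note: the cumul_size conversion in e752x_init_csrows() turns a cumulative DRB byte into a page count. With 4 KiB pages (PAGE_SHIFT == 12) the shift is 25 + drc_drbg - 12, so each DRB unit is 64 MiB in single-channel mode (drc_drbg == 1) and 128 MiB in dual-channel mode (drc_drbg == 2). A worked example with a hypothetical DRB value:

    /* Dual channel: drc_drbg == 2, shift = 25 + 2 - 12 = 15. */
    u8  value      = 4;              /* hypothetical DRB register value */
    u32 cumul_size = value << 15;    /* 131072 pages = 512 MiB total    */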
835 | static void e752x_init_mem_map_table(struct pci_dev *pdev, | ||
836 | struct e752x_pvt *pvt) | ||
837 | { | ||
771 | int index; | 838 | int index; |
839 | u8 value, last, row, stat8; | ||
840 | |||
841 | last = 0; | ||
842 | row = 0; | ||
843 | |||
844 | for (index = 0; index < 8; index += 2) { | ||
845 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | ||
846 | /* test if there is a dimm in this slot */ | ||
847 | if (value == last) { | ||
848 | /* no dimm in the slot, so flag it as empty */ | ||
849 | pvt->map[index] = 0xff; | ||
850 | pvt->map[index + 1] = 0xff; | ||
851 | } else { /* there is a dimm in the slot */ | ||
852 | pvt->map[index] = row; | ||
853 | row++; | ||
854 | last = value; | ||
855 | /* test the next value to see if the dimm is double | ||
856 | * sided | ||
857 | */ | ||
858 | pci_read_config_byte(pdev, E752X_DRB + index + 1, | ||
859 | &value); | ||
860 | pvt->map[index + 1] = (value == last) ? | ||
861 | 0xff : /* the dimm is single sided, | ||
862 | so flag as empty */ | ||
863 | row; /* this is a double sided dimm | ||
864 | to save the next row # */ | ||
865 | row++; | ||
866 | last = value; | ||
867 | } | ||
868 | } | ||
869 | |||
870 | /* set the map type. 1 = normal, 0 = reversed */ | ||
871 | pci_read_config_byte(pdev, E752X_DRM, &stat8); | ||
872 | pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); | ||
873 | } | ||
874 | |||
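Editor note: e752x_init_mem_map_table() walks the DRB registers in pairs; a slot whose DRB equals the previous boundary holds no DIMM and is flagged 0xff, otherwise the csrow number is recorded and the second DRB of the pair decides whether the DIMM is double sided. For example, hypothetical DRB values {2, 4, 4, 4, 4, 4, 4, 4} would yield map = {0, 1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, i.e. one double-sided DIMM in slot 0 and the remaining slots empty.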
875 | /* Return 0 on success or 1 on failure. */ | ||
876 | static int e752x_get_devs(struct pci_dev *pdev, int dev_idx, | ||
877 | struct e752x_pvt *pvt) | ||
878 | { | ||
879 | struct pci_dev *dev; | ||
880 | |||
881 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
882 | pvt->dev_info->err_dev, | ||
883 | pvt->bridge_ck); | ||
884 | |||
885 | if (pvt->bridge_ck == NULL) | ||
886 | pvt->bridge_ck = pci_scan_single_device(pdev->bus, | ||
887 | PCI_DEVFN(0, 1)); | ||
888 | |||
889 | if (pvt->bridge_ck == NULL) { | ||
890 | e752x_printk(KERN_ERR, "error reporting device not found: " | ||
891 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
892 | PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); | ||
893 | return 1; | ||
894 | } | ||
895 | |||
896 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, | ||
897 | NULL); | ||
898 | |||
899 | if (dev == NULL) | ||
900 | goto fail; | ||
901 | |||
902 | pvt->dev_d0f0 = dev; | ||
903 | pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); | ||
904 | |||
905 | return 0; | ||
906 | |||
907 | fail: | ||
908 | pci_dev_put(pvt->bridge_ck); | ||
909 | return 1; | ||
910 | } | ||
911 | |||
912 | static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) | ||
913 | { | ||
914 | struct pci_dev *dev; | ||
915 | |||
916 | dev = pvt->dev_d0f1; | ||
917 | /* Turn off error disable & SMI in case the BIOS turned it on */ | ||
918 | pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | ||
919 | pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | ||
920 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00); | ||
921 | pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); | ||
922 | pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); | ||
923 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); | ||
924 | pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); | ||
925 | pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); | ||
926 | } | ||
927 | |||
928 | static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | ||
929 | { | ||
772 | u16 pci_data; | 930 | u16 pci_data; |
773 | u8 stat8; | 931 | u8 stat8; |
774 | struct mem_ctl_info *mci = NULL; | 932 | struct mem_ctl_info *mci; |
775 | struct e752x_pvt *pvt = NULL; | 933 | struct e752x_pvt *pvt; |
776 | u16 ddrcsr; | 934 | u16 ddrcsr; |
777 | u32 drc; | ||
778 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ | 935 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ |
779 | int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ | ||
780 | int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | ||
781 | u32 dra; | ||
782 | unsigned long last_cumul_size; | ||
783 | struct pci_dev *dev = NULL; | ||
784 | struct e752x_error_info discard; | 936 | struct e752x_error_info discard; |
785 | 937 | ||
786 | debugf0("%s(): mci\n", __func__); | 938 | debugf0("%s(): mci\n", __func__); |
787 | debugf0("Starting Probe1\n"); | 939 | debugf0("Starting Probe1\n"); |
788 | 940 | ||
789 | /* check to see if device 0 function 1 is enabled; if it isn't, we | 941 | /* check to see if device 0 function 1 is enabled; if it isn't, we |
790 | * assume the BIOS has reserved it for a reason and is expecting | 942 | * assume the BIOS has reserved it for a reason and is expecting |
791 | * exclusive access, we take care not to violate that assumption and | 943 | * exclusive access, we take care not to violate that assumption and |
792 | * fail the probe. */ | 944 | * fail the probe. */ |
793 | pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8); | 945 | pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8); |
794 | if (!force_function_unhide && !(stat8 & (1 << 5))) { | 946 | if (!force_function_unhide && !(stat8 & (1 << 5))) { |
795 | printk(KERN_INFO "Contact your BIOS vendor to see if the " | 947 | printk(KERN_INFO "Contact your BIOS vendor to see if the " |
796 | "E752x error registers can be safely un-hidden\n"); | 948 | "E752x error registers can be safely un-hidden\n"); |
797 | goto fail; | 949 | return -ENOMEM; |
798 | } | 950 | } |
799 | stat8 |= (1 << 5); | 951 | stat8 |= (1 << 5); |
800 | pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); | 952 | pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); |
801 | 953 | ||
802 | /* need to find out the number of channels */ | ||
803 | pci_read_config_dword(pdev, E752X_DRC, &drc); | ||
804 | pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); | 954 | pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); |
805 | /* FIXME: should check >>12 or 0xf, true for all? */ | 955 | /* FIXME: should check >>12 or 0xf, true for all? */ |
806 | /* Dual channel = 1, Single channel = 0 */ | 956 | /* Dual channel = 1, Single channel = 0 */ |
807 | drc_chan = (((ddrcsr >> 12) & 3) == 3); | 957 | drc_chan = dual_channel_active(ddrcsr); |
808 | drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ | ||
809 | drc_ddim = (drc >> 20) & 0x3; | ||
810 | 958 | ||
811 | mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1); | 959 | mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1); |
812 | 960 | ||
813 | if (mci == NULL) { | 961 | if (mci == NULL) { |
814 | rc = -ENOMEM; | 962 | return -ENOMEM; |
815 | goto fail; | ||
816 | } | 963 | } |
817 | 964 | ||
818 | debugf3("%s(): init mci\n", __func__); | 965 | debugf3("%s(): init mci\n", __func__); |
819 | mci->mtype_cap = MEM_FLAG_RDDR; | 966 | mci->mtype_cap = MEM_FLAG_RDDR; |
820 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | 967 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | |
821 | EDAC_FLAG_S4ECD4ED; | 968 | EDAC_FLAG_S4ECD4ED; |
822 | /* FIXME - what if different memory types are in different csrows? */ | 969 | /* FIXME - what if different memory types are in different csrows? */ |
823 | mci->mod_name = EDAC_MOD_STR; | 970 | mci->mod_name = EDAC_MOD_STR; |
824 | mci->mod_ver = E752X_REVISION; | 971 | mci->mod_ver = E752X_REVISION; |
825 | mci->dev = &pdev->dev; | 972 | mci->dev = &pdev->dev; |
826 | 973 | ||
827 | debugf3("%s(): init pvt\n", __func__); | 974 | debugf3("%s(): init pvt\n", __func__); |
828 | pvt = (struct e752x_pvt *) mci->pvt_info; | 975 | pvt = (struct e752x_pvt *) mci->pvt_info; |
829 | pvt->dev_info = &e752x_devs[dev_idx]; | 976 | pvt->dev_info = &e752x_devs[dev_idx]; |
830 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | 977 | pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); |
831 | pvt->dev_info->err_dev, | ||
832 | pvt->bridge_ck); | ||
833 | 978 | ||
834 | if (pvt->bridge_ck == NULL) | 979 | if (e752x_get_devs(pdev, dev_idx, pvt)) { |
835 | pvt->bridge_ck = pci_scan_single_device(pdev->bus, | 980 | edac_mc_free(mci); |
836 | PCI_DEVFN(0, 1)); | 981 | return -ENODEV; |
837 | |||
838 | if (pvt->bridge_ck == NULL) { | ||
839 | e752x_printk(KERN_ERR, "error reporting device not found:" | ||
840 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
841 | PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); | ||
842 | goto fail; | ||
843 | } | 982 | } |
844 | 983 | ||
845 | pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); | ||
846 | debugf3("%s(): more mci init\n", __func__); | 984 | debugf3("%s(): more mci init\n", __func__); |
847 | mci->ctl_name = pvt->dev_info->ctl_name; | 985 | mci->ctl_name = pvt->dev_info->ctl_name; |
848 | mci->edac_check = e752x_check; | 986 | mci->edac_check = e752x_check; |
849 | mci->ctl_page_to_phys = ctl_page_to_phys; | 987 | mci->ctl_page_to_phys = ctl_page_to_phys; |
850 | 988 | ||
851 | /* find out the device types */ | 989 | e752x_init_csrows(mci, pdev, ddrcsr); |
852 | pci_read_config_dword(pdev, E752X_DRA, &dra); | 990 | e752x_init_mem_map_table(pdev, pvt); |
853 | 991 | ||
854 | /* | ||
855 | * The dram row boundary (DRB) reg values are boundary address for | ||
856 | * each DRAM row with a granularity of 64 or 128MB (single/dual | ||
857 | * channel operation). DRB regs are cumulative; therefore DRB7 will | ||
858 | * contain the total memory contained in all eight rows. | ||
859 | */ | ||
860 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | ||
861 | u8 value; | ||
862 | u32 cumul_size; | ||
863 | |||
864 | /* mem_dev 0=x8, 1=x4 */ | ||
865 | int mem_dev = (dra >> (index * 4 + 2)) & 0x3; | ||
866 | struct csrow_info *csrow = &mci->csrows[index]; | ||
867 | |||
868 | mem_dev = (mem_dev == 2); | ||
869 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | ||
870 | /* convert a 128 or 64 MiB DRB to a page size. */ | ||
871 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | ||
872 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | ||
873 | cumul_size); | ||
874 | |||
875 | if (cumul_size == last_cumul_size) | ||
876 | continue; /* not populated */ | ||
877 | |||
878 | csrow->first_page = last_cumul_size; | ||
879 | csrow->last_page = cumul_size - 1; | ||
880 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
881 | last_cumul_size = cumul_size; | ||
882 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | ||
883 | csrow->mtype = MEM_RDDR; /* only one type supported */ | ||
884 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | ||
885 | |||
886 | /* | ||
887 | * if single channel or x8 devices then SECDED | ||
888 | * if dual channel and x4 then S4ECD4ED | ||
889 | */ | ||
890 | if (drc_ddim) { | ||
891 | if (drc_chan && mem_dev) { | ||
892 | csrow->edac_mode = EDAC_S4ECD4ED; | ||
893 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | ||
894 | } else { | ||
895 | csrow->edac_mode = EDAC_SECDED; | ||
896 | mci->edac_cap |= EDAC_FLAG_SECDED; | ||
897 | } | ||
898 | } else | ||
899 | csrow->edac_mode = EDAC_NONE; | ||
900 | } | ||
901 | |||
902 | /* Fill in the memory map table */ | ||
903 | { | ||
904 | u8 value; | ||
905 | u8 last = 0; | ||
906 | u8 row = 0; | ||
907 | |||
908 | for (index = 0; index < 8; index += 2) { | ||
909 | pci_read_config_byte(pdev, E752X_DRB + index, &value); | ||
910 | |||
911 | /* test if there is a dimm in this slot */ | ||
912 | if (value == last) { | ||
913 | /* no dimm in the slot, so flag it as empty */ | ||
914 | pvt->map[index] = 0xff; | ||
915 | pvt->map[index + 1] = 0xff; | ||
916 | } else { /* there is a dimm in the slot */ | ||
917 | pvt->map[index] = row; | ||
918 | row++; | ||
919 | last = value; | ||
920 | /* test the next value to see if the dimm is | ||
921 | double sided */ | ||
922 | pci_read_config_byte(pdev, | ||
923 | E752X_DRB + index + 1, | ||
924 | &value); | ||
925 | pvt->map[index + 1] = (value == last) ? | ||
926 | 0xff : /* the dimm is single sided, | ||
927 | * so flag as empty | ||
928 | */ | ||
929 | row; /* this is a double sided dimm | ||
930 | * to save the next row # | ||
931 | */ | ||
932 | row++; | ||
933 | last = value; | ||
934 | } | ||
935 | } | ||
936 | } | ||
937 | |||
938 | /* set the map type. 1 = normal, 0 = reversed */ | 992 | /* set the map type. 1 = normal, 0 = reversed */ |
939 | pci_read_config_byte(pdev, E752X_DRM, &stat8); | 993 | pci_read_config_byte(pdev, E752X_DRM, &stat8); |
940 | pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); | 994 | pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); |
941 | 995 | ||
942 | mci->edac_cap |= EDAC_FLAG_NONE; | 996 | mci->edac_cap |= EDAC_FLAG_NONE; |
943 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); | 997 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
944 | 998 | ||
945 | /* load the top of low memory, remap base, and remap limit vars */ | 999 | /* load the top of low memory, remap base, and remap limit vars */ |
946 | pci_read_config_word(pdev, E752X_TOLM, &pci_data); | 1000 | pci_read_config_word(pdev, E752X_TOLM, &pci_data); |
947 | pvt->tolm = ((u32) pci_data) << 4; | 1001 | pvt->tolm = ((u32) pci_data) << 4; |
948 | pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data); | 1002 | pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data); |
949 | pvt->remapbase = ((u32) pci_data) << 14; | 1003 | pvt->remapbase = ((u32) pci_data) << 14; |
950 | pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data); | 1004 | pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data); |
951 | pvt->remaplimit = ((u32) pci_data) << 14; | 1005 | pvt->remaplimit = ((u32) pci_data) << 14; |
952 | e752x_printk(KERN_INFO, | 1006 | e752x_printk(KERN_INFO, |
953 | "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, | 1007 | "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, |
954 | pvt->remapbase, pvt->remaplimit); | 1008 | pvt->remapbase, pvt->remaplimit); |
955 | 1009 | ||
956 | /* Here we assume that we will never see multiple instances of this | 1010 | /* Here we assume that we will never see multiple instances of this |
957 | * type of memory controller. The ID is therefore hardcoded to 0. | 1011 | * type of memory controller. The ID is therefore hardcoded to 0. |
958 | */ | 1012 | */ |
959 | if (edac_mc_add_mc(mci,0)) { | 1013 | if (edac_mc_add_mc(mci,0)) { |
drivers/edac/e7xxx_edac.c
1 | /* | 1 | /* |
2 | * Intel e7xxx Memory Controller kernel module | 2 | * Intel e7xxx Memory Controller kernel module |
3 | * (C) 2003 Linux Networx (http://lnxi.com) | 3 | * (C) 2003 Linux Networx (http://lnxi.com) |
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * See "enum e7xxx_chips" below for supported chipsets | 7 | * See "enum e7xxx_chips" below for supported chipsets |
8 | * | 8 | * |
9 | * Written by Thayne Harbaugh | 9 | * Written by Thayne Harbaugh |
10 | * Based on work by Dan Hollis <goemon at anime dot net> and others. | 10 | * Based on work by Dan Hollis <goemon at anime dot net> and others. |
11 | * http://www.anime.net/~goemon/linux-ecc/ | 11 | * http://www.anime.net/~goemon/linux-ecc/ |
12 | * | 12 | * |
13 | * Contributors: | 13 | * Contributors: |
14 | * Eric Biederman (Linux Networx) | 14 | * Eric Biederman (Linux Networx) |
15 | * Tom Zimmerman (Linux Networx) | 15 | * Tom Zimmerman (Linux Networx) |
16 | * Jim Garlick (Lawrence Livermore National Labs) | 16 | * Jim Garlick (Lawrence Livermore National Labs) |
17 | * Dave Peterson (Lawrence Livermore National Labs) | 17 | * Dave Peterson (Lawrence Livermore National Labs) |
18 | * That One Guy (Some other place) | 18 | * That One Guy (Some other place) |
19 | * Wang Zhenyu (intel.com) | 19 | * Wang Zhenyu (intel.com) |
20 | * | 20 | * |
21 | * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $ | 21 | * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $ |
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/config.h> | 25 | #include <linux/config.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/pci_ids.h> | 29 | #include <linux/pci_ids.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include "edac_mc.h" | 31 | #include "edac_mc.h" |
32 | 32 | ||
33 | #define E7XXX_REVISION " Ver: 2.0.0 " __DATE__ | 33 | #define E7XXX_REVISION " Ver: 2.0.0 " __DATE__ |
34 | 34 | ||
35 | #define e7xxx_printk(level, fmt, arg...) \ | 35 | #define e7xxx_printk(level, fmt, arg...) \ |
36 | edac_printk(level, "e7xxx", fmt, ##arg) | 36 | edac_printk(level, "e7xxx", fmt, ##arg) |
37 | 37 | ||
38 | #define e7xxx_mc_printk(mci, level, fmt, arg...) \ | 38 | #define e7xxx_mc_printk(mci, level, fmt, arg...) \ |
39 | edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg) | 39 | edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg) |
40 | 40 | ||
41 | #ifndef PCI_DEVICE_ID_INTEL_7205_0 | 41 | #ifndef PCI_DEVICE_ID_INTEL_7205_0 |
42 | #define PCI_DEVICE_ID_INTEL_7205_0 0x255d | 42 | #define PCI_DEVICE_ID_INTEL_7205_0 0x255d |
43 | #endif /* PCI_DEVICE_ID_INTEL_7205_0 */ | 43 | #endif /* PCI_DEVICE_ID_INTEL_7205_0 */ |
44 | 44 | ||
45 | #ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR | 45 | #ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR |
46 | #define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551 | 46 | #define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551 |
47 | #endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */ | 47 | #endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */ |
48 | 48 | ||
49 | #ifndef PCI_DEVICE_ID_INTEL_7500_0 | 49 | #ifndef PCI_DEVICE_ID_INTEL_7500_0 |
50 | #define PCI_DEVICE_ID_INTEL_7500_0 0x2540 | 50 | #define PCI_DEVICE_ID_INTEL_7500_0 0x2540 |
51 | #endif /* PCI_DEVICE_ID_INTEL_7500_0 */ | 51 | #endif /* PCI_DEVICE_ID_INTEL_7500_0 */ |
52 | 52 | ||
53 | #ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR | 53 | #ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR |
54 | #define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541 | 54 | #define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541 |
55 | #endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */ | 55 | #endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */ |
56 | 56 | ||
57 | #ifndef PCI_DEVICE_ID_INTEL_7501_0 | 57 | #ifndef PCI_DEVICE_ID_INTEL_7501_0 |
58 | #define PCI_DEVICE_ID_INTEL_7501_0 0x254c | 58 | #define PCI_DEVICE_ID_INTEL_7501_0 0x254c |
59 | #endif /* PCI_DEVICE_ID_INTEL_7501_0 */ | 59 | #endif /* PCI_DEVICE_ID_INTEL_7501_0 */ |
60 | 60 | ||
61 | #ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR | 61 | #ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR |
62 | #define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541 | 62 | #define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541 |
63 | #endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */ | 63 | #endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */ |
64 | 64 | ||
65 | #ifndef PCI_DEVICE_ID_INTEL_7505_0 | 65 | #ifndef PCI_DEVICE_ID_INTEL_7505_0 |
66 | #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 | 66 | #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 |
67 | #endif /* PCI_DEVICE_ID_INTEL_7505_0 */ | 67 | #endif /* PCI_DEVICE_ID_INTEL_7505_0 */ |
68 | 68 | ||
69 | #ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR | 69 | #ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR |
70 | #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 | 70 | #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 |
71 | #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ | 71 | #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ |
72 | 72 | ||
73 | #define E7XXX_NR_CSROWS 8 /* number of csrows */ | 73 | #define E7XXX_NR_CSROWS 8 /* number of csrows */ |
74 | #define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ | 74 | #define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ |
75 | 75 | ||
76 | /* E7XXX register addresses - device 0 function 0 */ | 76 | /* E7XXX register addresses - device 0 function 0 */ |
77 | #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ | 77 | #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ |
78 | #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ | 78 | #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ |
79 | /* | 79 | /* |
80 | * 31 Device width row 7 0=x8 1=x4 | 80 | * 31 Device width row 7 0=x8 1=x4 |
81 | * 27 Device width row 6 | 81 | * 27 Device width row 6 |
82 | * 23 Device width row 5 | 82 | * 23 Device width row 5 |
83 | * 19 Device width row 4 | 83 | * 19 Device width row 4 |
84 | * 15 Device width row 3 | 84 | * 15 Device width row 3 |
85 | * 11 Device width row 2 | 85 | * 11 Device width row 2 |
86 | * 7 Device width row 1 | 86 | * 7 Device width row 1 |
87 | * 3 Device width row 0 | 87 | * 3 Device width row 0 |
88 | */ | 88 | */ |
89 | #define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */ | 89 | #define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */ |
90 | /* | 90 | /* |
91 | * 22 Number channels 0=1,1=2 | 91 | * 22 Number channels 0=1,1=2 |
92 | * 19:18 DRB Granularity 32/64MB | 92 | * 19:18 DRB Granularity 32/64MB |
93 | */ | 93 | */ |
94 | #define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ | 94 | #define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ |
95 | #define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ | 95 | #define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ |
96 | #define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ | 96 | #define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ |
97 | 97 | ||
98 | /* E7XXX register addresses - device 0 function 1 */ | 98 | /* E7XXX register addresses - device 0 function 1 */ |
99 | #define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */ | 99 | #define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */ |
100 | #define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */ | 100 | #define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */ |
101 | #define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */ | 101 | #define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */ |
102 | /* error address register (32b) */ | 102 | /* error address register (32b) */ |
103 | /* | 103 | /* |
104 | * 31:28 Reserved | 104 | * 31:28 Reserved |
105 | * 27:6 CE address (4k block 33:12) | 105 | * 27:6 CE address (4k block 33:12) |
106 | * 5:0 Reserved | 106 | * 5:0 Reserved |
107 | */ | 107 | */ |
108 | #define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */ | 108 | #define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */ |
109 | /* error address register (32b) */ | 109 | /* error address register (32b) */ |
110 | /* | 110 | /* |
111 | * 31:28 Reserved | 111 | * 31:28 Reserved |
112 | * 27:6 CE address (4k block 33:12) | 112 | * 27:6 CE address (4k block 33:12) |
113 | * 5:0 Reserved | 113 | * 5:0 Reserved |
114 | */ | 114 | */ |
115 | #define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */ | 115 | #define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */ |
116 | /* error syndrome register (16b) */ | 116 | /* error syndrome register (16b) */ |
117 | 117 | ||
118 | enum e7xxx_chips { | 118 | enum e7xxx_chips { |
119 | E7500 = 0, | 119 | E7500 = 0, |
120 | E7501, | 120 | E7501, |
121 | E7505, | 121 | E7505, |
122 | E7205, | 122 | E7205, |
123 | }; | 123 | }; |
124 | 124 | ||
125 | struct e7xxx_pvt { | 125 | struct e7xxx_pvt { |
126 | struct pci_dev *bridge_ck; | 126 | struct pci_dev *bridge_ck; |
127 | u32 tolm; | 127 | u32 tolm; |
128 | u32 remapbase; | 128 | u32 remapbase; |
129 | u32 remaplimit; | 129 | u32 remaplimit; |
130 | const struct e7xxx_dev_info *dev_info; | 130 | const struct e7xxx_dev_info *dev_info; |
131 | }; | 131 | }; |
132 | 132 | ||
133 | struct e7xxx_dev_info { | 133 | struct e7xxx_dev_info { |
134 | u16 err_dev; | 134 | u16 err_dev; |
135 | const char *ctl_name; | 135 | const char *ctl_name; |
136 | }; | 136 | }; |
137 | 137 | ||
138 | struct e7xxx_error_info { | 138 | struct e7xxx_error_info { |
139 | u8 dram_ferr; | 139 | u8 dram_ferr; |
140 | u8 dram_nerr; | 140 | u8 dram_nerr; |
141 | u32 dram_celog_add; | 141 | u32 dram_celog_add; |
142 | u16 dram_celog_syndrome; | 142 | u16 dram_celog_syndrome; |
143 | u32 dram_uelog_add; | 143 | u32 dram_uelog_add; |
144 | }; | 144 | }; |
145 | 145 | ||
146 | static const struct e7xxx_dev_info e7xxx_devs[] = { | 146 | static const struct e7xxx_dev_info e7xxx_devs[] = { |
147 | [E7500] = { | 147 | [E7500] = { |
148 | .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, | 148 | .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, |
149 | .ctl_name = "E7500" | 149 | .ctl_name = "E7500" |
150 | }, | 150 | }, |
151 | [E7501] = { | 151 | [E7501] = { |
152 | .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, | 152 | .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, |
153 | .ctl_name = "E7501" | 153 | .ctl_name = "E7501" |
154 | }, | 154 | }, |
155 | [E7505] = { | 155 | [E7505] = { |
156 | .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, | 156 | .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, |
157 | .ctl_name = "E7505" | 157 | .ctl_name = "E7505" |
158 | }, | 158 | }, |
159 | [E7205] = { | 159 | [E7205] = { |
160 | .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, | 160 | .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, |
161 | .ctl_name = "E7205" | 161 | .ctl_name = "E7205" |
162 | }, | 162 | }, |
163 | }; | 163 | }; |
164 | 164 | ||
165 | /* FIXME - is this valid for both SECDED and S4ECD4ED? */ | 165 | /* FIXME - is this valid for both SECDED and S4ECD4ED? */ |
166 | static inline int e7xxx_find_channel(u16 syndrome) | 166 | static inline int e7xxx_find_channel(u16 syndrome) |
167 | { | 167 | { |
168 | debugf3("%s()\n", __func__); | 168 | debugf3("%s()\n", __func__); |
169 | 169 | ||
170 | if ((syndrome & 0xff00) == 0) | 170 | if ((syndrome & 0xff00) == 0) |
171 | return 0; | 171 | return 0; |
172 | 172 | ||
173 | if ((syndrome & 0x00ff) == 0) | 173 | if ((syndrome & 0x00ff) == 0) |
174 | return 1; | 174 | return 1; |
175 | 175 | ||
176 | if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) | 176 | if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) |
177 | return 0; | 177 | return 0; |
178 | 178 | ||
179 | return 1; | 179 | return 1; |
180 | } | 180 | } |
181 | 181 | ||
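Editor note: e7xxx_find_channel() above guesses the failing channel from which half of the syndrome is clean. Illustrative calls, with hypothetical syndrome values:

    e7xxx_find_channel(0x0004); /* upper byte zero -> channel 0 */
    e7xxx_find_channel(0x4000); /* lower byte zero -> channel 1 */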
182 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, | 182 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, |
183 | unsigned long page) | 183 | unsigned long page) |
184 | { | 184 | { |
185 | u32 remap; | 185 | u32 remap; |
186 | struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info; | 186 | struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info; |
187 | 187 | ||
188 | debugf3("%s()\n", __func__); | 188 | debugf3("%s()\n", __func__); |
189 | 189 | ||
190 | if ((page < pvt->tolm) || | 190 | if ((page < pvt->tolm) || |
191 | ((page >= 0x100000) && (page < pvt->remapbase))) | 191 | ((page >= 0x100000) && (page < pvt->remapbase))) |
192 | return page; | 192 | return page; |
193 | 193 | ||
194 | remap = (page - pvt->tolm) + pvt->remapbase; | 194 | remap = (page - pvt->tolm) + pvt->remapbase; |
195 | 195 | ||
196 | if (remap < pvt->remaplimit) | 196 | if (remap < pvt->remaplimit) |
197 | return remap; | 197 | return remap; |
198 | 198 | ||
199 | e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); | 199 | e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); |
200 | return pvt->tolm - 1; | 200 | return pvt->tolm - 1; |
201 | } | 201 | } |
202 | 202 | ||
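Editor note: ctl_page_to_phys() above returns pages below tolm, and pages between 4 GiB and remapbase, unchanged; pages that fall in the remapped window are shifted up by (remapbase - tolm) and checked against remaplimit. A worked example in 4 KiB page numbers, with hypothetical values:

    /* tolm = 0xC0000, remapbase = 0x120000, remaplimit = 0x160000     */
    /* page 0x80000 -> 0x80000 (below tolm, returned unchanged)        */
    /* page 0xD0000 -> (0xD0000 - 0xC0000) + 0x120000 = 0x130000       */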
203 | static void process_ce(struct mem_ctl_info *mci, | 203 | static void process_ce(struct mem_ctl_info *mci, |
204 | struct e7xxx_error_info *info) | 204 | struct e7xxx_error_info *info) |
205 | { | 205 | { |
206 | u32 error_1b, page; | 206 | u32 error_1b, page; |
207 | u16 syndrome; | 207 | u16 syndrome; |
208 | int row; | 208 | int row; |
209 | int channel; | 209 | int channel; |
210 | 210 | ||
211 | debugf3("%s()\n", __func__); | 211 | debugf3("%s()\n", __func__); |
212 | /* read the error address */ | 212 | /* read the error address */ |
213 | error_1b = info->dram_celog_add; | 213 | error_1b = info->dram_celog_add; |
214 | /* FIXME - should use PAGE_SHIFT */ | 214 | /* FIXME - should use PAGE_SHIFT */ |
215 | page = error_1b >> 6; /* convert the address to 4k page */ | 215 | page = error_1b >> 6; /* convert the address to 4k page */ |
216 | /* read the syndrome */ | 216 | /* read the syndrome */ |
217 | syndrome = info->dram_celog_syndrome; | 217 | syndrome = info->dram_celog_syndrome; |
218 | /* FIXME - check for -1 */ | 218 | /* FIXME - check for -1 */ |
219 | row = edac_mc_find_csrow_by_page(mci, page); | 219 | row = edac_mc_find_csrow_by_page(mci, page); |
220 | /* convert syndrome to channel */ | 220 | /* convert syndrome to channel */ |
221 | channel = e7xxx_find_channel(syndrome); | 221 | channel = e7xxx_find_channel(syndrome); |
222 | edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE"); | 222 | edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE"); |
223 | } | 223 | } |
224 | 224 | ||
225 | static void process_ce_no_info(struct mem_ctl_info *mci) | 225 | static void process_ce_no_info(struct mem_ctl_info *mci) |
226 | { | 226 | { |
227 | debugf3("%s()\n", __func__); | 227 | debugf3("%s()\n", __func__); |
228 | edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); | 228 | edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); |
229 | } | 229 | } |
230 | 230 | ||
231 | static void process_ue(struct mem_ctl_info *mci, | 231 | static void process_ue(struct mem_ctl_info *mci, |
232 | struct e7xxx_error_info *info) | 232 | struct e7xxx_error_info *info) |
233 | { | 233 | { |
234 | u32 error_2b, block_page; | 234 | u32 error_2b, block_page; |
235 | int row; | 235 | int row; |
236 | 236 | ||
237 | debugf3("%s()\n", __func__); | 237 | debugf3("%s()\n", __func__); |
238 | /* read the error address */ | 238 | /* read the error address */ |
239 | error_2b = info->dram_uelog_add; | 239 | error_2b = info->dram_uelog_add; |
240 | /* FIXME - should use PAGE_SHIFT */ | 240 | /* FIXME - should use PAGE_SHIFT */ |
241 | block_page = error_2b >> 6; /* convert to 4k address */ | 241 | block_page = error_2b >> 6; /* convert to 4k address */ |
242 | row = edac_mc_find_csrow_by_page(mci, block_page); | 242 | row = edac_mc_find_csrow_by_page(mci, block_page); |
243 | edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); | 243 | edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); |
244 | } | 244 | } |
245 | 245 | ||
246 | static void process_ue_no_info(struct mem_ctl_info *mci) | 246 | static void process_ue_no_info(struct mem_ctl_info *mci) |
247 | { | 247 | { |
248 | debugf3("%s()\n", __func__); | 248 | debugf3("%s()\n", __func__); |
249 | edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); | 249 | edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); |
250 | } | 250 | } |
251 | 251 | ||
252 | static void e7xxx_get_error_info (struct mem_ctl_info *mci, | 252 | static void e7xxx_get_error_info (struct mem_ctl_info *mci, |
253 | struct e7xxx_error_info *info) | 253 | struct e7xxx_error_info *info) |
254 | { | 254 | { |
255 | struct e7xxx_pvt *pvt; | 255 | struct e7xxx_pvt *pvt; |
256 | 256 | ||
257 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | 257 | pvt = (struct e7xxx_pvt *) mci->pvt_info; |
258 | pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, | 258 | pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, |
259 | &info->dram_ferr); | 259 | &info->dram_ferr); |
260 | pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, | 260 | pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, |
261 | &info->dram_nerr); | 261 | &info->dram_nerr); |
262 | 262 | ||
263 | if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { | 263 | if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { |
264 | pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, | 264 | pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, |
265 | &info->dram_celog_add); | 265 | &info->dram_celog_add); |
266 | pci_read_config_word(pvt->bridge_ck, | 266 | pci_read_config_word(pvt->bridge_ck, |
267 | E7XXX_DRAM_CELOG_SYNDROME, | 267 | E7XXX_DRAM_CELOG_SYNDROME, |
268 | &info->dram_celog_syndrome); | 268 | &info->dram_celog_syndrome); |
269 | } | 269 | } |
270 | 270 | ||
271 | if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) | 271 | if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) |
272 | pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, | 272 | pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, |
273 | &info->dram_uelog_add); | 273 | &info->dram_uelog_add); |
274 | 274 | ||
275 | if (info->dram_ferr & 3) | 275 | if (info->dram_ferr & 3) |
276 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); | 276 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); |
277 | 277 | ||
278 | if (info->dram_nerr & 3) | 278 | if (info->dram_nerr & 3) |
279 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); | 279 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); |
280 | } | 280 | } |
281 | 281 | ||
282 | static int e7xxx_process_error_info (struct mem_ctl_info *mci, | 282 | static int e7xxx_process_error_info (struct mem_ctl_info *mci, |
283 | struct e7xxx_error_info *info, int handle_errors) | 283 | struct e7xxx_error_info *info, int handle_errors) |
284 | { | 284 | { |
285 | int error_found; | 285 | int error_found; |
286 | 286 | ||
287 | error_found = 0; | 287 | error_found = 0; |
288 | 288 | ||
289 | /* decode and report errors */ | 289 | /* decode and report errors */ |
290 | if (info->dram_ferr & 1) { /* check first error correctable */ | 290 | if (info->dram_ferr & 1) { /* check first error correctable */ |
291 | error_found = 1; | 291 | error_found = 1; |
292 | 292 | ||
293 | if (handle_errors) | 293 | if (handle_errors) |
294 | process_ce(mci, info); | 294 | process_ce(mci, info); |
295 | } | 295 | } |
296 | 296 | ||
297 | if (info->dram_ferr & 2) { /* check first error uncorrectable */ | 297 | if (info->dram_ferr & 2) { /* check first error uncorrectable */ |
298 | error_found = 1; | 298 | error_found = 1; |
299 | 299 | ||
300 | if (handle_errors) | 300 | if (handle_errors) |
301 | process_ue(mci, info); | 301 | process_ue(mci, info); |
302 | } | 302 | } |
303 | 303 | ||
304 | if (info->dram_nerr & 1) { /* check next error correctable */ | 304 | if (info->dram_nerr & 1) { /* check next error correctable */ |
305 | error_found = 1; | 305 | error_found = 1; |
306 | 306 | ||
307 | if (handle_errors) { | 307 | if (handle_errors) { |
308 | if (info->dram_ferr & 1) | 308 | if (info->dram_ferr & 1) |
309 | process_ce_no_info(mci); | 309 | process_ce_no_info(mci); |
310 | else | 310 | else |
311 | process_ce(mci, info); | 311 | process_ce(mci, info); |
312 | } | 312 | } |
313 | } | 313 | } |
314 | 314 | ||
315 | if (info->dram_nerr & 2) { /* check next error uncorrectable */ | 315 | if (info->dram_nerr & 2) { /* check next error uncorrectable */ |
316 | error_found = 1; | 316 | error_found = 1; |
317 | 317 | ||
318 | if (handle_errors) { | 318 | if (handle_errors) { |
319 | if (info->dram_ferr & 2) | 319 | if (info->dram_ferr & 2) |
320 | process_ue_no_info(mci); | 320 | process_ue_no_info(mci); |
321 | else | 321 | else |
322 | process_ue(mci, info); | 322 | process_ue(mci, info); |
323 | } | 323 | } |
324 | } | 324 | } |
325 | 325 | ||
326 | return error_found; | 326 | return error_found; |
327 | } | 327 | } |
328 | 328 | ||
329 | static void e7xxx_check(struct mem_ctl_info *mci) | 329 | static void e7xxx_check(struct mem_ctl_info *mci) |
330 | { | 330 | { |
331 | struct e7xxx_error_info info; | 331 | struct e7xxx_error_info info; |
332 | 332 | ||
333 | debugf3("%s()\n", __func__); | 333 | debugf3("%s()\n", __func__); |
334 | e7xxx_get_error_info(mci, &info); | 334 | e7xxx_get_error_info(mci, &info); |
335 | e7xxx_process_error_info(mci, &info, 1); | 335 | e7xxx_process_error_info(mci, &info, 1); |
336 | } | 336 | } |
337 | 337 | ||
338 | static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | 338 | /* Return 1 if dual channel mode is active. Else return 0. */ |
339 | static inline int dual_channel_active(u32 drc, int dev_idx) | ||
339 | { | 340 | { |
340 | int rc = -ENODEV; | 341 | return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1; |
341 | int index; | 342 | } |
342 | u16 pci_data; | ||
343 | struct mem_ctl_info *mci = NULL; | ||
344 | struct e7xxx_pvt *pvt = NULL; | ||
345 | u32 drc; | ||
346 | int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */ | ||
347 | int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */ | ||
348 | int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | ||
349 | u32 dra; | ||
350 | unsigned long last_cumul_size; | ||
351 | struct e7xxx_error_info discard; | ||
352 | 343 | ||
353 | debugf0("%s(): mci\n", __func__); | ||
354 | 344 | ||
355 | /* need to find out the number of channels */ | 345 | /* Return DRB granularity (0=32mb, 1=64mb). */ |
356 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); | 346 | static inline int drb_granularity(u32 drc, int dev_idx) |
357 | 347 | { | |
358 | /* only e7501 can be single channel */ | 348 | /* only e7501 can be single channel */ |
359 | if (dev_idx == E7501) { | 349 | return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1; |
360 | drc_chan = ((drc >> 22) & 0x1); | 350 | } |
361 | drc_drbg = (drc >> 18) & 0x3; | ||
362 | } | ||
363 | 351 | ||
364 | drc_ddim = (drc >> 20) & 0x3; | ||
365 | mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); | ||
366 | 352 | ||
367 | if (mci == NULL) { | 353 | static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, |
368 | rc = -ENOMEM; | 354 | int dev_idx, u32 drc) |
369 | goto fail; | 355 | { |
370 | } | 356 | unsigned long last_cumul_size; |
357 | int index; | ||
358 | u8 value; | ||
359 | u32 dra, cumul_size; | ||
360 | int drc_chan, drc_drbg, drc_ddim, mem_dev; | ||
361 | struct csrow_info *csrow; | ||
371 | 362 | ||
372 | debugf3("%s(): init mci\n", __func__); | ||
373 | mci->mtype_cap = MEM_FLAG_RDDR; | ||
374 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | ||
375 | EDAC_FLAG_S4ECD4ED; | ||
376 | /* FIXME - what if different memory types are in different csrows? */ | ||
377 | mci->mod_name = EDAC_MOD_STR; | ||
378 | mci->mod_ver = E7XXX_REVISION; | ||
379 | mci->dev = &pdev->dev; | ||
380 | |||
381 | debugf3("%s(): init pvt\n", __func__); | ||
382 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | ||
383 | pvt->dev_info = &e7xxx_devs[dev_idx]; | ||
384 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
385 | pvt->dev_info->err_dev, | ||
386 | pvt->bridge_ck); | ||
387 | |||
388 | if (!pvt->bridge_ck) { | ||
389 | e7xxx_printk(KERN_ERR, "error reporting device not found:" | ||
390 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
391 | PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); | ||
392 | goto fail; | ||
393 | } | ||
394 | |||
395 | debugf3("%s(): more mci init\n", __func__); | ||
396 | mci->ctl_name = pvt->dev_info->ctl_name; | ||
397 | mci->edac_check = e7xxx_check; | ||
398 | mci->ctl_page_to_phys = ctl_page_to_phys; | ||
399 | |||
400 | /* find out the device types */ | ||
401 | pci_read_config_dword(pdev, E7XXX_DRA, &dra); | 363 | pci_read_config_dword(pdev, E7XXX_DRA, &dra); |
364 | drc_chan = dual_channel_active(drc, dev_idx); | ||
365 | drc_drbg = drb_granularity(drc, dev_idx); | ||
366 | drc_ddim = (drc >> 20) & 0x3; | ||
367 | last_cumul_size = 0; | ||
402 | 368 | ||
403 | /* | 369 | /* The dram row boundary (DRB) reg values are boundary address |
404 | * The dram row boundary (DRB) reg values are boundary address | ||
405 | * for each DRAM row with a granularity of 32 or 64MB (single/dual | 370 | * for each DRAM row with a granularity of 32 or 64MB (single/dual |
406 | * channel operation). DRB regs are cumulative; therefore DRB7 will | 371 | * channel operation). DRB regs are cumulative; therefore DRB7 will |
407 | * contain the total memory contained in all eight rows. | 372 | * contain the total memory contained in all eight rows. |
408 | */ | 373 | */ |
409 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | 374 | for (index = 0; index < mci->nr_csrows; index++) { |
410 | u8 value; | ||
411 | u32 cumul_size; | ||
412 | /* mem_dev 0=x8, 1=x4 */ | 375 | /* mem_dev 0=x8, 1=x4 */ |
413 | int mem_dev = (dra >> (index * 4 + 3)) & 0x1; | 376 | mem_dev = (dra >> (index * 4 + 3)) & 0x1; |
414 | struct csrow_info *csrow = &mci->csrows[index]; | 377 | csrow = &mci->csrows[index]; |
415 | 378 | ||
416 | pci_read_config_byte(pdev, E7XXX_DRB + index, &value); | 379 | pci_read_config_byte(pdev, E7XXX_DRB + index, &value); |
417 | /* convert a 64 or 32 MiB DRB to a page size. */ | 380 | /* convert a 64 or 32 MiB DRB to a page size. */ |
418 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | 381 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); |
419 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | 382 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
420 | cumul_size); | 383 | cumul_size); |
421 | |||
422 | if (cumul_size == last_cumul_size) | 384 | if (cumul_size == last_cumul_size) |
423 | continue; /* not populated */ | 385 | continue; /* not populated */ |
424 | 386 | ||
425 | csrow->first_page = last_cumul_size; | 387 | csrow->first_page = last_cumul_size; |
426 | csrow->last_page = cumul_size - 1; | 388 | csrow->last_page = cumul_size - 1; |
427 | csrow->nr_pages = cumul_size - last_cumul_size; | 389 | csrow->nr_pages = cumul_size - last_cumul_size; |
428 | last_cumul_size = cumul_size; | 390 | last_cumul_size = cumul_size; |
429 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | 391 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ |
430 | csrow->mtype = MEM_RDDR; /* only one type supported */ | 392 | csrow->mtype = MEM_RDDR; /* only one type supported */ |
431 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | 393 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; |
432 | 394 | ||
433 | /* | 395 | /* |
434 | * if single channel or x8 devices then SECDED | 396 | * if single channel or x8 devices then SECDED |
435 | * if dual channel and x4 then S4ECD4ED | 397 | * if dual channel and x4 then S4ECD4ED |
436 | */ | 398 | */ |
437 | if (drc_ddim) { | 399 | if (drc_ddim) { |
438 | if (drc_chan && mem_dev) { | 400 | if (drc_chan && mem_dev) { |
439 | csrow->edac_mode = EDAC_S4ECD4ED; | 401 | csrow->edac_mode = EDAC_S4ECD4ED; |
440 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; | 402 | mci->edac_cap |= EDAC_FLAG_S4ECD4ED; |
441 | } else { | 403 | } else { |
442 | csrow->edac_mode = EDAC_SECDED; | 404 | csrow->edac_mode = EDAC_SECDED; |
443 | mci->edac_cap |= EDAC_FLAG_SECDED; | 405 | mci->edac_cap |= EDAC_FLAG_SECDED; |
444 | } | 406 | } |
445 | } else | 407 | } else |
446 | csrow->edac_mode = EDAC_NONE; | 408 | csrow->edac_mode = EDAC_NONE; |
447 | } | 409 | } |
410 | } | ||
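A short aside on the DRB arithmetic above, for readers who have not met the scheme before: each DRB register holds a cumulative boundary in 32 or 64 MiB units, so a row's size is the difference between its boundary and the previous one. The standalone sketch below (made-up DRB values, not taken from any real board, and assuming 4 KiB pages) walks through the same conversion to page ranges that e7xxx_init_csrows() performs.

/* Standalone illustration of cumulative DRB decoding (hypothetical values). */
#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages */

int main(void)
{
	/* Example DRB values: boundaries in 64 MiB units (drc_drbg == 1). */
	unsigned char drb[8] = { 4, 4, 8, 8, 8, 8, 8, 8 };
	int drc_drbg = 1;             /* 0 = 32 MiB grain, 1 = 64 MiB grain */
	unsigned long last = 0, cumul;
	int i;

	for (i = 0; i < 8; i++) {
		/* Convert the 32/64 MiB boundary into a page count. */
		cumul = (unsigned long)drb[i] << (25 + drc_drbg - PAGE_SHIFT);
		if (cumul == last)
			continue;  /* row not populated */
		printf("row %d: pages %lu..%lu (%lu pages)\n",
		       i, last, cumul - 1, cumul - last);
		last = cumul;
	}
	return 0;
}

With these example values, rows 0 and 2 come out populated at 256 MiB each and the remaining rows are skipped as empty.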
448 | 411 | ||
449 | mci->edac_cap |= EDAC_FLAG_NONE; | 412 | static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) |
413 | { | ||
414 | u16 pci_data; | ||
415 | struct mem_ctl_info *mci = NULL; | ||
416 | struct e7xxx_pvt *pvt = NULL; | ||
417 | u32 drc; | ||
418 | int drc_chan; | ||
419 | struct e7xxx_error_info discard; | ||
450 | 420 | ||
421 | debugf0("%s(): mci\n", __func__); | ||
422 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); | ||
423 | |||
424 | drc_chan = dual_channel_active(drc, dev_idx); | ||
425 | mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); | ||
426 | |||
427 | if (mci == NULL) | ||
428 | return -ENOMEM; | ||
429 | |||
430 | debugf3("%s(): init mci\n", __func__); | ||
431 | mci->mtype_cap = MEM_FLAG_RDDR; | ||
432 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | ||
433 | EDAC_FLAG_S4ECD4ED; | ||
434 | /* FIXME - what if different memory types are in different csrows? */ | ||
435 | mci->mod_name = EDAC_MOD_STR; | ||
436 | mci->mod_ver = E7XXX_REVISION; | ||
437 | mci->dev = &pdev->dev; | ||
438 | debugf3("%s(): init pvt\n", __func__); | ||
439 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | ||
440 | pvt->dev_info = &e7xxx_devs[dev_idx]; | ||
441 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
442 | pvt->dev_info->err_dev, | ||
443 | pvt->bridge_ck); | ||
444 | |||
445 | if (!pvt->bridge_ck) { | ||
446 | e7xxx_printk(KERN_ERR, "error reporting device not found: " | ||
447 | "vendor %x device 0x%x (broken BIOS?)\n", | ||
448 | PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); | ||
449 | goto fail0; | ||
450 | } | ||
451 | |||
452 | debugf3("%s(): more mci init\n", __func__); | ||
453 | mci->ctl_name = pvt->dev_info->ctl_name; | ||
454 | mci->edac_check = e7xxx_check; | ||
455 | mci->ctl_page_to_phys = ctl_page_to_phys; | ||
456 | e7xxx_init_csrows(mci, pdev, dev_idx, drc); | ||
457 | mci->edac_cap |= EDAC_FLAG_NONE; | ||
451 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); | 458 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
452 | /* load the top of low memory, remap base, and remap limit vars */ | 459 | /* load the top of low memory, remap base, and remap limit vars */ |
453 | pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); | 460 | pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); |
454 | pvt->tolm = ((u32) pci_data) << 4; | 461 | pvt->tolm = ((u32) pci_data) << 4; |
455 | pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data); | 462 | pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data); |
456 | pvt->remapbase = ((u32) pci_data) << 14; | 463 | pvt->remapbase = ((u32) pci_data) << 14; |
457 | pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data); | 464 | pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data); |
458 | pvt->remaplimit = ((u32) pci_data) << 14; | 465 | pvt->remaplimit = ((u32) pci_data) << 14; |
459 | e7xxx_printk(KERN_INFO, | 466 | e7xxx_printk(KERN_INFO, |
460 | "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, | 467 | "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, |
461 | pvt->remapbase, pvt->remaplimit); | 468 | pvt->remapbase, pvt->remaplimit); |
462 | 469 | ||
463 | /* clear any pending errors, or initial state bits */ | 470 | /* clear any pending errors, or initial state bits */ |
464 | e7xxx_get_error_info(mci, &discard); | 471 | e7xxx_get_error_info(mci, &discard); |
465 | 472 | ||
466 | /* Here we assume that we will never see multiple instances of this | 473 | /* Here we assume that we will never see multiple instances of this |
467 | * type of memory controller. The ID is therefore hardcoded to 0. | 474 | * type of memory controller. The ID is therefore hardcoded to 0. |
468 | */ | 475 | */ |
469 | if (edac_mc_add_mc(mci,0)) { | 476 | if (edac_mc_add_mc(mci,0)) { |
470 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); | 477 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
471 | goto fail; | 478 | goto fail1; |
472 | } | 479 | } |
473 | 480 | ||
474 | /* get this far and it's successful */ | 481 | /* get this far and it's successful */ |
475 | debugf3("%s(): success\n", __func__); | 482 | debugf3("%s(): success\n", __func__); |
476 | return 0; | 483 | return 0; |
477 | 484 | ||
478 | fail: | 485 | fail1: |
479 | if (mci != NULL) { | 486 | pci_dev_put(pvt->bridge_ck); |
480 | if(pvt != NULL && pvt->bridge_ck) | ||
481 | pci_dev_put(pvt->bridge_ck); | ||
482 | edac_mc_free(mci); | ||
483 | } | ||
484 | 487 | ||
485 | return rc; | 488 | fail0: |
489 | edac_mc_free(mci); | ||
490 | |||
491 | return -ENODEV; | ||
486 | } | 492 | } |
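The fail0/fail1 labels in the reworked e7xxx_probe1() follow the usual layered-cleanup idiom: each label releases only what was acquired before the corresponding failure point and then falls through to the labels below it, so resources are undone in reverse order of acquisition. A minimal standalone sketch of that pattern, using generic malloc'd resources rather than anything from this driver:

/* Minimal sketch of goto-based layered cleanup (generic resources only). */
#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_late)
{
	char *first, *second;

	first = malloc(16);              /* first acquisition */
	if (first == NULL)
		return -1;

	second = malloc(16);             /* second acquisition */
	if (second == NULL)
		goto fail0;              /* only the first needs undoing */

	if (fail_late)                   /* pretend a later step failed */
		goto fail1;

	/* Success: freed here only to keep the example self-contained. */
	free(second);
	free(first);
	return 0;

fail1:
	free(second);                    /* undo the second acquisition... */
fail0:
	free(first);                     /* ...then fall through to the first */
	return -1;
}

int main(void)
{
	printf("ok path: %d, failure path: %d\n", setup(0), setup(1));
	return 0;
}

The reworked i82875p_probe1() later in this diff uses the same layering of its fail labels.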
487 | 493 | ||
488 | /* returns count (>= 0), or negative on error */ | 494 | /* returns count (>= 0), or negative on error */ |
489 | static int __devinit e7xxx_init_one(struct pci_dev *pdev, | 495 | static int __devinit e7xxx_init_one(struct pci_dev *pdev, |
490 | const struct pci_device_id *ent) | 496 | const struct pci_device_id *ent) |
491 | { | 497 | { |
492 | debugf0("%s()\n", __func__); | 498 | debugf0("%s()\n", __func__); |
493 | 499 | ||
494 | /* wake up and enable device */ | 500 | /* wake up and enable device */ |
495 | return pci_enable_device(pdev) ? | 501 | return pci_enable_device(pdev) ? |
496 | -EIO : e7xxx_probe1(pdev, ent->driver_data); | 502 | -EIO : e7xxx_probe1(pdev, ent->driver_data); |
497 | } | 503 | } |
498 | 504 | ||
499 | static void __devexit e7xxx_remove_one(struct pci_dev *pdev) | 505 | static void __devexit e7xxx_remove_one(struct pci_dev *pdev) |
500 | { | 506 | { |
501 | struct mem_ctl_info *mci; | 507 | struct mem_ctl_info *mci; |
502 | struct e7xxx_pvt *pvt; | 508 | struct e7xxx_pvt *pvt; |
503 | 509 | ||
504 | debugf0("%s()\n", __func__); | 510 | debugf0("%s()\n", __func__); |
505 | 511 | ||
506 | if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) | 512 | if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) |
507 | return; | 513 | return; |
508 | 514 | ||
509 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | 515 | pvt = (struct e7xxx_pvt *) mci->pvt_info; |
510 | pci_dev_put(pvt->bridge_ck); | 516 | pci_dev_put(pvt->bridge_ck); |
drivers/edac/i82860_edac.c
1 | /* | 1 | /* |
2 | * Intel 82860 Memory Controller kernel module | 2 | * Intel 82860 Memory Controller kernel module |
3 | * (C) 2005 Red Hat (http://www.redhat.com) | 3 | * (C) 2005 Red Hat (http://www.redhat.com) |
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * Written by Ben Woodard <woodard@redhat.com> | 7 | * Written by Ben Woodard <woodard@redhat.com> |
8 | * shamelessly copied from and based upon the edac_i82875 driver | 8 | * shamelessly copied from and based upon the edac_i82875 driver |
9 | * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) | 9 | * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/pci_ids.h> | 16 | #include <linux/pci_ids.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include "edac_mc.h" | 18 | #include "edac_mc.h" |
19 | 19 | ||
20 | #define I82860_REVISION " Ver: 2.0.0 " __DATE__ | 20 | #define I82860_REVISION " Ver: 2.0.0 " __DATE__ |
21 | 21 | ||
22 | #define i82860_printk(level, fmt, arg...) \ | 22 | #define i82860_printk(level, fmt, arg...) \ |
23 | edac_printk(level, "i82860", fmt, ##arg) | 23 | edac_printk(level, "i82860", fmt, ##arg) |
24 | 24 | ||
25 | #define i82860_mc_printk(mci, level, fmt, arg...) \ | 25 | #define i82860_mc_printk(mci, level, fmt, arg...) \ |
26 | edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg) | 26 | edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg) |
27 | 27 | ||
28 | #ifndef PCI_DEVICE_ID_INTEL_82860_0 | 28 | #ifndef PCI_DEVICE_ID_INTEL_82860_0 |
29 | #define PCI_DEVICE_ID_INTEL_82860_0 0x2531 | 29 | #define PCI_DEVICE_ID_INTEL_82860_0 0x2531 |
30 | #endif /* PCI_DEVICE_ID_INTEL_82860_0 */ | 30 | #endif /* PCI_DEVICE_ID_INTEL_82860_0 */ |
31 | 31 | ||
32 | #define I82860_MCHCFG 0x50 | 32 | #define I82860_MCHCFG 0x50 |
33 | #define I82860_GBA 0x60 | 33 | #define I82860_GBA 0x60 |
34 | #define I82860_GBA_MASK 0x7FF | 34 | #define I82860_GBA_MASK 0x7FF |
35 | #define I82860_GBA_SHIFT 24 | 35 | #define I82860_GBA_SHIFT 24 |
36 | #define I82860_ERRSTS 0xC8 | 36 | #define I82860_ERRSTS 0xC8 |
37 | #define I82860_EAP 0xE4 | 37 | #define I82860_EAP 0xE4 |
38 | #define I82860_DERRCTL_STS 0xE2 | 38 | #define I82860_DERRCTL_STS 0xE2 |
39 | 39 | ||
40 | enum i82860_chips { | 40 | enum i82860_chips { |
41 | I82860 = 0, | 41 | I82860 = 0, |
42 | }; | 42 | }; |
43 | 43 | ||
44 | struct i82860_dev_info { | 44 | struct i82860_dev_info { |
45 | const char *ctl_name; | 45 | const char *ctl_name; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | struct i82860_error_info { | 48 | struct i82860_error_info { |
49 | u16 errsts; | 49 | u16 errsts; |
50 | u32 eap; | 50 | u32 eap; |
51 | u16 derrsyn; | 51 | u16 derrsyn; |
52 | u16 errsts2; | 52 | u16 errsts2; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | static const struct i82860_dev_info i82860_devs[] = { | 55 | static const struct i82860_dev_info i82860_devs[] = { |
56 | [I82860] = { | 56 | [I82860] = { |
57 | .ctl_name = "i82860" | 57 | .ctl_name = "i82860" |
58 | }, | 58 | }, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code | 61 | static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code |
62 | * has already registered driver | 62 | * has already registered driver |
63 | */ | 63 | */ |
64 | 64 | ||
65 | static void i82860_get_error_info(struct mem_ctl_info *mci, | 65 | static void i82860_get_error_info(struct mem_ctl_info *mci, |
66 | struct i82860_error_info *info) | 66 | struct i82860_error_info *info) |
67 | { | 67 | { |
68 | struct pci_dev *pdev; | 68 | struct pci_dev *pdev; |
69 | 69 | ||
70 | pdev = to_pci_dev(mci->dev); | 70 | pdev = to_pci_dev(mci->dev); |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * This is a mess because there is no atomic way to read all the | 73 | * This is a mess because there is no atomic way to read all the |
74 | * registers at once and the registers can transition from CE being | 74 | * registers at once and the registers can transition from CE being |
75 | * overwritten by UE. | 75 | * overwritten by UE. |
76 | */ | 76 | */ |
77 | pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts); | 77 | pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts); |
78 | pci_read_config_dword(pdev, I82860_EAP, &info->eap); | 78 | pci_read_config_dword(pdev, I82860_EAP, &info->eap); |
79 | pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn); | 79 | pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn); |
80 | pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2); | 80 | pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2); |
81 | 81 | ||
82 | pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003); | 82 | pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003); |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * If the error is the same for both reads then the first set of reads | 85 | * If the error is the same for both reads then the first set of reads |
86 | * is valid. If there is a change then there is a CE with no info and the | 86 | * is valid. If there is a change then there is a CE with no info and the |
87 | * second set of reads is valid and should be UE info. | 87 | * second set of reads is valid and should be UE info. |
88 | */ | 88 | */ |
89 | if (!(info->errsts2 & 0x0003)) | 89 | if (!(info->errsts2 & 0x0003)) |
90 | return; | 90 | return; |
91 | 91 | ||
92 | if ((info->errsts ^ info->errsts2) & 0x0003) { | 92 | if ((info->errsts ^ info->errsts2) & 0x0003) { |
93 | pci_read_config_dword(pdev, I82860_EAP, &info->eap); | 93 | pci_read_config_dword(pdev, I82860_EAP, &info->eap); |
94 | pci_read_config_word(pdev, I82860_DERRCTL_STS, | 94 | pci_read_config_word(pdev, I82860_DERRCTL_STS, |
95 | &info->derrsyn); | 95 | &info->derrsyn); |
96 | } | 96 | } |
97 | } | 97 | } |
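The read/compare/re-read dance in i82860_get_error_info() (and its i82875p counterpart later in the diff) is worth spelling out once: because the status, address, and syndrome registers cannot be sampled atomically, the status register is read before and after the detail registers, and a difference between the two snapshots means a UE landed mid-read, so the details belong to the newer event and the earlier CE is reported with no info. Below is a standalone sketch of just that decision logic, with the hardware registers faked as plain variables.

/* Standalone sketch of the "read status twice" technique used above.
 * Hardware registers are simulated with plain variables here. */
#include <stdio.h>

static unsigned int hw_errsts;   /* pretend error status register */
static unsigned int hw_eap;      /* pretend error address register */

static void read_error_info(unsigned int *errsts, unsigned int *eap,
			    unsigned int *errsts2)
{
	*errsts = hw_errsts;     /* first status snapshot */
	*eap = hw_eap;           /* detail register, read non-atomically */
	*errsts2 = hw_errsts;    /* second status snapshot */
}

int main(void)
{
	unsigned int errsts, eap, errsts2;

	hw_errsts = 0x0001;      /* CE pending */
	hw_eap = 0x1000;
	read_error_info(&errsts, &eap, &errsts2);

	if (!(errsts2 & 0x0003))
		printf("no error pending\n");
	else if ((errsts ^ errsts2) & 0x0003)
		printf("status changed mid-read: report CE with no info, "
		       "details belong to the UE\n");
	else
		printf("stable snapshot: error at address 0x%x\n", eap);

	return 0;
}

In the drivers themselves the mask is 0x0003 for the i82860 and 0x0081 for the i82875p, matching the CE/UE bits of each chipset's ERRSTS register.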
98 | 98 | ||
99 | static int i82860_process_error_info(struct mem_ctl_info *mci, | 99 | static int i82860_process_error_info(struct mem_ctl_info *mci, |
100 | struct i82860_error_info *info, int handle_errors) | 100 | struct i82860_error_info *info, int handle_errors) |
101 | { | 101 | { |
102 | int row; | 102 | int row; |
103 | 103 | ||
104 | if (!(info->errsts2 & 0x0003)) | 104 | if (!(info->errsts2 & 0x0003)) |
105 | return 0; | 105 | return 0; |
106 | 106 | ||
107 | if (!handle_errors) | 107 | if (!handle_errors) |
108 | return 1; | 108 | return 1; |
109 | 109 | ||
110 | if ((info->errsts ^ info->errsts2) & 0x0003) { | 110 | if ((info->errsts ^ info->errsts2) & 0x0003) { |
111 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); | 111 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); |
112 | info->errsts = info->errsts2; | 112 | info->errsts = info->errsts2; |
113 | } | 113 | } |
114 | 114 | ||
115 | info->eap >>= PAGE_SHIFT; | 115 | info->eap >>= PAGE_SHIFT; |
116 | row = edac_mc_find_csrow_by_page(mci, info->eap); | 116 | row = edac_mc_find_csrow_by_page(mci, info->eap); |
117 | 117 | ||
118 | if (info->errsts & 0x0002) | 118 | if (info->errsts & 0x0002) |
119 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); | 119 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); |
120 | else | 120 | else |
121 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0, | 121 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0, |
122 | "i82860 UE"); | 122 | "i82860 UE"); |
123 | 123 | ||
124 | return 1; | 124 | return 1; |
125 | } | 125 | } |
126 | 126 | ||
127 | static void i82860_check(struct mem_ctl_info *mci) | 127 | static void i82860_check(struct mem_ctl_info *mci) |
128 | { | 128 | { |
129 | struct i82860_error_info info; | 129 | struct i82860_error_info info; |
130 | 130 | ||
131 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); | 131 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
132 | i82860_get_error_info(mci, &info); | 132 | i82860_get_error_info(mci, &info); |
133 | i82860_process_error_info(mci, &info, 1); | 133 | i82860_process_error_info(mci, &info, 1); |
134 | } | 134 | } |
135 | 135 | ||
136 | static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | 136 | static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) |
137 | { | 137 | { |
138 | int rc = -ENODEV; | ||
139 | int index; | ||
140 | struct mem_ctl_info *mci = NULL; | ||
141 | unsigned long last_cumul_size; | 138 | unsigned long last_cumul_size; |
142 | struct i82860_error_info discard; | 139 | u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ |
140 | u16 value; | ||
141 | u32 cumul_size; | ||
142 | struct csrow_info *csrow; | ||
143 | int index; | ||
143 | 144 | ||
144 | u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | 145 | pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); |
146 | mchcfg_ddim = mchcfg_ddim & 0x180; | ||
147 | last_cumul_size = 0; | ||
145 | 148 | ||
149 | /* The group row boundary (GRA) reg values are boundary address | ||
150 | * for each DRAM row with a granularity of 16MB. GRA regs are | ||
151 | * cumulative; therefore GRA15 will contain the total memory contained | ||
152 | * in all sixteen rows. | ||
153 | */ | ||
154 | for (index = 0; index < mci->nr_csrows; index++) { | ||
155 | csrow = &mci->csrows[index]; | ||
156 | pci_read_config_word(pdev, I82860_GBA + index * 2, &value); | ||
157 | cumul_size = (value & I82860_GBA_MASK) << | ||
158 | (I82860_GBA_SHIFT - PAGE_SHIFT); | ||
159 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | ||
160 | cumul_size); | ||
161 | |||
162 | if (cumul_size == last_cumul_size) | ||
163 | continue; /* not populated */ | ||
164 | |||
165 | csrow->first_page = last_cumul_size; | ||
166 | csrow->last_page = cumul_size - 1; | ||
167 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
168 | last_cumul_size = cumul_size; | ||
169 | csrow->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */ | ||
170 | csrow->mtype = MEM_RMBS; | ||
171 | csrow->dtype = DEV_UNKNOWN; | ||
172 | csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | ||
177 | { | ||
178 | struct mem_ctl_info *mci; | ||
179 | struct i82860_error_info discard; | ||
180 | |||
146 | /* RDRAM has channels but these don't map onto the abstractions that | 181 | /* RDRAM has channels but these don't map onto the abstractions that |
147 | edac uses. | 182 | edac uses. |
148 | The device groups from the GRA registers seem to map reasonably | 183 | The device groups from the GRA registers seem to map reasonably |
149 | well onto the notion of a chip select row. | 184 | well onto the notion of a chip select row. |
150 | There are 16 GRA registers; since the name is associated with | 185 | There are 16 GRA registers; since the name is associated with |
151 | the channel and the GRA registers map to physical devices, we are | 186 | the channel and the GRA registers map to physical devices, we are |
152 | going to make 1 channel per group. | 187 | going to make 1 channel per group. |
153 | */ | 188 | */ |
154 | mci = edac_mc_alloc(0, 16, 1); | 189 | mci = edac_mc_alloc(0, 16, 1); |
155 | 190 | ||
156 | if (!mci) | 191 | if (!mci) |
157 | return -ENOMEM; | 192 | return -ENOMEM; |
158 | 193 | ||
159 | debugf3("%s(): init mci\n", __func__); | 194 | debugf3("%s(): init mci\n", __func__); |
160 | mci->dev = &pdev->dev; | 195 | mci->dev = &pdev->dev; |
161 | mci->mtype_cap = MEM_FLAG_DDR; | 196 | mci->mtype_cap = MEM_FLAG_DDR; |
162 | |||
163 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 197 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
164 | /* I"m not sure about this but I think that all RDRAM is SECDED */ | 198 | /* I"m not sure about this but I think that all RDRAM is SECDED */ |
165 | mci->edac_cap = EDAC_FLAG_SECDED; | 199 | mci->edac_cap = EDAC_FLAG_SECDED; |
166 | /* adjust FLAGS */ | ||
167 | |||
168 | mci->mod_name = EDAC_MOD_STR; | 200 | mci->mod_name = EDAC_MOD_STR; |
169 | mci->mod_ver = I82860_REVISION; | 201 | mci->mod_ver = I82860_REVISION; |
170 | mci->ctl_name = i82860_devs[dev_idx].ctl_name; | 202 | mci->ctl_name = i82860_devs[dev_idx].ctl_name; |
171 | mci->edac_check = i82860_check; | 203 | mci->edac_check = i82860_check; |
172 | mci->ctl_page_to_phys = NULL; | 204 | mci->ctl_page_to_phys = NULL; |
173 | 205 | i82860_init_csrows(mci, pdev); | |
174 | pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); | ||
175 | mchcfg_ddim = mchcfg_ddim & 0x180; | ||
176 | |||
177 | /* | ||
178 | * The group row boundary (GRA) reg values are boundary address | ||
179 | * for each DRAM row with a granularity of 16MB. GRA regs are | ||
180 | * cumulative; therefore GRA15 will contain the total memory contained | ||
181 | * in all sixteen rows. | ||
182 | */ | ||
183 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | ||
184 | u16 value; | ||
185 | u32 cumul_size; | ||
186 | struct csrow_info *csrow = &mci->csrows[index]; | ||
187 | |||
188 | pci_read_config_word(pdev, I82860_GBA + index * 2, | ||
189 | &value); | ||
190 | |||
191 | cumul_size = (value & I82860_GBA_MASK) << | ||
192 | (I82860_GBA_SHIFT - PAGE_SHIFT); | ||
193 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | ||
194 | cumul_size); | ||
195 | |||
196 | if (cumul_size == last_cumul_size) | ||
197 | continue; /* not populated */ | ||
198 | |||
199 | csrow->first_page = last_cumul_size; | ||
200 | csrow->last_page = cumul_size - 1; | ||
201 | csrow->nr_pages = cumul_size - last_cumul_size; | ||
202 | last_cumul_size = cumul_size; | ||
203 | csrow->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */ | ||
204 | csrow->mtype = MEM_RMBS; | ||
205 | csrow->dtype = DEV_UNKNOWN; | ||
206 | csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; | ||
207 | } | ||
208 | |||
209 | i82860_get_error_info(mci, &discard); /* clear counters */ | 206 | i82860_get_error_info(mci, &discard); /* clear counters */ |
210 | 207 | ||
211 | /* Here we assume that we will never see multiple instances of this | 208 | /* Here we assume that we will never see multiple instances of this |
212 | * type of memory controller. The ID is therefore hardcoded to 0. | 209 | * type of memory controller. The ID is therefore hardcoded to 0. |
213 | */ | 210 | */ |
214 | if (edac_mc_add_mc(mci,0)) { | 211 | if (edac_mc_add_mc(mci,0)) { |
215 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); | 212 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
216 | edac_mc_free(mci); | 213 | goto fail; |
217 | } else { | ||
218 | /* get this far and it's successful */ | ||
219 | debugf3("%s(): success\n", __func__); | ||
220 | rc = 0; | ||
221 | } | 214 | } |
222 | 215 | ||
223 | return rc; | 216 | /* get this far and it's successful */ |
217 | debugf3("%s(): success\n", __func__); | ||
218 | |||
219 | return 0; | ||
220 | |||
221 | fail: | ||
222 | edac_mc_free(mci); | ||
223 | return -ENODEV; | ||
224 | } | 224 | } |
225 | 225 | ||
226 | /* returns count (>= 0), or negative on error */ | 226 | /* returns count (>= 0), or negative on error */ |
227 | static int __devinit i82860_init_one(struct pci_dev *pdev, | 227 | static int __devinit i82860_init_one(struct pci_dev *pdev, |
228 | const struct pci_device_id *ent) | 228 | const struct pci_device_id *ent) |
229 | { | 229 | { |
230 | int rc; | 230 | int rc; |
231 | 231 | ||
232 | debugf0("%s()\n", __func__); | 232 | debugf0("%s()\n", __func__); |
233 | i82860_printk(KERN_INFO, "i82860 init one\n"); | 233 | i82860_printk(KERN_INFO, "i82860 init one\n"); |
234 | 234 | ||
235 | if (pci_enable_device(pdev) < 0) | 235 | if (pci_enable_device(pdev) < 0) |
236 | return -EIO; | 236 | return -EIO; |
237 | 237 | ||
238 | rc = i82860_probe1(pdev, ent->driver_data); | 238 | rc = i82860_probe1(pdev, ent->driver_data); |
239 | 239 | ||
240 | if (rc == 0) | 240 | if (rc == 0) |
241 | mci_pdev = pci_dev_get(pdev); | 241 | mci_pdev = pci_dev_get(pdev); |
242 | 242 | ||
243 | return rc; | 243 | return rc; |
244 | } | 244 | } |
245 | 245 | ||
246 | static void __devexit i82860_remove_one(struct pci_dev *pdev) | 246 | static void __devexit i82860_remove_one(struct pci_dev *pdev) |
247 | { | 247 | { |
248 | struct mem_ctl_info *mci; | 248 | struct mem_ctl_info *mci; |
249 | 249 | ||
250 | debugf0("%s()\n", __func__); | 250 | debugf0("%s()\n", __func__); |
251 | 251 | ||
252 | if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) | 252 | if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) |
253 | return; | 253 | return; |
254 | 254 | ||
255 | edac_mc_free(mci); | 255 | edac_mc_free(mci); |
256 | } | 256 | } |
257 | 257 | ||
258 | static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { | 258 | static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { |
259 | { | 259 | { |
260 | PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 260 | PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
261 | I82860 | 261 | I82860 |
262 | }, | 262 | }, |
263 | { | 263 | { |
264 | 0, | 264 | 0, |
265 | } /* 0 terminated list. */ | 265 | } /* 0 terminated list. */ |
266 | }; | 266 | }; |
267 | 267 | ||
268 | MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); | 268 | MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); |
269 | 269 | ||
270 | static struct pci_driver i82860_driver = { | 270 | static struct pci_driver i82860_driver = { |
271 | .name = EDAC_MOD_STR, | 271 | .name = EDAC_MOD_STR, |
272 | .probe = i82860_init_one, | 272 | .probe = i82860_init_one, |
273 | .remove = __devexit_p(i82860_remove_one), | 273 | .remove = __devexit_p(i82860_remove_one), |
274 | .id_table = i82860_pci_tbl, | 274 | .id_table = i82860_pci_tbl, |
275 | }; | 275 | }; |
276 | 276 | ||
277 | static int __init i82860_init(void) | 277 | static int __init i82860_init(void) |
278 | { | 278 | { |
279 | int pci_rc; | 279 | int pci_rc; |
280 | 280 | ||
281 | debugf3("%s()\n", __func__); | 281 | debugf3("%s()\n", __func__); |
282 | 282 | ||
283 | if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) | 283 | if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) |
284 | goto fail0; | 284 | goto fail0; |
285 | 285 | ||
286 | if (!mci_pdev) { | 286 | if (!mci_pdev) { |
287 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 287 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
288 | PCI_DEVICE_ID_INTEL_82860_0, NULL); | 288 | PCI_DEVICE_ID_INTEL_82860_0, NULL); |
289 | 289 |
drivers/edac/i82875p_edac.c
1 | /* | 1 | /* |
2 | * Intel D82875P Memory Controller kernel module | 2 | * Intel D82875P Memory Controller kernel module |
3 | * (C) 2003 Linux Networx (http://lnxi.com) | 3 | * (C) 2003 Linux Networx (http://lnxi.com) |
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * Written by Thayne Harbaugh | 7 | * Written by Thayne Harbaugh |
8 | * Contributors: | 8 | * Contributors: |
9 | * Wang Zhenyu at intel.com | 9 | * Wang Zhenyu at intel.com |
10 | * | 10 | * |
11 | * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ | 11 | * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $ |
12 | * | 12 | * |
13 | * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com | 13 | * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/config.h> | 16 | #include <linux/config.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/pci_ids.h> | 20 | #include <linux/pci_ids.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include "edac_mc.h" | 22 | #include "edac_mc.h" |
23 | 23 | ||
24 | #define I82875P_REVISION " Ver: 2.0.0 " __DATE__ | 24 | #define I82875P_REVISION " Ver: 2.0.0 " __DATE__ |
25 | 25 | ||
26 | #define i82875p_printk(level, fmt, arg...) \ | 26 | #define i82875p_printk(level, fmt, arg...) \ |
27 | edac_printk(level, "i82875p", fmt, ##arg) | 27 | edac_printk(level, "i82875p", fmt, ##arg) |
28 | 28 | ||
29 | #define i82875p_mc_printk(mci, level, fmt, arg...) \ | 29 | #define i82875p_mc_printk(mci, level, fmt, arg...) \ |
30 | edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg) | 30 | edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg) |
31 | 31 | ||
32 | #ifndef PCI_DEVICE_ID_INTEL_82875_0 | 32 | #ifndef PCI_DEVICE_ID_INTEL_82875_0 |
33 | #define PCI_DEVICE_ID_INTEL_82875_0 0x2578 | 33 | #define PCI_DEVICE_ID_INTEL_82875_0 0x2578 |
34 | #endif /* PCI_DEVICE_ID_INTEL_82875_0 */ | 34 | #endif /* PCI_DEVICE_ID_INTEL_82875_0 */ |
35 | 35 | ||
36 | #ifndef PCI_DEVICE_ID_INTEL_82875_6 | 36 | #ifndef PCI_DEVICE_ID_INTEL_82875_6 |
37 | #define PCI_DEVICE_ID_INTEL_82875_6 0x257e | 37 | #define PCI_DEVICE_ID_INTEL_82875_6 0x257e |
38 | #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ | 38 | #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ |
39 | 39 | ||
40 | /* four csrows in dual channel, eight in single channel */ | 40 | /* four csrows in dual channel, eight in single channel */ |
41 | #define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) | 41 | #define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) |
42 | 42 | ||
43 | /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ | 43 | /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ |
44 | #define I82875P_EAP 0x58 /* Error Address Pointer (32b) | 44 | #define I82875P_EAP 0x58 /* Error Address Pointer (32b) |
45 | * | 45 | * |
46 | * 31:12 block address | 46 | * 31:12 block address |
47 | * 11:0 reserved | 47 | * 11:0 reserved |
48 | */ | 48 | */ |
49 | 49 | ||
50 | #define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b) | 50 | #define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b) |
51 | * | 51 | * |
52 | * 7:0 DRAM ECC Syndrome | 52 | * 7:0 DRAM ECC Syndrome |
53 | */ | 53 | */ |
54 | 54 | ||
55 | #define I82875P_DES 0x5d /* DRAM Error Status (8b) | 55 | #define I82875P_DES 0x5d /* DRAM Error Status (8b) |
56 | * | 56 | * |
57 | * 7:1 reserved | 57 | * 7:1 reserved |
58 | * 0 Error channel 0/1 | 58 | * 0 Error channel 0/1 |
59 | */ | 59 | */ |
60 | 60 | ||
61 | #define I82875P_ERRSTS 0xc8 /* Error Status Register (16b) | 61 | #define I82875P_ERRSTS 0xc8 /* Error Status Register (16b) |
62 | * | 62 | * |
63 | * 15:10 reserved | 63 | * 15:10 reserved |
64 | * 9 non-DRAM lock error (ndlock) | 64 | * 9 non-DRAM lock error (ndlock) |
65 | * 8 Sftwr Generated SMI | 65 | * 8 Sftwr Generated SMI |
66 | * 7 ECC UE | 66 | * 7 ECC UE |
67 | * 6 reserved | 67 | * 6 reserved |
68 | * 5 MCH detects unimplemented cycle | 68 | * 5 MCH detects unimplemented cycle |
69 | * 4 AGP access outside GA | 69 | * 4 AGP access outside GA |
70 | * 3 Invalid AGP access | 70 | * 3 Invalid AGP access |
71 | * 2 Invalid GA translation table | 71 | * 2 Invalid GA translation table |
72 | * 1 Unsupported AGP command | 72 | * 1 Unsupported AGP command |
73 | * 0 ECC CE | 73 | * 0 ECC CE |
74 | */ | 74 | */ |
75 | 75 | ||
76 | #define I82875P_ERRCMD 0xca /* Error Command (16b) | 76 | #define I82875P_ERRCMD 0xca /* Error Command (16b) |
77 | * | 77 | * |
78 | * 15:10 reserved | 78 | * 15:10 reserved |
79 | * 9 SERR on non-DRAM lock | 79 | * 9 SERR on non-DRAM lock |
80 | * 8 SERR on ECC UE | 80 | * 8 SERR on ECC UE |
81 | * 7 SERR on ECC CE | 81 | * 7 SERR on ECC CE |
82 | * 6 target abort on high exception | 82 | * 6 target abort on high exception |
83 | * 5 detect unimplemented cyc | 83 | * 5 detect unimplemented cyc |
84 | * 4 AGP access outside of GA | 84 | * 4 AGP access outside of GA |
85 | * 3 SERR on invalid AGP access | 85 | * 3 SERR on invalid AGP access |
86 | * 2 invalid translation table | 86 | * 2 invalid translation table |
87 | * 1 SERR on unsupported AGP command | 87 | * 1 SERR on unsupported AGP command |
88 | * 0 reserved | 88 | * 0 reserved |
89 | */ | 89 | */ |
90 | 90 | ||
91 | /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ | 91 | /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ |
92 | #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) | 92 | #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) |
93 | * | 93 | * |
94 | * 15:10 reserved | 94 | * 15:10 reserved |
95 | * 9 fast back-to-back - ro 0 | 95 | * 9 fast back-to-back - ro 0 |
96 | * 8 SERR enable - ro 0 | 96 | * 8 SERR enable - ro 0 |
97 | * 7 addr/data stepping - ro 0 | 97 | * 7 addr/data stepping - ro 0 |
98 | * 6 parity err enable - ro 0 | 98 | * 6 parity err enable - ro 0 |
99 | * 5 VGA palette snoop - ro 0 | 99 | * 5 VGA palette snoop - ro 0 |
100 | * 4 mem wr & invalidate - ro 0 | 100 | * 4 mem wr & invalidate - ro 0 |
101 | * 3 special cycle - ro 0 | 101 | * 3 special cycle - ro 0 |
102 | * 2 bus master - ro 0 | 102 | * 2 bus master - ro 0 |
103 | * 1 mem access dev6 - 0(dis),1(en) | 103 | * 1 mem access dev6 - 0(dis),1(en) |
104 | * 0 IO access dev3 - 0(dis),1(en) | 104 | * 0 IO access dev3 - 0(dis),1(en) |
105 | */ | 105 | */ |
106 | 106 | ||
107 | #define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b) | 107 | #define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b) |
108 | * | 108 | * |
109 | * 31:12 mem base addr [31:12] | 109 | * 31:12 mem base addr [31:12] |
110 | * 11:4 address mask - ro 0 | 110 | * 11:4 address mask - ro 0 |
111 | * 3 prefetchable - ro 0(non),1(pre) | 111 | * 3 prefetchable - ro 0(non),1(pre) |
112 | * 2:1 mem type - ro 0 | 112 | * 2:1 mem type - ro 0 |
113 | * 0 mem space - ro 0 | 113 | * 0 mem space - ro 0 |
114 | */ | 114 | */ |
115 | 115 | ||
116 | /* Intel 82875p MMIO register space - device 0 function 0 - MMR space */ | 116 | /* Intel 82875p MMIO register space - device 0 function 0 - MMR space */ |
117 | 117 | ||
118 | #define I82875P_DRB_SHIFT 26 /* 64MiB grain */ | 118 | #define I82875P_DRB_SHIFT 26 /* 64MiB grain */ |
119 | #define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8) | 119 | #define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8) |
120 | * | 120 | * |
121 | * 7 reserved | 121 | * 7 reserved |
122 | * 6:0 64MiB row boundary addr | 122 | * 6:0 64MiB row boundary addr |
123 | */ | 123 | */ |
124 | 124 | ||
125 | #define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8) | 125 | #define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8) |
126 | * | 126 | * |
127 | * 7 reserved | 127 | * 7 reserved |
128 | * 6:4 row attr row 1 | 128 | * 6:4 row attr row 1 |
129 | * 3 reserved | 129 | * 3 reserved |
130 | * 2:0 row attr row 0 | 130 | * 2:0 row attr row 0 |
131 | * | 131 | * |
132 | * 000 = 4KiB | 132 | * 000 = 4KiB |
133 | * 001 = 8KiB | 133 | * 001 = 8KiB |
134 | * 010 = 16KiB | 134 | * 010 = 16KiB |
135 | * 011 = 32KiB | 135 | * 011 = 32KiB |
136 | */ | 136 | */ |
137 | 137 | ||
138 | #define I82875P_DRC 0x68 /* DRAM Controller Mode (32b) | 138 | #define I82875P_DRC 0x68 /* DRAM Controller Mode (32b) |
139 | * | 139 | * |
140 | * 31:30 reserved | 140 | * 31:30 reserved |
141 | * 29 init complete | 141 | * 29 init complete |
142 | * 28:23 reserved | 142 | * 28:23 reserved |
143 | * 22:21 nr chan 00=1,01=2 | 143 | * 22:21 nr chan 00=1,01=2 |
144 | * 20 reserved | 144 | * 20 reserved |
145 | * 19:18 Data Integ Mode 00=none,01=ecc | 145 | * 19:18 Data Integ Mode 00=none,01=ecc |
146 | * 17:11 reserved | 146 | * 17:11 reserved |
147 | * 10:8 refresh mode | 147 | * 10:8 refresh mode |
148 | * 7 reserved | 148 | * 7 reserved |
149 | * 6:4 mode select | 149 | * 6:4 mode select |
150 | * 3:2 reserved | 150 | * 3:2 reserved |
151 | * 1:0 DRAM type 01=DDR | 151 | * 1:0 DRAM type 01=DDR |
152 | */ | 152 | */ |
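Given the DRC layout documented above, the two fields this driver actually consumes, the channel count and the data-integrity mode, fall out of plain shifts and masks. A quick hedged example with a fabricated DRC value (only the decode mirrors the register description; the value itself is made up):

/* Decoding the DRC fields used by this driver (made-up register value). */
#include <stdio.h>

int main(void)
{
	unsigned int drc = 0x20240011;          /* hypothetical DRC contents */
	unsigned int nr_chan = ((drc >> 21) & 0x1) + 1;  /* bits 22:21 -> 1 or 2 */
	unsigned int ecc_on  = (drc >> 18) & 0x1;        /* bits 19:18 -> ECC bit */

	printf("channels=%u ecc=%s\n", nr_chan, ecc_on ? "on" : "off");
	return 0;
}

For this fabricated value the decode reports two channels with ECC enabled.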
153 | 153 | ||
154 | enum i82875p_chips { | 154 | enum i82875p_chips { |
155 | I82875P = 0, | 155 | I82875P = 0, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | struct i82875p_pvt { | 158 | struct i82875p_pvt { |
159 | struct pci_dev *ovrfl_pdev; | 159 | struct pci_dev *ovrfl_pdev; |
160 | void __iomem *ovrfl_window; | 160 | void __iomem *ovrfl_window; |
161 | }; | 161 | }; |
162 | 162 | ||
163 | struct i82875p_dev_info { | 163 | struct i82875p_dev_info { |
164 | const char *ctl_name; | 164 | const char *ctl_name; |
165 | }; | 165 | }; |
166 | 166 | ||
167 | struct i82875p_error_info { | 167 | struct i82875p_error_info { |
168 | u16 errsts; | 168 | u16 errsts; |
169 | u32 eap; | 169 | u32 eap; |
170 | u8 des; | 170 | u8 des; |
171 | u8 derrsyn; | 171 | u8 derrsyn; |
172 | u16 errsts2; | 172 | u16 errsts2; |
173 | }; | 173 | }; |
174 | 174 | ||
175 | static const struct i82875p_dev_info i82875p_devs[] = { | 175 | static const struct i82875p_dev_info i82875p_devs[] = { |
176 | [I82875P] = { | 176 | [I82875P] = { |
177 | .ctl_name = "i82875p" | 177 | .ctl_name = "i82875p" |
178 | }, | 178 | }, |
179 | }; | 179 | }; |
180 | 180 | ||
181 | static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has | 181 | static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has |
182 | * already registered driver | 182 | * already registered driver |
183 | */ | 183 | */ |
184 | 184 | ||
185 | static int i82875p_registered = 1; | 185 | static int i82875p_registered = 1; |
186 | 186 | ||
187 | static void i82875p_get_error_info(struct mem_ctl_info *mci, | 187 | static void i82875p_get_error_info(struct mem_ctl_info *mci, |
188 | struct i82875p_error_info *info) | 188 | struct i82875p_error_info *info) |
189 | { | 189 | { |
190 | struct pci_dev *pdev; | 190 | struct pci_dev *pdev; |
191 | 191 | ||
192 | pdev = to_pci_dev(mci->dev); | 192 | pdev = to_pci_dev(mci->dev); |
193 | 193 | ||
194 | /* | 194 | /* |
195 | * This is a mess because there is no atomic way to read all the | 195 | * This is a mess because there is no atomic way to read all the |
196 | * registers at once and the registers can transition from CE being | 196 | * registers at once and the registers can transition from CE being |
197 | * overwritten by UE. | 197 | * overwritten by UE. |
198 | */ | 198 | */ |
199 | pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts); | 199 | pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts); |
200 | pci_read_config_dword(pdev, I82875P_EAP, &info->eap); | 200 | pci_read_config_dword(pdev, I82875P_EAP, &info->eap); |
201 | pci_read_config_byte(pdev, I82875P_DES, &info->des); | 201 | pci_read_config_byte(pdev, I82875P_DES, &info->des); |
202 | pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); | 202 | pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); |
203 | pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2); | 203 | pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2); |
204 | 204 | ||
205 | pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081); | 205 | pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081); |
206 | 206 | ||
207 | /* | 207 | /* |
208 | * If the error is the same for both reads then the first | 208 | * If the error is the same for both reads then the first |
209 | * set of reads is valid. If there is a change then there is | 209 | * set of reads is valid. If there is a change then there is |
210 | * a CE with no info and the second set of reads is valid | 210 | * a CE with no info and the second set of reads is valid |
211 | * and should be UE info. | 211 | * and should be UE info. |
212 | */ | 212 | */ |
213 | if (!(info->errsts2 & 0x0081)) | 213 | if (!(info->errsts2 & 0x0081)) |
214 | return; | 214 | return; |
215 | 215 | ||
216 | if ((info->errsts ^ info->errsts2) & 0x0081) { | 216 | if ((info->errsts ^ info->errsts2) & 0x0081) { |
217 | pci_read_config_dword(pdev, I82875P_EAP, &info->eap); | 217 | pci_read_config_dword(pdev, I82875P_EAP, &info->eap); |
218 | pci_read_config_byte(pdev, I82875P_DES, &info->des); | 218 | pci_read_config_byte(pdev, I82875P_DES, &info->des); |
219 | pci_read_config_byte(pdev, I82875P_DERRSYN, | 219 | pci_read_config_byte(pdev, I82875P_DERRSYN, |
220 | &info->derrsyn); | 220 | &info->derrsyn); |
221 | } | 221 | } |
222 | } | 222 | } |
223 | 223 | ||
224 | static int i82875p_process_error_info(struct mem_ctl_info *mci, | 224 | static int i82875p_process_error_info(struct mem_ctl_info *mci, |
225 | struct i82875p_error_info *info, int handle_errors) | 225 | struct i82875p_error_info *info, int handle_errors) |
226 | { | 226 | { |
227 | int row, multi_chan; | 227 | int row, multi_chan; |
228 | 228 | ||
229 | multi_chan = mci->csrows[0].nr_channels - 1; | 229 | multi_chan = mci->csrows[0].nr_channels - 1; |
230 | 230 | ||
231 | if (!(info->errsts2 & 0x0081)) | 231 | if (!(info->errsts2 & 0x0081)) |
232 | return 0; | 232 | return 0; |
233 | 233 | ||
234 | if (!handle_errors) | 234 | if (!handle_errors) |
235 | return 1; | 235 | return 1; |
236 | 236 | ||
237 | if ((info->errsts ^ info->errsts2) & 0x0081) { | 237 | if ((info->errsts ^ info->errsts2) & 0x0081) { |
238 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); | 238 | edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); |
239 | info->errsts = info->errsts2; | 239 | info->errsts = info->errsts2; |
240 | } | 240 | } |
241 | 241 | ||
242 | info->eap >>= PAGE_SHIFT; | 242 | info->eap >>= PAGE_SHIFT; |
243 | row = edac_mc_find_csrow_by_page(mci, info->eap); | 243 | row = edac_mc_find_csrow_by_page(mci, info->eap); |
244 | 244 | ||
245 | if (info->errsts & 0x0080) | 245 | if (info->errsts & 0x0080) |
246 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); | 246 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); |
247 | else | 247 | else |
248 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, | 248 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, |
249 | multi_chan ? (info->des & 0x1) : 0, | 249 | multi_chan ? (info->des & 0x1) : 0, |
250 | "i82875p CE"); | 250 | "i82875p CE"); |
251 | 251 | ||
252 | return 1; | 252 | return 1; |
253 | } | 253 | } |
254 | 254 | ||
255 | static void i82875p_check(struct mem_ctl_info *mci) | 255 | static void i82875p_check(struct mem_ctl_info *mci) |
256 | { | 256 | { |
257 | struct i82875p_error_info info; | 257 | struct i82875p_error_info info; |
258 | 258 | ||
259 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); | 259 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
260 | i82875p_get_error_info(mci, &info); | 260 | i82875p_get_error_info(mci, &info); |
261 | i82875p_process_error_info(mci, &info, 1); | 261 | i82875p_process_error_info(mci, &info, 1); |
262 | } | 262 | } |
263 | 263 | ||
264 | #ifdef CONFIG_PROC_FS | 264 | #ifdef CONFIG_PROC_FS |
265 | extern int pci_proc_attach_device(struct pci_dev *); | 265 | extern int pci_proc_attach_device(struct pci_dev *); |
266 | #endif | 266 | #endif |
267 | 267 | ||
268 | static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | 268 | /* Return 0 on success or 1 on failure. */ |
269 | static int i82875p_setup_overfl_dev(struct pci_dev *pdev, | ||
270 | struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window) | ||
269 | { | 271 | { |
270 | int rc = -ENODEV; | 272 | struct pci_dev *dev; |
271 | int index; | 273 | void __iomem *window; |
272 | struct mem_ctl_info *mci = NULL; | ||
273 | struct i82875p_pvt *pvt = NULL; | ||
274 | unsigned long last_cumul_size; | ||
275 | struct pci_dev *ovrfl_pdev; | ||
276 | void __iomem *ovrfl_window = NULL; | ||
277 | u32 drc; | ||
278 | u32 drc_chan; /* Number of channels 0=1chan,1=2chan */ | ||
279 | u32 nr_chans; | ||
280 | u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | ||
281 | struct i82875p_error_info discard; | ||
282 | 274 | ||
283 | debugf0("%s()\n", __func__); | 275 | *ovrfl_pdev = NULL; |
284 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | 276 | *ovrfl_window = NULL; |
277 | dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | ||
285 | 278 | ||
286 | if (!ovrfl_pdev) { | 279 | if (dev == NULL) { |
287 | /* | 280 | /* Intel tells BIOS developers to hide device 6 which |
288 | * Intel tells BIOS developers to hide device 6 which | ||
289 | * configures the overflow device access containing | 281 | * configures the overflow device access containing |
290 | * the DRBs - this is where we expose device 6. | 282 | * the DRBs - this is where we expose device 6. |
291 | * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm | 283 | * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm |
292 | */ | 284 | */ |
293 | pci_write_bits8(pdev, 0xf4, 0x2, 0x2); | 285 | pci_write_bits8(pdev, 0xf4, 0x2, 0x2); |
294 | ovrfl_pdev = | 286 | dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); |
295 | pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); | ||
296 | 287 | ||
297 | if (!ovrfl_pdev) | 288 | if (dev == NULL) |
298 | return -ENODEV; | 289 | return 1; |
299 | } | 290 | } |
300 | 291 | ||
292 | *ovrfl_pdev = dev; | ||
293 | |||
301 | #ifdef CONFIG_PROC_FS | 294 | #ifdef CONFIG_PROC_FS |
302 | if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { | 295 | if ((dev->procent == NULL) && pci_proc_attach_device(dev)) { |
303 | i82875p_printk(KERN_ERR, | 296 | i82875p_printk(KERN_ERR, "%s(): Failed to attach overflow " |
304 | "%s(): Failed to attach overflow device\n", __func__); | 297 | "device\n", __func__); |
305 | return -ENODEV; | 298 | return 1; |
306 | } | 299 | } |
307 | #endif | 300 | #endif /* CONFIG_PROC_FS */ |
308 | /* CONFIG_PROC_FS */ | 301 | if (pci_enable_device(dev)) { |
309 | if (pci_enable_device(ovrfl_pdev)) { | 302 | i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow " |
310 | i82875p_printk(KERN_ERR, | 303 | "device\n", __func__); |
311 | "%s(): Failed to enable overflow device\n", __func__); | 304 | return 1; |
312 | return -ENODEV; | ||
313 | } | 305 | } |
314 | 306 | ||
315 | if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { | 307 | if (pci_request_regions(dev, pci_name(dev))) { |
316 | #ifdef CORRECT_BIOS | 308 | #ifdef CORRECT_BIOS |
317 | goto fail0; | 309 | goto fail0; |
318 | #endif | 310 | #endif |
319 | } | 311 | } |
320 | 312 | ||
321 | /* cache is irrelevant for PCI bus reads/writes */ | 313 | /* cache is irrelevant for PCI bus reads/writes */ |
322 | ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), | 314 | window = ioremap_nocache(pci_resource_start(dev, 0), |
323 | pci_resource_len(ovrfl_pdev, 0)); | 315 | pci_resource_len(dev, 0)); |
324 | 316 | ||
325 | if (!ovrfl_window) { | 317 | if (window == NULL) { |
326 | i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", | 318 | i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", |
327 | __func__); | 319 | __func__); |
328 | goto fail1; | 320 | goto fail1; |
329 | } | 321 | } |
330 | 322 | ||
331 | /* need to find out the number of channels */ | 323 | *ovrfl_window = window; |
332 | drc = readl(ovrfl_window + I82875P_DRC); | 324 | return 0; |
333 | drc_chan = ((drc >> 21) & 0x1); | ||
334 | nr_chans = drc_chan + 1; | ||
335 | 325 | ||
336 | drc_ddim = (drc >> 18) & 0x1; | 326 | fail1: |
337 | mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), | 327 | pci_release_regions(dev); |
338 | nr_chans); | ||
339 | 328 | ||
340 | if (!mci) { | 329 | #ifdef CORRECT_BIOS |
341 | rc = -ENOMEM; | 330 | fail0: |
342 | goto fail2; | 331 | pci_disable_device(dev); |
343 | } | 332 | #endif |
333 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ | ||
334 | return 1; | ||
335 | } | ||
344 | 336 | ||
345 | debugf3("%s(): init mci\n", __func__); | ||
346 | mci->dev = &pdev->dev; | ||
347 | mci->mtype_cap = MEM_FLAG_DDR; | ||
348 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | ||
349 | mci->edac_cap = EDAC_FLAG_UNKNOWN; | ||
350 | /* adjust FLAGS */ | ||
351 | 337 | ||
352 | mci->mod_name = EDAC_MOD_STR; | 338 | /* Return 1 if dual channel mode is active. Else return 0. */ |
353 | mci->mod_ver = I82875P_REVISION; | 339 | static inline int dual_channel_active(u32 drc) |
354 | mci->ctl_name = i82875p_devs[dev_idx].ctl_name; | 340 | { |
355 | mci->edac_check = i82875p_check; | 341 | return (drc >> 21) & 0x1; |
356 | mci->ctl_page_to_phys = NULL; | 342 | } |
357 | debugf3("%s(): init pvt\n", __func__); | ||
358 | pvt = (struct i82875p_pvt *) mci->pvt_info; | ||
359 | pvt->ovrfl_pdev = ovrfl_pdev; | ||
360 | pvt->ovrfl_window = ovrfl_window; | ||
361 | 343 | ||
362 | /* | 344 | |
363 | * The dram row boundary (DRB) reg values are boundary address | 345 | static void i82875p_init_csrows(struct mem_ctl_info *mci, |
346 | struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc) | ||
347 | { | ||
348 | struct csrow_info *csrow; | ||
349 | unsigned long last_cumul_size; | ||
350 | u8 value; | ||
351 | u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | ||
352 | u32 cumul_size; | ||
353 | int index; | ||
354 | |||
355 | drc_ddim = (drc >> 18) & 0x1; | ||
356 | last_cumul_size = 0; | ||
357 | |||
358 | /* The dram row boundary (DRB) reg values are boundary address | ||
364 | * for each DRAM row with a granularity of 32 or 64MB (single/dual | 359 | * for each DRAM row with a granularity of 32 or 64MB (single/dual |
365 | * channel operation). DRB regs are cumulative; therefore DRB7 will | 360 | * channel operation). DRB regs are cumulative; therefore DRB7 will |
366 | * contain the total memory contained in all eight rows. | 361 | * contain the total memory contained in all eight rows. |
367 | */ | 362 | */ |
368 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | ||
369 | u8 value; | ||
370 | u32 cumul_size; | ||
371 | struct csrow_info *csrow = &mci->csrows[index]; | ||
372 | 363 | ||
364 | for (index = 0; index < mci->nr_csrows; index++) { | ||
365 | csrow = &mci->csrows[index]; | ||
366 | |||
373 | value = readb(ovrfl_window + I82875P_DRB + index); | 367 | value = readb(ovrfl_window + I82875P_DRB + index); |
374 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); | 368 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); |
375 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, | 369 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
376 | cumul_size); | 370 | cumul_size); |
377 | |||
378 | if (cumul_size == last_cumul_size) | 371 | if (cumul_size == last_cumul_size) |
379 | continue; /* not populated */ | 372 | continue; /* not populated */ |
380 | 373 | ||
381 | csrow->first_page = last_cumul_size; | 374 | csrow->first_page = last_cumul_size; |
382 | csrow->last_page = cumul_size - 1; | 375 | csrow->last_page = cumul_size - 1; |
383 | csrow->nr_pages = cumul_size - last_cumul_size; | 376 | csrow->nr_pages = cumul_size - last_cumul_size; |
384 | last_cumul_size = cumul_size; | 377 | last_cumul_size = cumul_size; |
385 | csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */ | 378 | csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */ |
386 | csrow->mtype = MEM_DDR; | 379 | csrow->mtype = MEM_DDR; |
387 | csrow->dtype = DEV_UNKNOWN; | 380 | csrow->dtype = DEV_UNKNOWN; |
388 | csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; | 381 | csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; |
389 | } | 382 | } |
383 | } | ||
390 | 384 | ||
385 | static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | ||
386 | { | ||
387 | int rc = -ENODEV; | ||
388 | struct mem_ctl_info *mci; | ||
389 | struct i82875p_pvt *pvt; | ||
390 | struct pci_dev *ovrfl_pdev; | ||
391 | void __iomem *ovrfl_window; | ||
392 | u32 drc; | ||
393 | u32 nr_chans; | ||
394 | struct i82875p_error_info discard; | ||
395 | |||
396 | debugf0("%s()\n", __func__); | ||
397 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | ||
398 | |||
399 | if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) | ||
400 | return -ENODEV; | ||
401 | drc = readl(ovrfl_window + I82875P_DRC); | ||
402 | nr_chans = dual_channel_active(drc) + 1; | ||
403 | mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), | ||
404 | nr_chans); | ||
405 | |||
406 | if (!mci) { | ||
407 | rc = -ENOMEM; | ||
408 | goto fail0; | ||
409 | } | ||
410 | |||
411 | debugf3("%s(): init mci\n", __func__); | ||
412 | mci->dev = &pdev->dev; | ||
413 | mci->mtype_cap = MEM_FLAG_DDR; | ||
414 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | ||
415 | mci->edac_cap = EDAC_FLAG_UNKNOWN; | ||
416 | mci->mod_name = EDAC_MOD_STR; | ||
417 | mci->mod_ver = I82875P_REVISION; | ||
418 | mci->ctl_name = i82875p_devs[dev_idx].ctl_name; | ||
419 | mci->edac_check = i82875p_check; | ||
420 | mci->ctl_page_to_phys = NULL; | ||
421 | debugf3("%s(): init pvt\n", __func__); | ||
422 | pvt = (struct i82875p_pvt *) mci->pvt_info; | ||
423 | pvt->ovrfl_pdev = ovrfl_pdev; | ||
424 | pvt->ovrfl_window = ovrfl_window; | ||
425 | i82875p_init_csrows(mci, pdev, ovrfl_window, drc); | ||
391 | i82875p_get_error_info(mci, &discard); /* clear counters */ | 426 | i82875p_get_error_info(mci, &discard); /* clear counters */ |
392 | 427 | ||
393 | /* Here we assume that we will never see multiple instances of this | 428 | /* Here we assume that we will never see multiple instances of this |
394 | * type of memory controller. The ID is therefore hardcoded to 0. | 429 | * type of memory controller. The ID is therefore hardcoded to 0. |
395 | */ | 430 | */ |
396 | if (edac_mc_add_mc(mci,0)) { | 431 | if (edac_mc_add_mc(mci,0)) { |
397 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); | 432 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
398 | goto fail3; | 433 | goto fail1; |
399 | } | 434 | } |
400 | 435 | ||
401 | /* get this far and it's successful */ | 436 | /* get this far and it's successful */ |
402 | debugf3("%s(): success\n", __func__); | 437 | debugf3("%s(): success\n", __func__); |
403 | return 0; | 438 | return 0; |
404 | 439 | ||
405 | fail3: | 440 | fail1: |
406 | edac_mc_free(mci); | 441 | edac_mc_free(mci); |
407 | 442 | ||
408 | fail2: | 443 | fail0: |
409 | iounmap(ovrfl_window); | 444 | iounmap(ovrfl_window); |
410 | |||
411 | fail1: | ||
412 | pci_release_regions(ovrfl_pdev); | 445 | pci_release_regions(ovrfl_pdev); |
413 | 446 | ||
414 | #ifdef CORRECT_BIOS | ||
415 | fail0: | ||
416 | #endif | ||
417 | pci_disable_device(ovrfl_pdev); | 447 | pci_disable_device(ovrfl_pdev); |
418 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ | 448 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ |
419 | return rc; | 449 | return rc; |
420 | } | 450 | } |
421 | 451 | ||
422 | /* returns count (>= 0), or negative on error */ | 452 | /* returns count (>= 0), or negative on error */ |
423 | static int __devinit i82875p_init_one(struct pci_dev *pdev, | 453 | static int __devinit i82875p_init_one(struct pci_dev *pdev, |
424 | const struct pci_device_id *ent) | 454 | const struct pci_device_id *ent) |
425 | { | 455 | { |
426 | int rc; | 456 | int rc; |
427 | 457 | ||
428 | debugf0("%s()\n", __func__); | 458 | debugf0("%s()\n", __func__); |
429 | i82875p_printk(KERN_INFO, "i82875p init one\n"); | 459 | i82875p_printk(KERN_INFO, "i82875p init one\n"); |
430 | 460 | ||
431 | if (pci_enable_device(pdev) < 0) | 461 | if (pci_enable_device(pdev) < 0) |
432 | return -EIO; | 462 | return -EIO; |
433 | 463 | ||
434 | rc = i82875p_probe1(pdev, ent->driver_data); | 464 | rc = i82875p_probe1(pdev, ent->driver_data); |
435 | 465 | ||
436 | if (mci_pdev == NULL) | 466 | if (mci_pdev == NULL) |
437 | mci_pdev = pci_dev_get(pdev); | 467 | mci_pdev = pci_dev_get(pdev); |
438 | 468 | ||
439 | return rc; | 469 | return rc; |
440 | } | 470 | } |
441 | 471 | ||
442 | static void __devexit i82875p_remove_one(struct pci_dev *pdev) | 472 | static void __devexit i82875p_remove_one(struct pci_dev *pdev) |
443 | { | 473 | { |
444 | struct mem_ctl_info *mci; | 474 | struct mem_ctl_info *mci; |
445 | struct i82875p_pvt *pvt = NULL; | 475 | struct i82875p_pvt *pvt = NULL; |
446 | 476 | ||
447 | debugf0("%s()\n", __func__); | 477 | debugf0("%s()\n", __func__); |
448 | 478 | ||
449 | if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) | 479 | if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) |
450 | return; | 480 | return; |
451 | 481 | ||
452 | pvt = (struct i82875p_pvt *) mci->pvt_info; | 482 | pvt = (struct i82875p_pvt *) mci->pvt_info; |
453 | 483 | ||
454 | if (pvt->ovrfl_window) | 484 | if (pvt->ovrfl_window) |
455 | iounmap(pvt->ovrfl_window); | 485 | iounmap(pvt->ovrfl_window); |
456 | 486 | ||
457 | if (pvt->ovrfl_pdev) { | 487 | if (pvt->ovrfl_pdev) { |
458 | #ifdef CORRECT_BIOS | 488 | #ifdef CORRECT_BIOS |
459 | pci_release_regions(pvt->ovrfl_pdev); | 489 | pci_release_regions(pvt->ovrfl_pdev); |
460 | #endif /*CORRECT_BIOS */ | 490 | #endif /*CORRECT_BIOS */ |
461 | pci_disable_device(pvt->ovrfl_pdev); | 491 | pci_disable_device(pvt->ovrfl_pdev); |
462 | pci_dev_put(pvt->ovrfl_pdev); | 492 | pci_dev_put(pvt->ovrfl_pdev); |
463 | } | 493 | } |
464 | 494 | ||
465 | edac_mc_free(mci); | 495 | edac_mc_free(mci); |
466 | } | 496 | } |
467 | 497 | ||
468 | static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { | 498 | static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { |
469 | { | 499 | { |
470 | PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 500 | PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
471 | I82875P | 501 | I82875P |
472 | }, | 502 | }, |
473 | { | 503 | { |
474 | 0, | 504 | 0, |
475 | } /* 0 terminated list. */ | 505 | } /* 0 terminated list. */ |
476 | }; | 506 | }; |
477 | 507 | ||
478 | MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); | 508 | MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); |
479 | 509 | ||
480 | static struct pci_driver i82875p_driver = { | 510 | static struct pci_driver i82875p_driver = { |
481 | .name = EDAC_MOD_STR, | 511 | .name = EDAC_MOD_STR, |
482 | .probe = i82875p_init_one, | 512 | .probe = i82875p_init_one, |
483 | .remove = __devexit_p(i82875p_remove_one), | 513 | .remove = __devexit_p(i82875p_remove_one), |
484 | .id_table = i82875p_pci_tbl, | 514 | .id_table = i82875p_pci_tbl, |
485 | }; | 515 | }; |
486 | 516 | ||
487 | static int __init i82875p_init(void) | 517 | static int __init i82875p_init(void) |
488 | { | 518 | { |
489 | int pci_rc; | 519 | int pci_rc; |
490 | 520 | ||
491 | debugf3("%s()\n", __func__); | 521 | debugf3("%s()\n", __func__); |
492 | pci_rc = pci_register_driver(&i82875p_driver); | 522 | pci_rc = pci_register_driver(&i82875p_driver); |
493 | 523 | ||
494 | if (pci_rc < 0) | 524 | if (pci_rc < 0) |
495 | goto fail0; | 525 | goto fail0; |
496 | 526 | ||
497 | if (mci_pdev == NULL) { | 527 | if (mci_pdev == NULL) { |
498 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 528 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
499 | PCI_DEVICE_ID_INTEL_82875_0, NULL); | 529 | PCI_DEVICE_ID_INTEL_82875_0, NULL); |
500 | 530 | ||
501 | if (!mci_pdev) { | 531 | if (!mci_pdev) { |
502 | debugf0("875p pci_get_device fail\n"); | 532 | debugf0("875p pci_get_device fail\n"); |
503 | pci_rc = -ENODEV; | 533 | pci_rc = -ENODEV; |
504 | goto fail1; | 534 | goto fail1; |
505 | } | 535 | } |
506 | 536 | ||
507 | pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); | 537 | pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); |
508 | 538 | ||
509 | if (pci_rc < 0) { | 539 | if (pci_rc < 0) { |
510 | debugf0("875p init fail\n"); | 540 | debugf0("875p init fail\n"); |
drivers/edac/r82600_edac.c
1 | /* | 1 | /* |
2 | * Radisys 82600 Embedded chipset Memory Controller kernel module | 2 | * Radisys 82600 Embedded chipset Memory Controller kernel module |
3 | * (C) 2005 EADS Astrium | 3 | * (C) 2005 EADS Astrium |
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne | 7 | * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne |
8 | * Harbaugh, Dan Hollis <goemon at anime dot net> and others. | 8 | * Harbaugh, Dan Hollis <goemon at anime dot net> and others. |
9 | * | 9 | * |
10 | * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $ | 10 | * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $ |
11 | * | 11 | * |
12 | * Written with reference to 82600 High Integration Dual PCI System | 12 | * Written with reference to 82600 High Integration Dual PCI System |
13 | * Controller Data Book: | 13 | * Controller Data Book: |
14 | * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf | 14 | * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf |
15 | * references to this document given in [] | 15 | * references to this document given in [] |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/config.h> | 18 | #include <linux/config.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
22 | #include <linux/pci_ids.h> | 22 | #include <linux/pci_ids.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include "edac_mc.h" | 24 | #include "edac_mc.h" |
25 | 25 | ||
26 | #define R82600_REVISION " Ver: 2.0.0 " __DATE__ | 26 | #define R82600_REVISION " Ver: 2.0.0 " __DATE__ |
27 | 27 | ||
28 | #define r82600_printk(level, fmt, arg...) \ | 28 | #define r82600_printk(level, fmt, arg...) \ |
29 | edac_printk(level, "r82600", fmt, ##arg) | 29 | edac_printk(level, "r82600", fmt, ##arg) |
30 | 30 | ||
31 | #define r82600_mc_printk(mci, level, fmt, arg...) \ | 31 | #define r82600_mc_printk(mci, level, fmt, arg...) \ |
32 | edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg) | 32 | edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg) |
33 | 33 | ||
34 | /* Radisys say "The 82600 integrates a main memory SDRAM controller that | 34 | /* Radisys say "The 82600 integrates a main memory SDRAM controller that |
35 | * supports up to four banks of memory. The four banks can support a mix of | 35 | * supports up to four banks of memory. The four banks can support a mix of |
36 | * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs, | 36 | * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs, |
37 | * each of which can be any size from 16MB to 512MB. Both registered (control | 37 | * each of which can be any size from 16MB to 512MB. Both registered (control |
38 | * signals buffered) and unbuffered DIMM types are supported. Mixing of | 38 | * signals buffered) and unbuffered DIMM types are supported. Mixing of |
39 | * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs | 39 | * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs |
40 | * is not allowed. The 82600 SDRAM interface operates at the same frequency as | 40 | * is not allowed. The 82600 SDRAM interface operates at the same frequency as |
41 | * the CPU bus, 66MHz, 100MHz or 133MHz." | 41 | * the CPU bus, 66MHz, 100MHz or 133MHz." |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #define R82600_NR_CSROWS 4 | 44 | #define R82600_NR_CSROWS 4 |
45 | #define R82600_NR_CHANS 1 | 45 | #define R82600_NR_CHANS 1 |
46 | #define R82600_NR_DIMMS 4 | 46 | #define R82600_NR_DIMMS 4 |
47 | 47 | ||
48 | #define R82600_BRIDGE_ID 0x8200 | 48 | #define R82600_BRIDGE_ID 0x8200 |
49 | 49 | ||
50 | /* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */ | 50 | /* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */ |
51 | #define R82600_DRAMC 0x57 /* Various SDRAM related control bits | 51 | #define R82600_DRAMC 0x57 /* Various SDRAM related control bits |
52 | * all bits are R/W | 52 | * all bits are R/W |
53 | * | 53 | * |
54 | * 7 SDRAM ISA Hole Enable | 54 | * 7 SDRAM ISA Hole Enable |
55 | * 6 Flash Page Mode Enable | 55 | * 6 Flash Page Mode Enable |
56 | * 5 ECC Enable: 1=ECC 0=noECC | 56 | * 5 ECC Enable: 1=ECC 0=noECC |
57 | * 4 DRAM DIMM Type: 1= | 57 | * 4 DRAM DIMM Type: 1= |
58 | * 3 BIOS Alias Disable | 58 | * 3 BIOS Alias Disable |
59 | * 2 SDRAM BIOS Flash Write Enable | 59 | * 2 SDRAM BIOS Flash Write Enable |
60 | * 1:0 SDRAM Refresh Rate: 00=Disabled | 60 | * 1:0 SDRAM Refresh Rate: 00=Disabled |
61 | * 01=7.8usec (256Mbit SDRAMs) | 61 | * 01=7.8usec (256Mbit SDRAMs) |
62 | * 10=15.6us 11=125usec | 62 | * 10=15.6us 11=125usec |
63 | */ | 63 | */ |
64 | 64 | ||
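/*
 * Illustrative sketch only (not part of this patch): extracting the DRAMC
 * fields this driver uses, per the bit layout documented above.
 * example_decode_dramc() is hypothetical and assumes the driver's
 * existing includes.
 */
static void example_decode_dramc(u8 dramcr)
{
	int ecc_on = !!(dramcr & BIT(5));		/* bit 5: ECC enable */
	int registered = !!(dramcr & BIT(4));		/* bit 4: DIMM type */
	u32 refresh = dramcr & (BIT(0) | BIT(1));	/* bits 1:0: refresh rate */

	r82600_printk(KERN_DEBUG, "ecc=%d registered=%d refresh=%#x\n",
		ecc_on, registered, refresh);
}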
65 | #define R82600_SDRAMC 0x76 /* "SDRAM Control Register" | 65 | #define R82600_SDRAMC 0x76 /* "SDRAM Control Register" |
66 | * More SDRAM related control bits | 66 | * More SDRAM related control bits |
67 | * all bits are R/W | 67 | * all bits are R/W |
68 | * | 68 | * |
69 | * 15:8 Reserved. | 69 | * 15:8 Reserved. |
70 | * | 70 | * |
71 | * 7:5 Special SDRAM Mode Select | 71 | * 7:5 Special SDRAM Mode Select |
72 | * | 72 | * |
73 | * 4 Force ECC | 73 | * 4 Force ECC |
74 | * | 74 | * |
75 | * 1=Drive ECC bits to 0 during | 75 | * 1=Drive ECC bits to 0 during |
76 | * write cycles (i.e. ECC test mode) | 76 | * write cycles (i.e. ECC test mode) |
77 | * | 77 | * |
78 | * 0=Normal ECC functioning | 78 | * 0=Normal ECC functioning |
79 | * | 79 | * |
80 | * 3 Enhanced Paging Enable | 80 | * 3 Enhanced Paging Enable |
81 | * | 81 | * |
82 | * 2 CAS# Latency 0=3clks 1=2clks | 82 | * 2 CAS# Latency 0=3clks 1=2clks |
83 | * | 83 | * |
84 | * 1 RAS# to CAS# Delay 0=3 1=2 | 84 | * 1 RAS# to CAS# Delay 0=3 1=2 |
85 | * | 85 | * |
86 | * 0 RAS# Precharge 0=3 1=2 | 86 | * 0 RAS# Precharge 0=3 1=2 |
87 | */ | 87 | */ |
88 | 88 | ||
89 | #define R82600_EAP 0x80 /* ECC Error Address Pointer Register | 89 | #define R82600_EAP 0x80 /* ECC Error Address Pointer Register |
90 | * | 90 | * |
91 | * 31 Disable Hardware Scrubbing (RW) | 91 | * 31 Disable Hardware Scrubbing (RW) |
92 | * 0=Scrub on corrected read | 92 | * 0=Scrub on corrected read |
93 | * 1=Don't scrub on corrected read | 93 | * 1=Don't scrub on corrected read |
94 | * | 94 | * |
95 | * 30:12 Error Address Pointer (RO) | 95 | * 30:12 Error Address Pointer (RO) |
96 | * Upper 19 bits of error address | 96 | * Upper 19 bits of error address |
97 | * | 97 | * |
98 | * 11:4 Syndrome Bits (RO) | 98 | * 11:4 Syndrome Bits (RO) |
99 | * | 99 | * |
100 | * 3 BSERR# on multibit error (RW) | 100 | * 3 BSERR# on multibit error (RW) |
101 | * 1=enable 0=disable | 101 | * 1=enable 0=disable |
102 | * | 102 | * |
103 | * 2 NMI on Single Bit Error (RW) | 103 | * 2 NMI on Single Bit Error (RW) |
104 | * 1=NMI triggered by SBE n.b. other | 104 | * 1=NMI triggered by SBE n.b. other |
105 | * prerequisites | 105 | * prerequisites |
106 | * 0=NMI not triggered | 106 | * 0=NMI not triggered |
107 | * | 107 | * |
108 | * 1 MBE (R/WC) | 108 | * 1 MBE (R/WC) |
109 | * read 1=MBE at EAP (see above) | 109 | * read 1=MBE at EAP (see above) |
110 | * read 0=no MBE, or SBE occurred first | 110 | * read 0=no MBE, or SBE occurred first |
111 | * write 1=Clear MBE status (must also | 111 | * write 1=Clear MBE status (must also |
112 | * clear SBE) | 112 | * clear SBE) |
113 | * write 0=NOP | 113 | * write 0=NOP |
114 | * | 114 | * |
115 | * 0 SBE (R/WC) | 115 | * 0 SBE (R/WC) |
116 | * read 1=SBE at EAP (see above) | 116 | * read 1=SBE at EAP (see above) |
117 | * read 0=no SBE, or MBE occurred first | 117 | * read 0=no SBE, or MBE occurred first |
118 | * write 1=Clear SBE status (must also | 118 | * write 1=Clear SBE status (must also |
119 | * clear MBE) | 119 | * clear MBE) |
120 | * write 0=NOP | 120 | * write 0=NOP |
121 | */ | 121 | */ |
122 | 122 | ||
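/*
 * Illustrative sketch only (not part of this patch): unpacking the EAP
 * fields described above.  example_decode_eap() is hypothetical and
 * assumes the driver's existing includes.
 */
static void example_decode_eap(u32 eapr)
{
	u32 addr_bits = eapr & 0x7FFFF000;	/* bits 30:12: error address field, kept in place */
	u32 syndrome = (eapr >> 4) & 0xFF;	/* bits 11:4 */
	int scrub_disabled = !!(eapr & BIT(31));
	int mbe = !!(eapr & BIT(1));		/* multi-bit error latched */
	int sbe = !!(eapr & BIT(0));		/* single-bit error latched */

	r82600_printk(KERN_DEBUG,
		"eap=%#x addr_bits=%#x syndrome=%#x scrub_off=%d mbe=%d sbe=%d\n",
		eapr, addr_bits, syndrome, scrub_disabled, mbe, sbe);
}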
123 | #define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundary Address | 123 | #define R82600_DRBA 0x60 /* + 0x60..0x63 SDRAM Row Boundary Address |
124 | * Registers | 124 | * Registers |
125 | * | 125 | * |
126 | * 7:0 Address lines 30:24 - upper limit of | 126 | * 7:0 Address lines 30:24 - upper limit of |
127 | * each row [p57] | 127 | * each row [p57] |
128 | */ | 128 | */ |
129 | 129 | ||
130 | struct r82600_error_info { | 130 | struct r82600_error_info { |
131 | u32 eapr; | 131 | u32 eapr; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | static unsigned int disable_hardware_scrub = 0; | 134 | static unsigned int disable_hardware_scrub = 0; |
135 | 135 | ||
136 | static void r82600_get_error_info (struct mem_ctl_info *mci, | 136 | static void r82600_get_error_info (struct mem_ctl_info *mci, |
137 | struct r82600_error_info *info) | 137 | struct r82600_error_info *info) |
138 | { | 138 | { |
139 | struct pci_dev *pdev; | 139 | struct pci_dev *pdev; |
140 | 140 | ||
141 | pdev = to_pci_dev(mci->dev); | 141 | pdev = to_pci_dev(mci->dev); |
142 | pci_read_config_dword(pdev, R82600_EAP, &info->eapr); | 142 | pci_read_config_dword(pdev, R82600_EAP, &info->eapr); |
143 | 143 | ||
144 | if (info->eapr & BIT(0)) | 144 | if (info->eapr & BIT(0)) |
145 | /* Clear error to allow next error to be reported [p.62] */ | 145 | /* Clear error to allow next error to be reported [p.62] */ |
146 | pci_write_bits32(pdev, R82600_EAP, | 146 | pci_write_bits32(pdev, R82600_EAP, |
147 | ((u32) BIT(0) | (u32) BIT(1)), | 147 | ((u32) BIT(0) | (u32) BIT(1)), |
148 | ((u32) BIT(0) | (u32) BIT(1))); | 148 | ((u32) BIT(0) | (u32) BIT(1))); |
149 | 149 | ||
150 | if (info->eapr & BIT(1)) | 150 | if (info->eapr & BIT(1)) |
151 | /* Clear error to allow next error to be reported [p.62] */ | 151 | /* Clear error to allow next error to be reported [p.62] */ |
152 | pci_write_bits32(pdev, R82600_EAP, | 152 | pci_write_bits32(pdev, R82600_EAP, |
153 | ((u32) BIT(0) | (u32) BIT(1)), | 153 | ((u32) BIT(0) | (u32) BIT(1)), |
154 | ((u32) BIT(0) | (u32) BIT(1))); | 154 | ((u32) BIT(0) | (u32) BIT(1))); |
155 | } | 155 | } |
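/*
 * Illustrative sketch only (not part of this patch): both R/WC status bits
 * can be cleared with one call, assuming pci_write_bits32() takes a
 * (value, mask) pair and only touches the masked bits.
 * example_clear_eap_status() is hypothetical.
 */
static void example_clear_eap_status(struct pci_dev *pdev)
{
	/* write 1s to bits 1:0 (SBE/MBE), leave every other EAP bit alone */
	pci_write_bits32(pdev, R82600_EAP, BIT(0) | BIT(1), BIT(0) | BIT(1));
}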
156 | 156 | ||
157 | static int r82600_process_error_info (struct mem_ctl_info *mci, | 157 | static int r82600_process_error_info (struct mem_ctl_info *mci, |
158 | struct r82600_error_info *info, int handle_errors) | 158 | struct r82600_error_info *info, int handle_errors) |
159 | { | 159 | { |
160 | int error_found; | 160 | int error_found; |
161 | u32 eapaddr, page; | 161 | u32 eapaddr, page; |
162 | u32 syndrome; | 162 | u32 syndrome; |
163 | 163 | ||
164 | error_found = 0; | 164 | error_found = 0; |
165 | 165 | ||
166 | /* bits 30:12 store the upper 19 bits of the 32 bit error address */ | 166 | /* bits 30:12 store the upper 19 bits of the 32 bit error address */ |
167 | eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13; | 167 | eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13; |
168 | /* Syndrome in bits 11:4 [p.62] */ | 168 | /* Syndrome in bits 11:4 [p.62] */ |
169 | syndrome = (info->eapr >> 4) & 0xFF; | 169 | syndrome = (info->eapr >> 4) & 0xFF; |
170 | 170 | ||
171 | /* the R82600 reports at less than page * | 171 | /* the R82600 reports at less than page * |
172 | * granularity (upper 19 bits only) */ | 172 | * granularity (upper 19 bits only) */ |
173 | page = eapaddr >> PAGE_SHIFT; | 173 | page = eapaddr >> PAGE_SHIFT; |
174 | 174 | ||
175 | if (info->eapr & BIT(0)) { /* CE? */ | 175 | if (info->eapr & BIT(0)) { /* CE? */ |
176 | error_found = 1; | 176 | error_found = 1; |
177 | 177 | ||
178 | if (handle_errors) | 178 | if (handle_errors) |
179 | edac_mc_handle_ce(mci, page, 0, /* not avail */ | 179 | edac_mc_handle_ce(mci, page, 0, /* not avail */ |
180 | syndrome, | 180 | syndrome, |
181 | edac_mc_find_csrow_by_page(mci, page), | 181 | edac_mc_find_csrow_by_page(mci, page), |
182 | 0, /* channel */ | 182 | 0, /* channel */ |
183 | mci->ctl_name); | 183 | mci->ctl_name); |
184 | } | 184 | } |
185 | 185 | ||
186 | if (info->eapr & BIT(1)) { /* UE? */ | 186 | if (info->eapr & BIT(1)) { /* UE? */ |
187 | error_found = 1; | 187 | error_found = 1; |
188 | 188 | ||
189 | if (handle_errors) | 189 | if (handle_errors) |
190 | /* 82600 doesn't give enough info */ | 190 | /* 82600 doesn't give enough info */ |
191 | edac_mc_handle_ue(mci, page, 0, | 191 | edac_mc_handle_ue(mci, page, 0, |
192 | edac_mc_find_csrow_by_page(mci, page), | 192 | edac_mc_find_csrow_by_page(mci, page), |
193 | mci->ctl_name); | 193 | mci->ctl_name); |
194 | } | 194 | } |
195 | 195 | ||
196 | return error_found; | 196 | return error_found; |
197 | } | 197 | } |
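/*
 * Worked example (editorial, not part of this patch) of the arithmetic in
 * r82600_process_error_info(), assuming PAGE_SHIFT == 12:
 *
 *   eapr     = 0x12345678
 *   eapaddr  = ((0x12345678 >> 12) & 0x7FFF) << 13 = 0x2345 << 13 = 0x468a000
 *   syndrome = (0x12345678 >> 4) & 0xFF             = 0x67
 *   page     = 0x468a000 >> 12                      = 0x468a
 */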
198 | 198 | ||
199 | static void r82600_check(struct mem_ctl_info *mci) | 199 | static void r82600_check(struct mem_ctl_info *mci) |
200 | { | 200 | { |
201 | struct r82600_error_info info; | 201 | struct r82600_error_info info; |
202 | 202 | ||
203 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); | 203 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
204 | r82600_get_error_info(mci, &info); | 204 | r82600_get_error_info(mci, &info); |
205 | r82600_process_error_info(mci, &info, 1); | 205 | r82600_process_error_info(mci, &info, 1); |
206 | } | 206 | } |
207 | 207 | ||
208 | static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | 208 | static inline int ecc_enabled(u8 dramcr) |
209 | { | 209 | { |
210 | int rc = -ENODEV; | 210 | return dramcr & BIT(5); |
211 | } | ||
212 | |||
213 | static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, | ||
214 | u8 dramcr) | ||
215 | { | ||
216 | struct csrow_info *csrow; | ||
211 | int index; | 217 | int index; |
212 | struct mem_ctl_info *mci = NULL; | 218 | u8 drbar; /* SDRAM Row Boundary Address Register */ |
219 | u32 row_high_limit, row_high_limit_last; | ||
220 | u32 reg_sdram, ecc_on, row_base; | ||
221 | |||
222 | ecc_on = ecc_enabled(dramcr); | ||
223 | reg_sdram = dramcr & BIT(4); | ||
224 | row_high_limit_last = 0; | ||
225 | |||
226 | for (index = 0; index < mci->nr_csrows; index++) { | ||
227 | csrow = &mci->csrows[index]; | ||
228 | |||
229 | /* find the DRAM Chip Select Base address and mask */ | ||
230 | pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); | ||
231 | |||
232 | debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar); | ||
233 | |||
234 | row_high_limit = ((u32) drbar << 24); | ||
235 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | ||
236 | |||
237 | debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n", | ||
238 | __func__, index, row_high_limit, row_high_limit_last); | ||
239 | |||
240 | /* Empty row [p.57] */ | ||
241 | if (row_high_limit == row_high_limit_last) | ||
242 | continue; | ||
243 | |||
244 | row_base = row_high_limit_last; | ||
245 | |||
246 | csrow->first_page = row_base >> PAGE_SHIFT; | ||
247 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | ||
248 | csrow->nr_pages = csrow->last_page - csrow->first_page + 1; | ||
249 | /* Error address is top 19 bits - so granularity is * | ||
250 | * 14 bits */ | ||
251 | csrow->grain = 1 << 14; | ||
252 | csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; | ||
253 | /* FIXME - check that this is unknowable with this chipset */ | ||
254 | csrow->dtype = DEV_UNKNOWN; | ||
255 | |||
256 | /* Mode is global on 82600 */ | ||
257 | csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; | ||
258 | row_high_limit_last = row_high_limit; | ||
259 | } | ||
260 | } | ||
261 | |||
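/*
 * Worked example (editorial, not part of this patch) of the row mapping in
 * r82600_init_csrows(), assuming PAGE_SHIFT == 12 and hypothetical DRBA
 * values 0x04, 0x08, 0x08, 0x10:
 *
 *   row 0: limit 0x04000000 (64MB)  -> first_page 0x0000, last_page 0x3fff
 *   row 1: limit 0x08000000 (128MB) -> first_page 0x4000, last_page 0x7fff
 *   row 2: limit 0x08000000 == previous limit -> empty row, skipped
 *   row 3: limit 0x10000000 (256MB) -> first_page 0x8000, last_page 0xffff
 */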
262 | static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | ||
263 | { | ||
264 | struct mem_ctl_info *mci; | ||
213 | u8 dramcr; | 265 | u8 dramcr; |
214 | u32 ecc_on; | ||
215 | u32 reg_sdram; | ||
216 | u32 eapr; | 266 | u32 eapr; |
217 | u32 scrub_disabled; | 267 | u32 scrub_disabled; |
218 | u32 sdram_refresh_rate; | 268 | u32 sdram_refresh_rate; |
219 | u32 row_high_limit_last = 0; | ||
220 | struct r82600_error_info discard; | 269 | struct r82600_error_info discard; |
221 | 270 | ||
222 | debugf0("%s()\n", __func__); | 271 | debugf0("%s()\n", __func__); |
223 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); | 272 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); |
224 | pci_read_config_dword(pdev, R82600_EAP, &eapr); | 273 | pci_read_config_dword(pdev, R82600_EAP, &eapr); |
225 | ecc_on = dramcr & BIT(5); | ||
226 | reg_sdram = dramcr & BIT(4); | ||
227 | scrub_disabled = eapr & BIT(31); | 274 | scrub_disabled = eapr & BIT(31); |
228 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); | 275 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); |
229 | debugf2("%s(): sdram refresh rate = %#0x\n", __func__, | 276 | debugf2("%s(): sdram refresh rate = %#0x\n", __func__, |
230 | sdram_refresh_rate); | 277 | sdram_refresh_rate); |
231 | debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); | 278 | debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); |
232 | mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); | 279 | mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); |
233 | 280 | ||
234 | if (mci == NULL) { | 281 | if (mci == NULL) |
235 | rc = -ENOMEM; | 282 | return -ENOMEM; |
236 | goto fail; | ||
237 | } | ||
238 | 283 | ||
239 | debugf0("%s(): mci = %p\n", __func__, mci); | 284 | debugf0("%s(): mci = %p\n", __func__, mci); |
240 | mci->dev = &pdev->dev; | 285 | mci->dev = &pdev->dev; |
241 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; | 286 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; |
242 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 287 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
243 | /* FIXME try to work out if the chip leads have been used for COM2 | 288 | /* FIXME try to work out if the chip leads have been used for COM2 |
244 | * instead on this board? [MA6?] MAYBE: | 289 | * instead on this board? [MA6?] MAYBE: |
245 | */ | 290 | */ |
246 | 291 | ||
247 | /* On the R82600, the pins for memory bits 72:65 - i.e. the * | 292 | /* On the R82600, the pins for memory bits 72:65 - i.e. the * |
248 | * EC bits are shared with the pins for COM2 (!), so if COM2 * | 293 | * EC bits are shared with the pins for COM2 (!), so if COM2 * |
249 | * is enabled, we assume COM2 is wired up, and thus no EDAC * | 294 | * is enabled, we assume COM2 is wired up, and thus no EDAC * |
250 | * is possible. */ | 295 | * is possible. */ |
251 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 296 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
252 | 297 | ||
253 | if (ecc_on) { | 298 | if (ecc_enabled(dramcr)) { |
254 | if (scrub_disabled) | 299 | if (scrub_disabled) |
255 | debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " | 300 | debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " |
256 | "%#0x\n", __func__, mci, eapr); | 301 | "%#0x\n", __func__, mci, eapr); |
257 | } else | 302 | } else |
258 | mci->edac_cap = EDAC_FLAG_NONE; | 303 | mci->edac_cap = EDAC_FLAG_NONE; |
259 | 304 | ||
260 | mci->mod_name = EDAC_MOD_STR; | 305 | mci->mod_name = EDAC_MOD_STR; |
261 | mci->mod_ver = R82600_REVISION; | 306 | mci->mod_ver = R82600_REVISION; |
262 | mci->ctl_name = "R82600"; | 307 | mci->ctl_name = "R82600"; |
263 | mci->edac_check = r82600_check; | 308 | mci->edac_check = r82600_check; |
264 | mci->ctl_page_to_phys = NULL; | 309 | mci->ctl_page_to_phys = NULL; |
265 | 310 | r82600_init_csrows(mci, pdev, dramcr); | |
266 | for (index = 0; index < mci->nr_csrows; index++) { | ||
267 | struct csrow_info *csrow = &mci->csrows[index]; | ||
268 | u8 drbar; /* sDram Row Boundry Address Register */ | ||
269 | u32 row_high_limit; | ||
270 | u32 row_base; | ||
271 | |||
272 | /* find the DRAM Chip Select Base address and mask */ | ||
273 | pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); | ||
274 | |||
275 | debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx, | ||
276 | __func__, index, drbar); | ||
277 | |||
278 | row_high_limit = ((u32) drbar << 24); | ||
279 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | ||
280 | |||
281 | debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = " | ||
282 | "%#0x \n", mci->mc_idx, __func__, index, | ||
283 | row_high_limit, row_high_limit_last); | ||
284 | |||
285 | /* Empty row [p.57] */ | ||
286 | if (row_high_limit == row_high_limit_last) | ||
287 | continue; | ||
288 | |||
289 | row_base = row_high_limit_last; | ||
290 | csrow->first_page = row_base >> PAGE_SHIFT; | ||
291 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | ||
292 | csrow->nr_pages = csrow->last_page - csrow->first_page + 1; | ||
293 | /* Error address is top 19 bits - so granularity is * | ||
294 | * 14 bits */ | ||
295 | csrow->grain = 1 << 14; | ||
296 | csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; | ||
297 | /* FIXME - check that this is unknowable with this chipset */ | ||
298 | csrow->dtype = DEV_UNKNOWN; | ||
299 | |||
300 | /* Mode is global on 82600 */ | ||
301 | csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; | ||
302 | row_high_limit_last = row_high_limit; | ||
303 | } | ||
304 | |||
305 | r82600_get_error_info(mci, &discard); /* clear counters */ | 311 | r82600_get_error_info(mci, &discard); /* clear counters */ |
306 | 312 | ||
307 | /* Here we assume that we will never see multiple instances of this | 313 | /* Here we assume that we will never see multiple instances of this |
308 | * type of memory controller. The ID is therefore hardcoded to 0. | 314 | * type of memory controller. The ID is therefore hardcoded to 0. |
309 | */ | 315 | */ |
310 | if (edac_mc_add_mc(mci,0)) { | 316 | if (edac_mc_add_mc(mci,0)) { |
311 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); | 317 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
312 | goto fail; | 318 | goto fail; |
313 | } | 319 | } |
314 | 320 | ||
315 | /* get this far and it's successful */ | 321 | /* get this far and it's successful */ |
316 | 322 | ||
317 | if (disable_hardware_scrub) { | 323 | if (disable_hardware_scrub) { |
318 | debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", | 324 | debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", |
319 | __func__); | 325 | __func__); |
320 | pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); | 326 | pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); |
321 | } | 327 | } |
322 | 328 | ||
323 | debugf3("%s(): success\n", __func__); | 329 | debugf3("%s(): success\n", __func__); |
324 | return 0; | 330 | return 0; |
325 | 331 | ||
326 | fail: | 332 | fail: |
327 | if (mci) | 333 | edac_mc_free(mci); |
328 | edac_mc_free(mci); | 334 | return -ENODEV; |
329 | |||
330 | return rc; | ||
331 | } | 335 | } |
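/*
 * Illustrative sketch only (not part of this patch): the
 * disable_hardware_scrub path above sets EAP bit 31 ("Don't scrub on
 * corrected read").  Assuming the same (value, mask) semantics of
 * pci_write_bits32(), clearing that bit would re-enable scrub on
 * corrected reads; example_enable_hw_scrub() is hypothetical.
 */
static void example_enable_hw_scrub(struct pci_dev *pdev)
{
	pci_write_bits32(pdev, R82600_EAP, 0, BIT(31));
}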
332 | 336 | ||
333 | /* returns count (>= 0), or negative on error */ | 337 | /* returns count (>= 0), or negative on error */ |
334 | static int __devinit r82600_init_one(struct pci_dev *pdev, | 338 | static int __devinit r82600_init_one(struct pci_dev *pdev, |
335 | const struct pci_device_id *ent) | 339 | const struct pci_device_id *ent) |
336 | { | 340 | { |
337 | debugf0("%s()\n", __func__); | 341 | debugf0("%s()\n", __func__); |
338 | 342 | ||
339 | /* don't need to call pci_device_enable() */ | 343 | /* don't need to call pci_device_enable() */ |
340 | return r82600_probe1(pdev, ent->driver_data); | 344 | return r82600_probe1(pdev, ent->driver_data); |
341 | } | 345 | } |
342 | 346 | ||
343 | static void __devexit r82600_remove_one(struct pci_dev *pdev) | 347 | static void __devexit r82600_remove_one(struct pci_dev *pdev) |
344 | { | 348 | { |
345 | struct mem_ctl_info *mci; | 349 | struct mem_ctl_info *mci; |
346 | 350 |