Commit 88d84ac97378c2f1d5fec9af1e8b7d9a662d6b00
Committed by: Tony Luck
1 parent: ad81f0545e
Exists in: smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
EDAC: Fix lockdep splat
Fix the following:

  BUG: key ffff88043bdd0330 not in .data!
  ------------[ cut here ]------------
  WARNING: at kernel/lockdep.c:2987 lockdep_init_map+0x565/0x5a0()
  DEBUG_LOCKS_WARN_ON(1)
  Modules linked in: glue_helper sb_edac(+) edac_core snd acpi_cpufreq lrw gf128mul ablk_helper iTCO_wdt evdev i2c_i801 dcdbas button cryptd pcspkr iTCO_vendor_support usb_common lpc_ich mfd_core soundcore mperf processor microcode
  CPU: 2 PID: 599 Comm: modprobe Not tainted 3.10.0 #1
  Hardware name: Dell Inc. Precision T3600/0PTTT9, BIOS A08 01/24/2013
   0000000000000009 ffff880439a1d920 ffffffff8160a9a9 ffff880439a1d958
   ffffffff8103d9e0 ffff88043af4a510 ffffffff81a16e11 0000000000000000
   ffff88043bdd0330 0000000000000000 ffff880439a1d9b8 ffffffff8103dacc
  Call Trace:
    dump_stack
    warn_slowpath_common
    warn_slowpath_fmt
    lockdep_init_map
    ? trace_hardirqs_on_caller
    ? trace_hardirqs_on
    debug_mutex_init
    __mutex_init
    bus_register
    edac_create_sysfs_mci_device
    edac_mc_add_mc
    sbridge_probe
    pci_device_probe
    driver_probe_device
    __driver_attach
    ? driver_probe_device
    bus_for_each_dev
    driver_attach
    bus_add_driver
    driver_register
    __pci_register_driver
    ? 0xffffffffa0010fff
    sbridge_init
    ? 0xffffffffa0010fff
    do_one_initcall
    load_module
    ? unset_module_init_ro_nx
    SyS_init_module
    tracesys
  ---[ end trace d24a70b0d3ddf733 ]---
  EDAC MC0: Giving out device to 'sbridge_edac.c' 'Sandy Bridge Socket#0': DEV 0000:3f:0e.0
  EDAC sbridge: Driver loaded.

What happens is that bus_register needs a statically allocated lock_key
because the latter is handed in to lockdep. However, struct mem_ctl_info
embeds struct bus_type (the whole struct, not a pointer to it) and the
whole thing gets dynamically allocated.

Fix this by using a statically allocated struct bus_type for the MC bus.

Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Mauro Carvalho Chehab <mchehab@infradead.org>
Cc: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: stable@kernel.org # v3.10
Signed-off-by: Tony Luck <tony.luck@intel.com>
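The rule being enforced: lockdep identifies a lock's class by the address of its lock_class_key, and that address must lie in static storage (.data/.bss). mutex_init() normally guarantees this by declaring a static key at its call site, but struct bus_type of this era carries its own lock_class_key, so embedding the whole struct in a kzalloc'ed mem_ctl_info hands lockdep a heap address. Below is a minimal sketch of the fix pattern, written around a hypothetical "foo" object (foo_ctl, foo_bus and FOO_MAX_CTLS are illustrative names, not EDAC code):

    #include <linux/device.h>
    #include <linux/slab.h>

    #define FOO_MAX_CTLS 8                /* hypothetical instance limit */

    static struct bus_type foo_bus[FOO_MAX_CTLS];   /* key sits in .bss: OK */

    struct foo_ctl {
            struct bus_type *bus;         /* a pointer, not an embedded struct */
    };

    static struct foo_ctl *foo_alloc(int idx)
    {
            struct foo_ctl *ctl;

            if (idx >= FOO_MAX_CTLS)      /* mirrors the EDAC_MAX_MCS check */
                    return NULL;

            ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
            if (!ctl)
                    return NULL;

            /* mirrors mci->bus = &mc_bus[mci->mc_idx] in the hunk below */
            ctl->bus = &foo_bus[idx];
            ctl->bus->name = "foo";

            /* the key handed to lockdep now lives in static storage */
            if (bus_register(ctl->bus)) {
                    kfree(ctl);
                    return NULL;
            }
            return ctl;
    }

Had foo_ctl embedded struct bus_type directly, bus_register() -> __mutex_init() would register a key living inside the kzalloc'ed chunk, which is exactly the "key ... not in .data" warning above. Of the four changed files, only edac_mc.c is shown in this excerpt; the companion hunks presumably switch struct mem_ctl_info over to the bus_type pointer.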
Showing 4 changed files with 31 additions and 15 deletions
drivers/edac/edac_mc.c
1 | /* | 1 | /* |
2 | * edac_mc kernel module | 2 | * edac_mc kernel module |
3 | * (C) 2005, 2006 Linux Networx (http://lnxi.com) | 3 | * (C) 2005, 2006 Linux Networx (http://lnxi.com) |
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * Written by Thayne Harbaugh | 7 | * Written by Thayne Harbaugh |
8 | * Based on work by Dan Hollis <goemon at anime dot net> and others. | 8 | * Based on work by Dan Hollis <goemon at anime dot net> and others. |
9 | * http://www.anime.net/~goemon/linux-ecc/ | 9 | * http://www.anime.net/~goemon/linux-ecc/ |
10 | * | 10 | * |
11 | * Modified by Dave Peterson and Doug Thompson | 11 | * Modified by Dave Peterson and Doug Thompson |
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/sysctl.h> | 21 | #include <linux/sysctl.h> |
22 | #include <linux/highmem.h> | 22 | #include <linux/highmem.h> |
23 | #include <linux/timer.h> | 23 | #include <linux/timer.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/jiffies.h> | 25 | #include <linux/jiffies.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <linux/ctype.h> | 28 | #include <linux/ctype.h> |
29 | #include <linux/edac.h> | 29 | #include <linux/edac.h> |
30 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/page.h> | 32 | #include <asm/page.h> |
33 | #include <asm/edac.h> | 33 | #include <asm/edac.h> |
34 | #include "edac_core.h" | 34 | #include "edac_core.h" |
35 | #include "edac_module.h" | 35 | #include "edac_module.h" |
36 | 36 | ||
37 | #define CREATE_TRACE_POINTS | 37 | #define CREATE_TRACE_POINTS |
38 | #define TRACE_INCLUDE_PATH ../../include/ras | 38 | #define TRACE_INCLUDE_PATH ../../include/ras |
39 | #include <ras/ras_event.h> | 39 | #include <ras/ras_event.h> |
40 | 40 | ||
41 | /* lock to memory controller's control array */ | 41 | /* lock to memory controller's control array */ |
42 | static DEFINE_MUTEX(mem_ctls_mutex); | 42 | static DEFINE_MUTEX(mem_ctls_mutex); |
43 | static LIST_HEAD(mc_devices); | 43 | static LIST_HEAD(mc_devices); |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * Used to lock EDAC MC to just one module, avoiding two drivers e.g. | 46 | * Used to lock EDAC MC to just one module, avoiding two drivers e.g. |
47 | * apei/ghes and i7core_edac to be used at the same time. | 47 | * apei/ghes and i7core_edac to be used at the same time. |
48 | */ | 48 | */ |
49 | static void const *edac_mc_owner; | 49 | static void const *edac_mc_owner; |
50 | 50 | ||
51 | static struct bus_type mc_bus[EDAC_MAX_MCS]; | ||
52 | |||
51 | unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, | 53 | unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, |
52 | unsigned len) | 54 | unsigned len) |
53 | { | 55 | { |
54 | struct mem_ctl_info *mci = dimm->mci; | 56 | struct mem_ctl_info *mci = dimm->mci; |
55 | int i, n, count = 0; | 57 | int i, n, count = 0; |
56 | char *p = buf; | 58 | char *p = buf; |
57 | 59 | ||
58 | for (i = 0; i < mci->n_layers; i++) { | 60 | for (i = 0; i < mci->n_layers; i++) { |
59 | n = snprintf(p, len, "%s %d ", | 61 | n = snprintf(p, len, "%s %d ", |
60 | edac_layer_name[mci->layers[i].type], | 62 | edac_layer_name[mci->layers[i].type], |
61 | dimm->location[i]); | 63 | dimm->location[i]); |
62 | p += n; | 64 | p += n; |
63 | len -= n; | 65 | len -= n; |
64 | count += n; | 66 | count += n; |
65 | if (!len) | 67 | if (!len) |
66 | break; | 68 | break; |
67 | } | 69 | } |
68 | 70 | ||
69 | return count; | 71 | return count; |
70 | } | 72 | } |
71 | 73 | ||
72 | #ifdef CONFIG_EDAC_DEBUG | 74 | #ifdef CONFIG_EDAC_DEBUG |
73 | 75 | ||
74 | static void edac_mc_dump_channel(struct rank_info *chan) | 76 | static void edac_mc_dump_channel(struct rank_info *chan) |
75 | { | 77 | { |
76 | edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx); | 78 | edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx); |
77 | edac_dbg(4, " channel = %p\n", chan); | 79 | edac_dbg(4, " channel = %p\n", chan); |
78 | edac_dbg(4, " channel->csrow = %p\n", chan->csrow); | 80 | edac_dbg(4, " channel->csrow = %p\n", chan->csrow); |
79 | edac_dbg(4, " channel->dimm = %p\n", chan->dimm); | 81 | edac_dbg(4, " channel->dimm = %p\n", chan->dimm); |
80 | } | 82 | } |
81 | 83 | ||
82 | static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) | 84 | static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) |
83 | { | 85 | { |
84 | char location[80]; | 86 | char location[80]; |
85 | 87 | ||
86 | edac_dimm_info_location(dimm, location, sizeof(location)); | 88 | edac_dimm_info_location(dimm, location, sizeof(location)); |
87 | 89 | ||
88 | edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", | 90 | edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", |
89 | dimm->mci->csbased ? "rank" : "dimm", | 91 | dimm->mci->csbased ? "rank" : "dimm", |
90 | number, location, dimm->csrow, dimm->cschannel); | 92 | number, location, dimm->csrow, dimm->cschannel); |
91 | edac_dbg(4, " dimm = %p\n", dimm); | 93 | edac_dbg(4, " dimm = %p\n", dimm); |
92 | edac_dbg(4, " dimm->label = '%s'\n", dimm->label); | 94 | edac_dbg(4, " dimm->label = '%s'\n", dimm->label); |
93 | edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); | 95 | edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); |
94 | edac_dbg(4, " dimm->grain = %d\n", dimm->grain); | 96 | edac_dbg(4, " dimm->grain = %d\n", dimm->grain); |
95 | edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); | 97 | edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); |
96 | } | 98 | } |
97 | 99 | ||
98 | static void edac_mc_dump_csrow(struct csrow_info *csrow) | 100 | static void edac_mc_dump_csrow(struct csrow_info *csrow) |
99 | { | 101 | { |
100 | edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx); | 102 | edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx); |
101 | edac_dbg(4, " csrow = %p\n", csrow); | 103 | edac_dbg(4, " csrow = %p\n", csrow); |
102 | edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page); | 104 | edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page); |
103 | edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page); | 105 | edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page); |
104 | edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask); | 106 | edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask); |
105 | edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels); | 107 | edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels); |
106 | edac_dbg(4, " csrow->channels = %p\n", csrow->channels); | 108 | edac_dbg(4, " csrow->channels = %p\n", csrow->channels); |
107 | edac_dbg(4, " csrow->mci = %p\n", csrow->mci); | 109 | edac_dbg(4, " csrow->mci = %p\n", csrow->mci); |
108 | } | 110 | } |
109 | 111 | ||
110 | static void edac_mc_dump_mci(struct mem_ctl_info *mci) | 112 | static void edac_mc_dump_mci(struct mem_ctl_info *mci) |
111 | { | 113 | { |
112 | edac_dbg(3, "\tmci = %p\n", mci); | 114 | edac_dbg(3, "\tmci = %p\n", mci); |
113 | edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap); | 115 | edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap); |
114 | edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); | 116 | edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); |
115 | edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap); | 117 | edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap); |
116 | edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check); | 118 | edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check); |
117 | edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n", | 119 | edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n", |
118 | mci->nr_csrows, mci->csrows); | 120 | mci->nr_csrows, mci->csrows); |
119 | edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n", | 121 | edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n", |
120 | mci->tot_dimms, mci->dimms); | 122 | mci->tot_dimms, mci->dimms); |
121 | edac_dbg(3, "\tdev = %p\n", mci->pdev); | 123 | edac_dbg(3, "\tdev = %p\n", mci->pdev); |
122 | edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", | 124 | edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", |
123 | mci->mod_name, mci->ctl_name); | 125 | mci->mod_name, mci->ctl_name); |
124 | edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info); | 126 | edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info); |
125 | } | 127 | } |
126 | 128 | ||
127 | #endif /* CONFIG_EDAC_DEBUG */ | 129 | #endif /* CONFIG_EDAC_DEBUG */ |
128 | 130 | ||
129 | /* | 131 | /* |
130 | * keep those in sync with the enum mem_type | 132 | * keep those in sync with the enum mem_type |
131 | */ | 133 | */ |
132 | const char *edac_mem_types[] = { | 134 | const char *edac_mem_types[] = { |
133 | "Empty csrow", | 135 | "Empty csrow", |
134 | "Reserved csrow type", | 136 | "Reserved csrow type", |
135 | "Unknown csrow type", | 137 | "Unknown csrow type", |
136 | "Fast page mode RAM", | 138 | "Fast page mode RAM", |
137 | "Extended data out RAM", | 139 | "Extended data out RAM", |
138 | "Burst Extended data out RAM", | 140 | "Burst Extended data out RAM", |
139 | "Single data rate SDRAM", | 141 | "Single data rate SDRAM", |
140 | "Registered single data rate SDRAM", | 142 | "Registered single data rate SDRAM", |
141 | "Double data rate SDRAM", | 143 | "Double data rate SDRAM", |
142 | "Registered Double data rate SDRAM", | 144 | "Registered Double data rate SDRAM", |
143 | "Rambus DRAM", | 145 | "Rambus DRAM", |
144 | "Unbuffered DDR2 RAM", | 146 | "Unbuffered DDR2 RAM", |
145 | "Fully buffered DDR2", | 147 | "Fully buffered DDR2", |
146 | "Registered DDR2 RAM", | 148 | "Registered DDR2 RAM", |
147 | "Rambus XDR", | 149 | "Rambus XDR", |
148 | "Unbuffered DDR3 RAM", | 150 | "Unbuffered DDR3 RAM", |
149 | "Registered DDR3 RAM", | 151 | "Registered DDR3 RAM", |
150 | }; | 152 | }; |
151 | EXPORT_SYMBOL_GPL(edac_mem_types); | 153 | EXPORT_SYMBOL_GPL(edac_mem_types); |
152 | 154 | ||
153 | /** | 155 | /** |
154 | * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation | 156 | * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation |
155 | * @p: pointer to a pointer with the memory offset to be used. At | 157 | * @p: pointer to a pointer with the memory offset to be used. At |
156 | * return, this will be incremented to point to the next offset | 158 | * return, this will be incremented to point to the next offset |
157 | * @size: Size of the data structure to be reserved | 159 | * @size: Size of the data structure to be reserved |
158 | * @n_elems: Number of elements that should be reserved | 160 | * @n_elems: Number of elements that should be reserved |
159 | * | 161 | * |
160 | * If 'size' is a constant, the compiler will optimize this whole function | 162 | * If 'size' is a constant, the compiler will optimize this whole function |
161 | * down to either a no-op or the addition of a constant to the value of '*p'. | 163 | * down to either a no-op or the addition of a constant to the value of '*p'. |
162 | * | 164 | * |
163 | * The 'p' pointer is absolutely needed to keep the proper advancing | 165 | * The 'p' pointer is absolutely needed to keep the proper advancing |
164 | * further in memory to the proper offsets when allocating the struct along | 166 | * further in memory to the proper offsets when allocating the struct along |
165 | * with its embedded structs, as edac_device_alloc_ctl_info() does it | 167 | * with its embedded structs, as edac_device_alloc_ctl_info() does it |
166 | * above, for example. | 168 | * above, for example. |
167 | * | 169 | * |
168 | * At return, the pointer 'p' will be incremented to be used on a next call | 170 | * At return, the pointer 'p' will be incremented to be used on a next call |
169 | * to this function. | 171 | * to this function. |
170 | */ | 172 | */ |
171 | void *edac_align_ptr(void **p, unsigned size, int n_elems) | 173 | void *edac_align_ptr(void **p, unsigned size, int n_elems) |
172 | { | 174 | { |
173 | unsigned align, r; | 175 | unsigned align, r; |
174 | void *ptr = *p; | 176 | void *ptr = *p; |
175 | 177 | ||
176 | *p += size * n_elems; | 178 | *p += size * n_elems; |
177 | 179 | ||
178 | /* | 180 | /* |
179 | * 'p' can possibly be an unaligned item X such that sizeof(X) is | 181 | * 'p' can possibly be an unaligned item X such that sizeof(X) is |
180 | * 'size'. Adjust 'p' so that its alignment is at least as | 182 | * 'size'. Adjust 'p' so that its alignment is at least as |
181 | * stringent as what the compiler would provide for X and return | 183 | * stringent as what the compiler would provide for X and return |
182 | * the aligned result. | 184 | * the aligned result. |
183 | * Here we assume that the alignment of a "long long" is the most | 185 | * Here we assume that the alignment of a "long long" is the most |
184 | * stringent alignment that the compiler will ever provide by default. | 186 | * stringent alignment that the compiler will ever provide by default. |
185 | * As far as I know, this is a reasonable assumption. | 187 | * As far as I know, this is a reasonable assumption. |
186 | */ | 188 | */ |
187 | if (size > sizeof(long)) | 189 | if (size > sizeof(long)) |
188 | align = sizeof(long long); | 190 | align = sizeof(long long); |
189 | else if (size > sizeof(int)) | 191 | else if (size > sizeof(int)) |
190 | align = sizeof(long); | 192 | align = sizeof(long); |
191 | else if (size > sizeof(short)) | 193 | else if (size > sizeof(short)) |
192 | align = sizeof(int); | 194 | align = sizeof(int); |
193 | else if (size > sizeof(char)) | 195 | else if (size > sizeof(char)) |
194 | align = sizeof(short); | 196 | align = sizeof(short); |
195 | else | 197 | else |
196 | return (char *)ptr; | 198 | return (char *)ptr; |
197 | 199 | ||
198 | r = (unsigned long)p % align; | 200 | r = (unsigned long)p % align; |
199 | 201 | ||
200 | if (r == 0) | 202 | if (r == 0) |
201 | return (char *)ptr; | 203 | return (char *)ptr; |
202 | 204 | ||
203 | *p += align - r; | 205 | *p += align - r; |
204 | 206 | ||
205 | return (void *)(((unsigned long)ptr) + align - r); | 207 | return (void *)(((unsigned long)ptr) + align - r); |
206 | } | 208 | } |
207 | 209 | ||
208 | static void _edac_mc_free(struct mem_ctl_info *mci) | 210 | static void _edac_mc_free(struct mem_ctl_info *mci) |
209 | { | 211 | { |
210 | int i, chn, row; | 212 | int i, chn, row; |
211 | struct csrow_info *csr; | 213 | struct csrow_info *csr; |
212 | const unsigned int tot_dimms = mci->tot_dimms; | 214 | const unsigned int tot_dimms = mci->tot_dimms; |
213 | const unsigned int tot_channels = mci->num_cschannel; | 215 | const unsigned int tot_channels = mci->num_cschannel; |
214 | const unsigned int tot_csrows = mci->nr_csrows; | 216 | const unsigned int tot_csrows = mci->nr_csrows; |
215 | 217 | ||
216 | if (mci->dimms) { | 218 | if (mci->dimms) { |
217 | for (i = 0; i < tot_dimms; i++) | 219 | for (i = 0; i < tot_dimms; i++) |
218 | kfree(mci->dimms[i]); | 220 | kfree(mci->dimms[i]); |
219 | kfree(mci->dimms); | 221 | kfree(mci->dimms); |
220 | } | 222 | } |
221 | if (mci->csrows) { | 223 | if (mci->csrows) { |
222 | for (row = 0; row < tot_csrows; row++) { | 224 | for (row = 0; row < tot_csrows; row++) { |
223 | csr = mci->csrows[row]; | 225 | csr = mci->csrows[row]; |
224 | if (csr) { | 226 | if (csr) { |
225 | if (csr->channels) { | 227 | if (csr->channels) { |
226 | for (chn = 0; chn < tot_channels; chn++) | 228 | for (chn = 0; chn < tot_channels; chn++) |
227 | kfree(csr->channels[chn]); | 229 | kfree(csr->channels[chn]); |
228 | kfree(csr->channels); | 230 | kfree(csr->channels); |
229 | } | 231 | } |
230 | kfree(csr); | 232 | kfree(csr); |
231 | } | 233 | } |
232 | } | 234 | } |
233 | kfree(mci->csrows); | 235 | kfree(mci->csrows); |
234 | } | 236 | } |
235 | kfree(mci); | 237 | kfree(mci); |
236 | } | 238 | } |
237 | 239 | ||
238 | /** | 240 | /** |
239 | * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure | 241 | * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure |
240 | * @mc_num: Memory controller number | 242 | * @mc_num: Memory controller number |
241 | * @n_layers: Number of MC hierarchy layers | 243 | * @n_layers: Number of MC hierarchy layers |
242 | * @layers: Describes each layer as seen by the Memory Controller | 244 | * @layers: Describes each layer as seen by the Memory Controller |
243 | * @sz_pvt: size of private storage needed | 245 | * @sz_pvt: size of private storage needed |
244 | * | 246 | * |
245 | * | 247 | * |
246 | * Everything is kmalloc'ed as one big chunk - more efficient. | 248 | * Everything is kmalloc'ed as one big chunk - more efficient. |
247 | * Only can be used if all structures have the same lifetime - otherwise | 249 | * Only can be used if all structures have the same lifetime - otherwise |
248 | * you have to allocate and initialize your own structures. | 250 | * you have to allocate and initialize your own structures. |
249 | * | 251 | * |
250 | * Use edac_mc_free() to free mc structures allocated by this function. | 252 | * Use edac_mc_free() to free mc structures allocated by this function. |
251 | * | 253 | * |
252 | * NOTE: drivers handle multi-rank memories in different ways: in some | 254 | * NOTE: drivers handle multi-rank memories in different ways: in some |
253 | * drivers, one multi-rank memory stick is mapped as one entry, while, in | 255 | * drivers, one multi-rank memory stick is mapped as one entry, while, in |
254 | * others, a single multi-rank memory stick would be mapped into several | 256 | * others, a single multi-rank memory stick would be mapped into several |
255 | * entries. Currently, this function will allocate multiple struct dimm_info | 257 | * entries. Currently, this function will allocate multiple struct dimm_info |
256 | * on such scenarios, as grouping the multiple ranks require drivers change. | 258 | * on such scenarios, as grouping the multiple ranks require drivers change. |
257 | * | 259 | * |
258 | * Returns: | 260 | * Returns: |
259 | * On failure: NULL | 261 | * On failure: NULL |
260 | * On success: struct mem_ctl_info pointer | 262 | * On success: struct mem_ctl_info pointer |
261 | */ | 263 | */ |
262 | struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, | 264 | struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, |
263 | unsigned n_layers, | 265 | unsigned n_layers, |
264 | struct edac_mc_layer *layers, | 266 | struct edac_mc_layer *layers, |
265 | unsigned sz_pvt) | 267 | unsigned sz_pvt) |
266 | { | 268 | { |
267 | struct mem_ctl_info *mci; | 269 | struct mem_ctl_info *mci; |
268 | struct edac_mc_layer *layer; | 270 | struct edac_mc_layer *layer; |
269 | struct csrow_info *csr; | 271 | struct csrow_info *csr; |
270 | struct rank_info *chan; | 272 | struct rank_info *chan; |
271 | struct dimm_info *dimm; | 273 | struct dimm_info *dimm; |
272 | u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; | 274 | u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; |
273 | unsigned pos[EDAC_MAX_LAYERS]; | 275 | unsigned pos[EDAC_MAX_LAYERS]; |
274 | unsigned size, tot_dimms = 1, count = 1; | 276 | unsigned size, tot_dimms = 1, count = 1; |
275 | unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; | 277 | unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; |
276 | void *pvt, *p, *ptr = NULL; | 278 | void *pvt, *p, *ptr = NULL; |
277 | int i, j, row, chn, n, len, off; | 279 | int i, j, row, chn, n, len, off; |
278 | bool per_rank = false; | 280 | bool per_rank = false; |
279 | 281 | ||
280 | BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); | 282 | BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); |
281 | /* | 283 | /* |
282 | * Calculate the total amount of dimms and csrows/cschannels while | 284 | * Calculate the total amount of dimms and csrows/cschannels while |
283 | * in the old API emulation mode | 285 | * in the old API emulation mode |
284 | */ | 286 | */ |
285 | for (i = 0; i < n_layers; i++) { | 287 | for (i = 0; i < n_layers; i++) { |
286 | tot_dimms *= layers[i].size; | 288 | tot_dimms *= layers[i].size; |
287 | if (layers[i].is_virt_csrow) | 289 | if (layers[i].is_virt_csrow) |
288 | tot_csrows *= layers[i].size; | 290 | tot_csrows *= layers[i].size; |
289 | else | 291 | else |
290 | tot_channels *= layers[i].size; | 292 | tot_channels *= layers[i].size; |
291 | 293 | ||
292 | if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT) | 294 | if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT) |
293 | per_rank = true; | 295 | per_rank = true; |
294 | } | 296 | } |
295 | 297 | ||
296 | /* Figure out the offsets of the various items from the start of an mc | 298 | /* Figure out the offsets of the various items from the start of an mc |
297 | * structure. We want the alignment of each item to be at least as | 299 | * structure. We want the alignment of each item to be at least as |
298 | * stringent as what the compiler would provide if we could simply | 300 | * stringent as what the compiler would provide if we could simply |
299 | * hardcode everything into a single struct. | 301 | * hardcode everything into a single struct. |
300 | */ | 302 | */ |
301 | mci = edac_align_ptr(&ptr, sizeof(*mci), 1); | 303 | mci = edac_align_ptr(&ptr, sizeof(*mci), 1); |
302 | layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); | 304 | layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); |
303 | for (i = 0; i < n_layers; i++) { | 305 | for (i = 0; i < n_layers; i++) { |
304 | count *= layers[i].size; | 306 | count *= layers[i].size; |
305 | edac_dbg(4, "errcount layer %d size %d\n", i, count); | 307 | edac_dbg(4, "errcount layer %d size %d\n", i, count); |
306 | ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); | 308 | ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); |
307 | ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); | 309 | ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); |
308 | tot_errcount += 2 * count; | 310 | tot_errcount += 2 * count; |
309 | } | 311 | } |
310 | 312 | ||
311 | edac_dbg(4, "allocating %d error counters\n", tot_errcount); | 313 | edac_dbg(4, "allocating %d error counters\n", tot_errcount); |
312 | pvt = edac_align_ptr(&ptr, sz_pvt, 1); | 314 | pvt = edac_align_ptr(&ptr, sz_pvt, 1); |
313 | size = ((unsigned long)pvt) + sz_pvt; | 315 | size = ((unsigned long)pvt) + sz_pvt; |
314 | 316 | ||
315 | edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", | 317 | edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", |
316 | size, | 318 | size, |
317 | tot_dimms, | 319 | tot_dimms, |
318 | per_rank ? "ranks" : "dimms", | 320 | per_rank ? "ranks" : "dimms", |
319 | tot_csrows * tot_channels); | 321 | tot_csrows * tot_channels); |
320 | 322 | ||
321 | mci = kzalloc(size, GFP_KERNEL); | 323 | mci = kzalloc(size, GFP_KERNEL); |
322 | if (mci == NULL) | 324 | if (mci == NULL) |
323 | return NULL; | 325 | return NULL; |
324 | 326 | ||
325 | /* Adjust pointers so they point within the memory we just allocated | 327 | /* Adjust pointers so they point within the memory we just allocated |
326 | * rather than an imaginary chunk of memory located at address 0. | 328 | * rather than an imaginary chunk of memory located at address 0. |
327 | */ | 329 | */ |
328 | layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); | 330 | layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); |
329 | for (i = 0; i < n_layers; i++) { | 331 | for (i = 0; i < n_layers; i++) { |
330 | mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); | 332 | mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); |
331 | mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); | 333 | mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); |
332 | } | 334 | } |
333 | pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; | 335 | pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; |
334 | 336 | ||
335 | /* setup index and various internal pointers */ | 337 | /* setup index and various internal pointers */ |
336 | mci->mc_idx = mc_num; | 338 | mci->mc_idx = mc_num; |
337 | mci->tot_dimms = tot_dimms; | 339 | mci->tot_dimms = tot_dimms; |
338 | mci->pvt_info = pvt; | 340 | mci->pvt_info = pvt; |
339 | mci->n_layers = n_layers; | 341 | mci->n_layers = n_layers; |
340 | mci->layers = layer; | 342 | mci->layers = layer; |
341 | memcpy(mci->layers, layers, sizeof(*layer) * n_layers); | 343 | memcpy(mci->layers, layers, sizeof(*layer) * n_layers); |
342 | mci->nr_csrows = tot_csrows; | 344 | mci->nr_csrows = tot_csrows; |
343 | mci->num_cschannel = tot_channels; | 345 | mci->num_cschannel = tot_channels; |
344 | mci->csbased = per_rank; | 346 | mci->csbased = per_rank; |
345 | 347 | ||
346 | /* | 348 | /* |
347 | * Allocate and fill the csrow/channels structs | 349 | * Allocate and fill the csrow/channels structs |
348 | */ | 350 | */ |
349 | mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL); | 351 | mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL); |
350 | if (!mci->csrows) | 352 | if (!mci->csrows) |
351 | goto error; | 353 | goto error; |
352 | for (row = 0; row < tot_csrows; row++) { | 354 | for (row = 0; row < tot_csrows; row++) { |
353 | csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); | 355 | csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); |
354 | if (!csr) | 356 | if (!csr) |
355 | goto error; | 357 | goto error; |
356 | mci->csrows[row] = csr; | 358 | mci->csrows[row] = csr; |
357 | csr->csrow_idx = row; | 359 | csr->csrow_idx = row; |
358 | csr->mci = mci; | 360 | csr->mci = mci; |
359 | csr->nr_channels = tot_channels; | 361 | csr->nr_channels = tot_channels; |
360 | csr->channels = kcalloc(tot_channels, sizeof(*csr->channels), | 362 | csr->channels = kcalloc(tot_channels, sizeof(*csr->channels), |
361 | GFP_KERNEL); | 363 | GFP_KERNEL); |
362 | if (!csr->channels) | 364 | if (!csr->channels) |
363 | goto error; | 365 | goto error; |
364 | 366 | ||
365 | for (chn = 0; chn < tot_channels; chn++) { | 367 | for (chn = 0; chn < tot_channels; chn++) { |
366 | chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL); | 368 | chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL); |
367 | if (!chan) | 369 | if (!chan) |
368 | goto error; | 370 | goto error; |
369 | csr->channels[chn] = chan; | 371 | csr->channels[chn] = chan; |
370 | chan->chan_idx = chn; | 372 | chan->chan_idx = chn; |
371 | chan->csrow = csr; | 373 | chan->csrow = csr; |
372 | } | 374 | } |
373 | } | 375 | } |
374 | 376 | ||
375 | /* | 377 | /* |
376 | * Allocate and fill the dimm structs | 378 | * Allocate and fill the dimm structs |
377 | */ | 379 | */ |
378 | mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL); | 380 | mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL); |
379 | if (!mci->dimms) | 381 | if (!mci->dimms) |
380 | goto error; | 382 | goto error; |
381 | 383 | ||
382 | memset(&pos, 0, sizeof(pos)); | 384 | memset(&pos, 0, sizeof(pos)); |
383 | row = 0; | 385 | row = 0; |
384 | chn = 0; | 386 | chn = 0; |
385 | for (i = 0; i < tot_dimms; i++) { | 387 | for (i = 0; i < tot_dimms; i++) { |
386 | chan = mci->csrows[row]->channels[chn]; | 388 | chan = mci->csrows[row]->channels[chn]; |
387 | off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]); | 389 | off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]); |
388 | if (off < 0 || off >= tot_dimms) { | 390 | if (off < 0 || off >= tot_dimms) { |
389 | edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n"); | 391 | edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n"); |
390 | goto error; | 392 | goto error; |
391 | } | 393 | } |
392 | 394 | ||
393 | dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); | 395 | dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); |
394 | if (!dimm) | 396 | if (!dimm) |
395 | goto error; | 397 | goto error; |
396 | mci->dimms[off] = dimm; | 398 | mci->dimms[off] = dimm; |
397 | dimm->mci = mci; | 399 | dimm->mci = mci; |
398 | 400 | ||
399 | /* | 401 | /* |
400 | * Copy DIMM location and initialize it. | 402 | * Copy DIMM location and initialize it. |
401 | */ | 403 | */ |
402 | len = sizeof(dimm->label); | 404 | len = sizeof(dimm->label); |
403 | p = dimm->label; | 405 | p = dimm->label; |
404 | n = snprintf(p, len, "mc#%u", mc_num); | 406 | n = snprintf(p, len, "mc#%u", mc_num); |
405 | p += n; | 407 | p += n; |
406 | len -= n; | 408 | len -= n; |
407 | for (j = 0; j < n_layers; j++) { | 409 | for (j = 0; j < n_layers; j++) { |
408 | n = snprintf(p, len, "%s#%u", | 410 | n = snprintf(p, len, "%s#%u", |
409 | edac_layer_name[layers[j].type], | 411 | edac_layer_name[layers[j].type], |
410 | pos[j]); | 412 | pos[j]); |
411 | p += n; | 413 | p += n; |
412 | len -= n; | 414 | len -= n; |
413 | dimm->location[j] = pos[j]; | 415 | dimm->location[j] = pos[j]; |
414 | 416 | ||
415 | if (len <= 0) | 417 | if (len <= 0) |
416 | break; | 418 | break; |
417 | } | 419 | } |
418 | 420 | ||
419 | /* Link it to the csrows old API data */ | 421 | /* Link it to the csrows old API data */ |
420 | chan->dimm = dimm; | 422 | chan->dimm = dimm; |
421 | dimm->csrow = row; | 423 | dimm->csrow = row; |
422 | dimm->cschannel = chn; | 424 | dimm->cschannel = chn; |
423 | 425 | ||
424 | /* Increment csrow location */ | 426 | /* Increment csrow location */ |
425 | if (layers[0].is_virt_csrow) { | 427 | if (layers[0].is_virt_csrow) { |
426 | chn++; | 428 | chn++; |
427 | if (chn == tot_channels) { | 429 | if (chn == tot_channels) { |
428 | chn = 0; | 430 | chn = 0; |
429 | row++; | 431 | row++; |
430 | } | 432 | } |
431 | } else { | 433 | } else { |
432 | row++; | 434 | row++; |
433 | if (row == tot_csrows) { | 435 | if (row == tot_csrows) { |
434 | row = 0; | 436 | row = 0; |
435 | chn++; | 437 | chn++; |
436 | } | 438 | } |
437 | } | 439 | } |
438 | 440 | ||
439 | /* Increment dimm location */ | 441 | /* Increment dimm location */ |
440 | for (j = n_layers - 1; j >= 0; j--) { | 442 | for (j = n_layers - 1; j >= 0; j--) { |
441 | pos[j]++; | 443 | pos[j]++; |
442 | if (pos[j] < layers[j].size) | 444 | if (pos[j] < layers[j].size) |
443 | break; | 445 | break; |
444 | pos[j] = 0; | 446 | pos[j] = 0; |
445 | } | 447 | } |
446 | } | 448 | } |
447 | 449 | ||
448 | mci->op_state = OP_ALLOC; | 450 | mci->op_state = OP_ALLOC; |
449 | 451 | ||
450 | return mci; | 452 | return mci; |
451 | 453 | ||
452 | error: | 454 | error: |
453 | _edac_mc_free(mci); | 455 | _edac_mc_free(mci); |
454 | 456 | ||
455 | return NULL; | 457 | return NULL; |
456 | } | 458 | } |
457 | EXPORT_SYMBOL_GPL(edac_mc_alloc); | 459 | EXPORT_SYMBOL_GPL(edac_mc_alloc); |
458 | 460 | ||
459 | /** | 461 | /** |
460 | * edac_mc_free | 462 | * edac_mc_free |
461 | * 'Free' a previously allocated 'mci' structure | 463 | * 'Free' a previously allocated 'mci' structure |
462 | * @mci: pointer to a struct mem_ctl_info structure | 464 | * @mci: pointer to a struct mem_ctl_info structure |
463 | */ | 465 | */ |
464 | void edac_mc_free(struct mem_ctl_info *mci) | 466 | void edac_mc_free(struct mem_ctl_info *mci) |
465 | { | 467 | { |
466 | edac_dbg(1, "\n"); | 468 | edac_dbg(1, "\n"); |
467 | 469 | ||
468 | /* If we're not yet registered with sysfs free only what was allocated | 470 | /* If we're not yet registered with sysfs free only what was allocated |
469 | * in edac_mc_alloc(). | 471 | * in edac_mc_alloc(). |
470 | */ | 472 | */ |
471 | if (!device_is_registered(&mci->dev)) { | 473 | if (!device_is_registered(&mci->dev)) { |
472 | _edac_mc_free(mci); | 474 | _edac_mc_free(mci); |
473 | return; | 475 | return; |
474 | } | 476 | } |
475 | 477 | ||
476 | /* the mci instance is freed here, when the sysfs object is dropped */ | 478 | /* the mci instance is freed here, when the sysfs object is dropped */ |
477 | edac_unregister_sysfs(mci); | 479 | edac_unregister_sysfs(mci); |
478 | } | 480 | } |
479 | EXPORT_SYMBOL_GPL(edac_mc_free); | 481 | EXPORT_SYMBOL_GPL(edac_mc_free); |
480 | 482 | ||
481 | 483 | ||
482 | /** | 484 | /** |
483 | * find_mci_by_dev | 485 | * find_mci_by_dev |
484 | * | 486 | * |
485 | * scan list of controllers looking for the one that manages | 487 | * scan list of controllers looking for the one that manages |
486 | * the 'dev' device | 488 | * the 'dev' device |
487 | * @dev: pointer to a struct device related with the MCI | 489 | * @dev: pointer to a struct device related with the MCI |
488 | */ | 490 | */ |
489 | struct mem_ctl_info *find_mci_by_dev(struct device *dev) | 491 | struct mem_ctl_info *find_mci_by_dev(struct device *dev) |
490 | { | 492 | { |
491 | struct mem_ctl_info *mci; | 493 | struct mem_ctl_info *mci; |
492 | struct list_head *item; | 494 | struct list_head *item; |
493 | 495 | ||
494 | edac_dbg(3, "\n"); | 496 | edac_dbg(3, "\n"); |
495 | 497 | ||
496 | list_for_each(item, &mc_devices) { | 498 | list_for_each(item, &mc_devices) { |
497 | mci = list_entry(item, struct mem_ctl_info, link); | 499 | mci = list_entry(item, struct mem_ctl_info, link); |
498 | 500 | ||
499 | if (mci->pdev == dev) | 501 | if (mci->pdev == dev) |
500 | return mci; | 502 | return mci; |
501 | } | 503 | } |
502 | 504 | ||
503 | return NULL; | 505 | return NULL; |
504 | } | 506 | } |
505 | EXPORT_SYMBOL_GPL(find_mci_by_dev); | 507 | EXPORT_SYMBOL_GPL(find_mci_by_dev); |
506 | 508 | ||
507 | /* | 509 | /* |
508 | * handler for EDAC to check if NMI type handler has asserted interrupt | 510 | * handler for EDAC to check if NMI type handler has asserted interrupt |
509 | */ | 511 | */ |
510 | static int edac_mc_assert_error_check_and_clear(void) | 512 | static int edac_mc_assert_error_check_and_clear(void) |
511 | { | 513 | { |
512 | int old_state; | 514 | int old_state; |
513 | 515 | ||
514 | if (edac_op_state == EDAC_OPSTATE_POLL) | 516 | if (edac_op_state == EDAC_OPSTATE_POLL) |
515 | return 1; | 517 | return 1; |
516 | 518 | ||
517 | old_state = edac_err_assert; | 519 | old_state = edac_err_assert; |
518 | edac_err_assert = 0; | 520 | edac_err_assert = 0; |
519 | 521 | ||
520 | return old_state; | 522 | return old_state; |
521 | } | 523 | } |
522 | 524 | ||
523 | /* | 525 | /* |
524 | * edac_mc_workq_function | 526 | * edac_mc_workq_function |
525 | * performs the operation scheduled by a workq request | 527 | * performs the operation scheduled by a workq request |
526 | */ | 528 | */ |
527 | static void edac_mc_workq_function(struct work_struct *work_req) | 529 | static void edac_mc_workq_function(struct work_struct *work_req) |
528 | { | 530 | { |
529 | struct delayed_work *d_work = to_delayed_work(work_req); | 531 | struct delayed_work *d_work = to_delayed_work(work_req); |
530 | struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); | 532 | struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); |
531 | 533 | ||
532 | mutex_lock(&mem_ctls_mutex); | 534 | mutex_lock(&mem_ctls_mutex); |
533 | 535 | ||
534 | /* if this control struct has moved to offline state, we are done */ | 536 | /* if this control struct has moved to offline state, we are done */ |
535 | if (mci->op_state == OP_OFFLINE) { | 537 | if (mci->op_state == OP_OFFLINE) { |
536 | mutex_unlock(&mem_ctls_mutex); | 538 | mutex_unlock(&mem_ctls_mutex); |
537 | return; | 539 | return; |
538 | } | 540 | } |
539 | 541 | ||
540 | /* Only poll controllers that are running polled and have a check */ | 542 | /* Only poll controllers that are running polled and have a check */ |
541 | if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL)) | 543 | if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL)) |
542 | mci->edac_check(mci); | 544 | mci->edac_check(mci); |
543 | 545 | ||
544 | mutex_unlock(&mem_ctls_mutex); | 546 | mutex_unlock(&mem_ctls_mutex); |
545 | 547 | ||
546 | /* Reschedule */ | 548 | /* Reschedule */ |
547 | queue_delayed_work(edac_workqueue, &mci->work, | 549 | queue_delayed_work(edac_workqueue, &mci->work, |
548 | msecs_to_jiffies(edac_mc_get_poll_msec())); | 550 | msecs_to_jiffies(edac_mc_get_poll_msec())); |
549 | } | 551 | } |
550 | 552 | ||
551 | /* | 553 | /* |
552 | * edac_mc_workq_setup | 554 | * edac_mc_workq_setup |
553 | * initialize a workq item for this mci | 555 | * initialize a workq item for this mci |
554 | * passing in the new delay period in msec | 556 | * passing in the new delay period in msec |
555 | * | 557 | * |
556 | * locking model: | 558 | * locking model: |
557 | * | 559 | * |
558 | * called with the mem_ctls_mutex held | 560 | * called with the mem_ctls_mutex held |
559 | */ | 561 | */ |
560 | static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) | 562 | static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) |
561 | { | 563 | { |
562 | edac_dbg(0, "\n"); | 564 | edac_dbg(0, "\n"); |
563 | 565 | ||
564 | /* if this instance is not in the POLL state, then simply return */ | 566 | /* if this instance is not in the POLL state, then simply return */ |
565 | if (mci->op_state != OP_RUNNING_POLL) | 567 | if (mci->op_state != OP_RUNNING_POLL) |
566 | return; | 568 | return; |
567 | 569 | ||
568 | INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); | 570 | INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); |
569 | mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); | 571 | mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); |
570 | } | 572 | } |
571 | 573 | ||
572 | /* | 574 | /* |
573 | * edac_mc_workq_teardown | 575 | * edac_mc_workq_teardown |
574 | * stop the workq processing on this mci | 576 | * stop the workq processing on this mci |
575 | * | 577 | * |
576 | * locking model: | 578 | * locking model: |
577 | * | 579 | * |
578 | * called WITHOUT lock held | 580 | * called WITHOUT lock held |
579 | */ | 581 | */ |
580 | static void edac_mc_workq_teardown(struct mem_ctl_info *mci) | 582 | static void edac_mc_workq_teardown(struct mem_ctl_info *mci) |
581 | { | 583 | { |
582 | int status; | 584 | int status; |
583 | 585 | ||
584 | if (mci->op_state != OP_RUNNING_POLL) | 586 | if (mci->op_state != OP_RUNNING_POLL) |
585 | return; | 587 | return; |
586 | 588 | ||
587 | status = cancel_delayed_work(&mci->work); | 589 | status = cancel_delayed_work(&mci->work); |
588 | if (status == 0) { | 590 | if (status == 0) { |
589 | edac_dbg(0, "not canceled, flush the queue\n"); | 591 | edac_dbg(0, "not canceled, flush the queue\n"); |
590 | 592 | ||
591 | /* workq instance might be running, wait for it */ | 593 | /* workq instance might be running, wait for it */ |
592 | flush_workqueue(edac_workqueue); | 594 | flush_workqueue(edac_workqueue); |
593 | } | 595 | } |
594 | } | 596 | } |
595 | 597 | ||
596 | /* | 598 | /* |
597 | * edac_mc_reset_delay_period(unsigned long value) | 599 | * edac_mc_reset_delay_period(unsigned long value) |
598 | * | 600 | * |
599 | * user space has updated our poll period value, need to | 601 | * user space has updated our poll period value, need to |
600 | * reset our workq delays | 602 | * reset our workq delays |
601 | */ | 603 | */ |
602 | void edac_mc_reset_delay_period(int value) | 604 | void edac_mc_reset_delay_period(int value) |
603 | { | 605 | { |
604 | struct mem_ctl_info *mci; | 606 | struct mem_ctl_info *mci; |
605 | struct list_head *item; | 607 | struct list_head *item; |
606 | 608 | ||
607 | mutex_lock(&mem_ctls_mutex); | 609 | mutex_lock(&mem_ctls_mutex); |
608 | 610 | ||
609 | list_for_each(item, &mc_devices) { | 611 | list_for_each(item, &mc_devices) { |
610 | mci = list_entry(item, struct mem_ctl_info, link); | 612 | mci = list_entry(item, struct mem_ctl_info, link); |
611 | 613 | ||
612 | edac_mc_workq_setup(mci, (unsigned long) value); | 614 | edac_mc_workq_setup(mci, (unsigned long) value); |
613 | } | 615 | } |
614 | 616 | ||
615 | mutex_unlock(&mem_ctls_mutex); | 617 | mutex_unlock(&mem_ctls_mutex); |
616 | } | 618 | } |
617 | 619 | ||
618 | 620 | ||
619 | 621 | ||
620 | /* Return 0 on success, 1 on failure. | 622 | /* Return 0 on success, 1 on failure. |
621 | * Before calling this function, caller must | 623 | * Before calling this function, caller must |
622 | * assign a unique value to mci->mc_idx. | 624 | * assign a unique value to mci->mc_idx. |
623 | * | 625 | * |
624 | * locking model: | 626 | * locking model: |
625 | * | 627 | * |
626 | * called with the mem_ctls_mutex lock held | 628 | * called with the mem_ctls_mutex lock held |
627 | */ | 629 | */ |
628 | static int add_mc_to_global_list(struct mem_ctl_info *mci) | 630 | static int add_mc_to_global_list(struct mem_ctl_info *mci) |
629 | { | 631 | { |
630 | struct list_head *item, *insert_before; | 632 | struct list_head *item, *insert_before; |
631 | struct mem_ctl_info *p; | 633 | struct mem_ctl_info *p; |
632 | 634 | ||
633 | insert_before = &mc_devices; | 635 | insert_before = &mc_devices; |
634 | 636 | ||
635 | p = find_mci_by_dev(mci->pdev); | 637 | p = find_mci_by_dev(mci->pdev); |
636 | if (unlikely(p != NULL)) | 638 | if (unlikely(p != NULL)) |
637 | goto fail0; | 639 | goto fail0; |
638 | 640 | ||
639 | list_for_each(item, &mc_devices) { | 641 | list_for_each(item, &mc_devices) { |
640 | p = list_entry(item, struct mem_ctl_info, link); | 642 | p = list_entry(item, struct mem_ctl_info, link); |
641 | 643 | ||
642 | if (p->mc_idx >= mci->mc_idx) { | 644 | if (p->mc_idx >= mci->mc_idx) { |
643 | if (unlikely(p->mc_idx == mci->mc_idx)) | 645 | if (unlikely(p->mc_idx == mci->mc_idx)) |
644 | goto fail1; | 646 | goto fail1; |
645 | 647 | ||
646 | insert_before = item; | 648 | insert_before = item; |
647 | break; | 649 | break; |
648 | } | 650 | } |
649 | } | 651 | } |
650 | 652 | ||
651 | list_add_tail_rcu(&mci->link, insert_before); | 653 | list_add_tail_rcu(&mci->link, insert_before); |
652 | atomic_inc(&edac_handlers); | 654 | atomic_inc(&edac_handlers); |
653 | return 0; | 655 | return 0; |
654 | 656 | ||
655 | fail0: | 657 | fail0: |
656 | edac_printk(KERN_WARNING, EDAC_MC, | 658 | edac_printk(KERN_WARNING, EDAC_MC, |
657 | "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev), | 659 | "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev), |
658 | edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); | 660 | edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); |
659 | return 1; | 661 | return 1; |
660 | 662 | ||
661 | fail1: | 663 | fail1: |
662 | edac_printk(KERN_WARNING, EDAC_MC, | 664 | edac_printk(KERN_WARNING, EDAC_MC, |
663 | "bug in low-level driver: attempt to assign\n" | 665 | "bug in low-level driver: attempt to assign\n" |
664 | " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__); | 666 | " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__); |
665 | return 1; | 667 | return 1; |
666 | } | 668 | } |
667 | 669 | ||
668 | static int del_mc_from_global_list(struct mem_ctl_info *mci) | 670 | static int del_mc_from_global_list(struct mem_ctl_info *mci) |
669 | { | 671 | { |
670 | int handlers = atomic_dec_return(&edac_handlers); | 672 | int handlers = atomic_dec_return(&edac_handlers); |
671 | list_del_rcu(&mci->link); | 673 | list_del_rcu(&mci->link); |
672 | 674 | ||
673 | /* these are for safe removal of devices from global list while | 675 | /* these are for safe removal of devices from global list while |
674 | * NMI handlers may be traversing list | 676 | * NMI handlers may be traversing list |
675 | */ | 677 | */ |
676 | synchronize_rcu(); | 678 | synchronize_rcu(); |
677 | INIT_LIST_HEAD(&mci->link); | 679 | INIT_LIST_HEAD(&mci->link); |
678 | 680 | ||
679 | return handlers; | 681 | return handlers; |
680 | } | 682 | } |
681 | 683 | ||
682 | /** | 684 | /** |
683 | * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'. | 685 | * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'. |
684 | * | 686 | * |
685 | * If found, return a pointer to the structure. | 687 | * If found, return a pointer to the structure. |
686 | * Else return NULL. | 688 | * Else return NULL. |
687 | * | 689 | * |
688 | * Caller must hold mem_ctls_mutex. | 690 | * Caller must hold mem_ctls_mutex. |
689 | */ | 691 | */ |
690 | struct mem_ctl_info *edac_mc_find(int idx) | 692 | struct mem_ctl_info *edac_mc_find(int idx) |
691 | { | 693 | { |
692 | struct list_head *item; | 694 | struct list_head *item; |
693 | struct mem_ctl_info *mci; | 695 | struct mem_ctl_info *mci; |
694 | 696 | ||
695 | list_for_each(item, &mc_devices) { | 697 | list_for_each(item, &mc_devices) { |
696 | mci = list_entry(item, struct mem_ctl_info, link); | 698 | mci = list_entry(item, struct mem_ctl_info, link); |
697 | 699 | ||
698 | if (mci->mc_idx >= idx) { | 700 | if (mci->mc_idx >= idx) { |
699 | if (mci->mc_idx == idx) | 701 | if (mci->mc_idx == idx) |
700 | return mci; | 702 | return mci; |
701 | 703 | ||
702 | break; | 704 | break; |
703 | } | 705 | } |
704 | } | 706 | } |
705 | 707 | ||
706 | return NULL; | 708 | return NULL; |
707 | } | 709 | } |
708 | EXPORT_SYMBOL(edac_mc_find); | 710 | EXPORT_SYMBOL(edac_mc_find); |
709 | 711 | ||
710 | /** | 712 | /** |
711 | * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and | 713 | * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and |
712 | * create sysfs entries associated with mci structure | 714 | * create sysfs entries associated with mci structure |
713 | * @mci: pointer to the mci structure to be added to the list | 715 | * @mci: pointer to the mci structure to be added to the list |
714 | * | 716 | * |
715 | * Return: | 717 | * Return: |
716 | * 0 Success | 718 | * 0 Success |
717 | * !0 Failure | 719 | * !0 Failure |
718 | */ | 720 | */ |
719 | 721 | ||
720 | /* FIXME - should a warning be printed if no error detection? correction? */ | 722 | /* FIXME - should a warning be printed if no error detection? correction? */ |
721 | int edac_mc_add_mc(struct mem_ctl_info *mci) | 723 | int edac_mc_add_mc(struct mem_ctl_info *mci) |
722 | { | 724 | { |
723 | int ret = -EINVAL; | 725 | int ret = -EINVAL; |
724 | edac_dbg(0, "\n"); | 726 | edac_dbg(0, "\n"); |
725 | 727 | ||
728 | if (mci->mc_idx >= EDAC_MAX_MCS) { | ||
729 | pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx); | ||
730 | return -ENODEV; | ||
731 | } | ||
732 | |||
726 | #ifdef CONFIG_EDAC_DEBUG | 733 | #ifdef CONFIG_EDAC_DEBUG |
727 | if (edac_debug_level >= 3) | 734 | if (edac_debug_level >= 3) |
728 | edac_mc_dump_mci(mci); | 735 | edac_mc_dump_mci(mci); |
729 | 736 | ||
730 | if (edac_debug_level >= 4) { | 737 | if (edac_debug_level >= 4) { |
731 | int i; | 738 | int i; |
732 | 739 | ||
733 | for (i = 0; i < mci->nr_csrows; i++) { | 740 | for (i = 0; i < mci->nr_csrows; i++) { |
734 | struct csrow_info *csrow = mci->csrows[i]; | 741 | struct csrow_info *csrow = mci->csrows[i]; |
735 | u32 nr_pages = 0; | 742 | u32 nr_pages = 0; |
736 | int j; | 743 | int j; |
737 | 744 | ||
738 | for (j = 0; j < csrow->nr_channels; j++) | 745 | for (j = 0; j < csrow->nr_channels; j++) |
739 | nr_pages += csrow->channels[j]->dimm->nr_pages; | 746 | nr_pages += csrow->channels[j]->dimm->nr_pages; |
740 | if (!nr_pages) | 747 | if (!nr_pages) |
741 | continue; | 748 | continue; |
742 | edac_mc_dump_csrow(csrow); | 749 | edac_mc_dump_csrow(csrow); |
743 | for (j = 0; j < csrow->nr_channels; j++) | 750 | for (j = 0; j < csrow->nr_channels; j++) |
744 | if (csrow->channels[j]->dimm->nr_pages) | 751 | if (csrow->channels[j]->dimm->nr_pages) |
745 | edac_mc_dump_channel(csrow->channels[j]); | 752 | edac_mc_dump_channel(csrow->channels[j]); |
746 | } | 753 | } |
747 | for (i = 0; i < mci->tot_dimms; i++) | 754 | for (i = 0; i < mci->tot_dimms; i++) |
748 | if (mci->dimms[i]->nr_pages) | 755 | if (mci->dimms[i]->nr_pages) |
749 | edac_mc_dump_dimm(mci->dimms[i], i); | 756 | edac_mc_dump_dimm(mci->dimms[i], i); |
750 | } | 757 | } |
751 | #endif | 758 | #endif |
752 | mutex_lock(&mem_ctls_mutex); | 759 | mutex_lock(&mem_ctls_mutex); |
753 | 760 | ||
754 | if (edac_mc_owner && edac_mc_owner != mci->mod_name) { | 761 | if (edac_mc_owner && edac_mc_owner != mci->mod_name) { |
755 | ret = -EPERM; | 762 | ret = -EPERM; |
756 | goto fail0; | 763 | goto fail0; |
757 | } | 764 | } |
758 | 765 | ||
759 | if (add_mc_to_global_list(mci)) | 766 | if (add_mc_to_global_list(mci)) |
760 | goto fail0; | 767 | goto fail0; |
761 | 768 | ||
762 | /* set load time so that error rate can be tracked */ | 769 | /* set load time so that error rate can be tracked */ |
763 | mci->start_time = jiffies; | 770 | mci->start_time = jiffies; |
771 | |||
772 | mci->bus = &mc_bus[mci->mc_idx]; | ||
764 | 773 | ||
765 | if (edac_create_sysfs_mci_device(mci)) { | 774 | if (edac_create_sysfs_mci_device(mci)) { |
766 | edac_mc_printk(mci, KERN_WARNING, | 775 | edac_mc_printk(mci, KERN_WARNING, |
767 | "failed to create sysfs device\n"); | 776 | "failed to create sysfs device\n"); |
768 | goto fail1; | 777 | goto fail1; |
769 | } | 778 | } |
770 | 779 | ||
771 | /* If there IS a check routine, then we are running POLLED */ | 780 | /* If there IS a check routine, then we are running POLLED */ |
772 | if (mci->edac_check != NULL) { | 781 | if (mci->edac_check != NULL) { |
773 | /* This instance is NOW RUNNING */ | 782 | /* This instance is NOW RUNNING */ |
774 | mci->op_state = OP_RUNNING_POLL; | 783 | mci->op_state = OP_RUNNING_POLL; |
775 | 784 | ||
776 | edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); | 785 | edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); |
777 | } else { | 786 | } else { |
778 | mci->op_state = OP_RUNNING_INTERRUPT; | 787 | mci->op_state = OP_RUNNING_INTERRUPT; |
779 | } | 788 | } |
780 | 789 | ||
781 | /* Report action taken */ | 790 | /* Report action taken */ |
782 | edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':" | 791 | edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':" |
783 | " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci)); | 792 | " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci)); |
784 | 793 | ||
785 | edac_mc_owner = mci->mod_name; | 794 | edac_mc_owner = mci->mod_name; |
786 | 795 | ||
787 | mutex_unlock(&mem_ctls_mutex); | 796 | mutex_unlock(&mem_ctls_mutex); |
788 | return 0; | 797 | return 0; |
789 | 798 | ||
790 | fail1: | 799 | fail1: |
791 | del_mc_from_global_list(mci); | 800 | del_mc_from_global_list(mci); |
792 | 801 | ||
793 | fail0: | 802 | fail0: |
794 | mutex_unlock(&mem_ctls_mutex); | 803 | mutex_unlock(&mem_ctls_mutex); |
795 | return ret; | 804 | return ret; |
796 | } | 805 | } |
797 | EXPORT_SYMBOL_GPL(edac_mc_add_mc); | 806 | EXPORT_SYMBOL_GPL(edac_mc_add_mc); |
798 | 807 | ||
799 | /** | 808 | /** |
800 | * edac_mc_del_mc: Remove sysfs entries for specified mci structure and | 809 | * edac_mc_del_mc: Remove sysfs entries for specified mci structure and |
801 | * remove mci structure from global list | 810 | * remove mci structure from global list |
802 | * @pdev: Pointer to 'struct device' representing mci structure to remove. | 811 | * @pdev: Pointer to 'struct device' representing mci structure to remove. |
803 | * | 812 | * |
804 | * Return pointer to removed mci structure, or NULL if device not found. | 813 | * Return pointer to removed mci structure, or NULL if device not found. |
805 | */ | 814 | */ |
806 | struct mem_ctl_info *edac_mc_del_mc(struct device *dev) | 815 | struct mem_ctl_info *edac_mc_del_mc(struct device *dev) |
807 | { | 816 | { |
808 | struct mem_ctl_info *mci; | 817 | struct mem_ctl_info *mci; |
809 | 818 | ||
810 | edac_dbg(0, "\n"); | 819 | edac_dbg(0, "\n"); |
811 | 820 | ||
812 | mutex_lock(&mem_ctls_mutex); | 821 | mutex_lock(&mem_ctls_mutex); |
813 | 822 | ||
814 | /* find the requested mci struct in the global list */ | 823 | /* find the requested mci struct in the global list */ |
815 | mci = find_mci_by_dev(dev); | 824 | mci = find_mci_by_dev(dev); |
816 | if (mci == NULL) { | 825 | if (mci == NULL) { |
817 | mutex_unlock(&mem_ctls_mutex); | 826 | mutex_unlock(&mem_ctls_mutex); |
818 | return NULL; | 827 | return NULL; |
819 | } | 828 | } |
820 | 829 | ||
821 | if (!del_mc_from_global_list(mci)) | 830 | if (!del_mc_from_global_list(mci)) |
822 | edac_mc_owner = NULL; | 831 | edac_mc_owner = NULL; |
823 | mutex_unlock(&mem_ctls_mutex); | 832 | mutex_unlock(&mem_ctls_mutex); |
824 | 833 | ||
825 | /* flush workq processes */ | 834 | /* flush workq processes */ |
826 | edac_mc_workq_teardown(mci); | 835 | edac_mc_workq_teardown(mci); |
827 | 836 | ||
828 | /* marking MCI offline */ | 837 | /* marking MCI offline */ |
829 | mci->op_state = OP_OFFLINE; | 838 | mci->op_state = OP_OFFLINE; |
830 | 839 | ||
831 | /* remove from sysfs */ | 840 | /* remove from sysfs */ |
832 | edac_remove_sysfs_mci_device(mci); | 841 | edac_remove_sysfs_mci_device(mci); |
833 | 842 | ||
834 | edac_printk(KERN_INFO, EDAC_MC, | 843 | edac_printk(KERN_INFO, EDAC_MC, |
835 | "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, | 844 | "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, |
836 | mci->mod_name, mci->ctl_name, edac_dev_name(mci)); | 845 | mci->mod_name, mci->ctl_name, edac_dev_name(mci)); |
837 | 846 | ||
838 | return mci; | 847 | return mci; |
839 | } | 848 | } |
840 | EXPORT_SYMBOL_GPL(edac_mc_del_mc); | 849 | EXPORT_SYMBOL_GPL(edac_mc_del_mc); |
841 | 850 | ||
842 | static void edac_mc_scrub_block(unsigned long page, unsigned long offset, | 851 | static void edac_mc_scrub_block(unsigned long page, unsigned long offset, |
843 | u32 size) | 852 | u32 size) |
844 | { | 853 | { |
845 | struct page *pg; | 854 | struct page *pg; |
846 | void *virt_addr; | 855 | void *virt_addr; |
847 | unsigned long flags = 0; | 856 | unsigned long flags = 0; |
848 | 857 | ||
849 | edac_dbg(3, "\n"); | 858 | edac_dbg(3, "\n"); |
850 | 859 | ||
851 | /* ECC error page was not in our memory. Ignore it. */ | 860 | /* ECC error page was not in our memory. Ignore it. */ |
852 | if (!pfn_valid(page)) | 861 | if (!pfn_valid(page)) |
853 | return; | 862 | return; |
854 | 863 | ||
855 | /* Find the actual page structure then map it and fix */ | 864 | /* Find the actual page structure then map it and fix */ |
856 | pg = pfn_to_page(page); | 865 | pg = pfn_to_page(page); |
857 | 866 | ||
858 | if (PageHighMem(pg)) | 867 | if (PageHighMem(pg)) |
859 | local_irq_save(flags); | 868 | local_irq_save(flags); |
860 | 869 | ||
861 | virt_addr = kmap_atomic(pg); | 870 | virt_addr = kmap_atomic(pg); |
862 | 871 | ||
863 | /* Perform architecture specific atomic scrub operation */ | 872 | /* Perform architecture specific atomic scrub operation */ |
864 | atomic_scrub(virt_addr + offset, size); | 873 | atomic_scrub(virt_addr + offset, size); |
865 | 874 | ||
866 | /* Unmap and complete */ | 875 | /* Unmap and complete */ |
867 | kunmap_atomic(virt_addr); | 876 | kunmap_atomic(virt_addr); |
868 | 877 | ||
869 | if (PageHighMem(pg)) | 878 | if (PageHighMem(pg)) |
870 | local_irq_restore(flags); | 879 | local_irq_restore(flags); |
871 | } | 880 | } |
872 | 881 | ||
873 | /* FIXME - should return -1 */ | 882 | /* FIXME - should return -1 */ |
874 | int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) | 883 | int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) |
875 | { | 884 | { |
876 | struct csrow_info **csrows = mci->csrows; | 885 | struct csrow_info **csrows = mci->csrows; |
877 | int row, i, j, n; | 886 | int row, i, j, n; |
878 | 887 | ||
879 | edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); | 888 | edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); |
880 | row = -1; | 889 | row = -1; |
881 | 890 | ||
882 | for (i = 0; i < mci->nr_csrows; i++) { | 891 | for (i = 0; i < mci->nr_csrows; i++) { |
883 | struct csrow_info *csrow = csrows[i]; | 892 | struct csrow_info *csrow = csrows[i]; |
884 | n = 0; | 893 | n = 0; |
885 | for (j = 0; j < csrow->nr_channels; j++) { | 894 | for (j = 0; j < csrow->nr_channels; j++) { |
886 | struct dimm_info *dimm = csrow->channels[j]->dimm; | 895 | struct dimm_info *dimm = csrow->channels[j]->dimm; |
887 | n += dimm->nr_pages; | 896 | n += dimm->nr_pages; |
888 | } | 897 | } |
889 | if (n == 0) | 898 | if (n == 0) |
890 | continue; | 899 | continue; |
891 | 900 | ||
892 | edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n", | 901 | edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n", |
893 | mci->mc_idx, | 902 | mci->mc_idx, |
894 | csrow->first_page, page, csrow->last_page, | 903 | csrow->first_page, page, csrow->last_page, |
895 | csrow->page_mask); | 904 | csrow->page_mask); |
896 | 905 | ||
897 | if ((page >= csrow->first_page) && | 906 | if ((page >= csrow->first_page) && |
898 | (page <= csrow->last_page) && | 907 | (page <= csrow->last_page) && |
899 | ((page & csrow->page_mask) == | 908 | ((page & csrow->page_mask) == |
900 | (csrow->first_page & csrow->page_mask))) { | 909 | (csrow->first_page & csrow->page_mask))) { |
901 | row = i; | 910 | row = i; |
902 | break; | 911 | break; |
903 | } | 912 | } |
904 | } | 913 | } |
905 | 914 | ||
906 | if (row == -1) | 915 | if (row == -1) |
907 | edac_mc_printk(mci, KERN_ERR, | 916 | edac_mc_printk(mci, KERN_ERR, |
908 | "could not look up page error address %lx\n", | 917 | "could not look up page error address %lx\n", |
909 | (unsigned long)page); | 918 | (unsigned long)page); |
910 | 919 | ||
911 | return row; | 920 | return row; |
912 | } | 921 | } |
913 | EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); | 922 | EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); |
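A hedged usage sketch: a csrow-based driver can translate a faulting PFN into a row index before reporting, with the variable names and layer positions below being illustrative:

	int row = edac_mc_find_csrow_by_page(mci, err_pfn);

	if (row >= 0)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_pfn, err_offset, syndrome,
				     row, -1, -1,
				     "single-bit ECC error", "");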
914 | 923 | ||
915 | const char *edac_layer_name[] = { | 924 | const char *edac_layer_name[] = { |
916 | [EDAC_MC_LAYER_BRANCH] = "branch", | 925 | [EDAC_MC_LAYER_BRANCH] = "branch", |
917 | [EDAC_MC_LAYER_CHANNEL] = "channel", | 926 | [EDAC_MC_LAYER_CHANNEL] = "channel", |
918 | [EDAC_MC_LAYER_SLOT] = "slot", | 927 | [EDAC_MC_LAYER_SLOT] = "slot", |
919 | [EDAC_MC_LAYER_CHIP_SELECT] = "csrow", | 928 | [EDAC_MC_LAYER_CHIP_SELECT] = "csrow", |
920 | [EDAC_MC_LAYER_ALL_MEM] = "memory", | 929 | [EDAC_MC_LAYER_ALL_MEM] = "memory", |
921 | }; | 930 | }; |
922 | EXPORT_SYMBOL_GPL(edac_layer_name); | 931 | EXPORT_SYMBOL_GPL(edac_layer_name); |
923 | 932 | ||
924 | static void edac_inc_ce_error(struct mem_ctl_info *mci, | 933 | static void edac_inc_ce_error(struct mem_ctl_info *mci, |
925 | bool enable_per_layer_report, | 934 | bool enable_per_layer_report, |
926 | const int pos[EDAC_MAX_LAYERS], | 935 | const int pos[EDAC_MAX_LAYERS], |
927 | const u16 count) | 936 | const u16 count) |
928 | { | 937 | { |
929 | int i, index = 0; | 938 | int i, index = 0; |
930 | 939 | ||
931 | mci->ce_mc += count; | 940 | mci->ce_mc += count; |
932 | 941 | ||
933 | if (!enable_per_layer_report) { | 942 | if (!enable_per_layer_report) { |
934 | mci->ce_noinfo_count += count; | 943 | mci->ce_noinfo_count += count; |
935 | return; | 944 | return; |
936 | } | 945 | } |
937 | 946 | ||
938 | for (i = 0; i < mci->n_layers; i++) { | 947 | for (i = 0; i < mci->n_layers; i++) { |
939 | if (pos[i] < 0) | 948 | if (pos[i] < 0) |
940 | break; | 949 | break; |
941 | index += pos[i]; | 950 | index += pos[i]; |
942 | mci->ce_per_layer[i][index] += count; | 951 | mci->ce_per_layer[i][index] += count; |
943 | 952 | ||
944 | if (i < mci->n_layers - 1) | 953 | if (i < mci->n_layers - 1) |
945 | index *= mci->layers[i + 1].size; | 954 | index *= mci->layers[i + 1].size; |
946 | } | 955 | } |
947 | } | 956 | } |
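Worked example of the flattening above, assuming two layers of sizes {4, 2} and pos = {1, 0}: the first pass adds pos[0] to get index = 1 and bumps ce_per_layer[0][1], then scales the index by layers[1].size to 2; the second pass adds pos[1] and lands on ce_per_layer[1][2]. The final index is pos[0] * size[1] + pos[1], a row-major flattening of the per-layer coordinates, with a counter updated at every intermediate depth along the way.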
948 | 957 | ||
949 | static void edac_inc_ue_error(struct mem_ctl_info *mci, | 958 | static void edac_inc_ue_error(struct mem_ctl_info *mci, |
950 | bool enable_per_layer_report, | 959 | bool enable_per_layer_report, |
951 | const int pos[EDAC_MAX_LAYERS], | 960 | const int pos[EDAC_MAX_LAYERS], |
952 | const u16 count) | 961 | const u16 count) |
953 | { | 962 | { |
954 | int i, index = 0; | 963 | int i, index = 0; |
955 | 964 | ||
956 | mci->ue_mc += count; | 965 | mci->ue_mc += count; |
957 | 966 | ||
958 | if (!enable_per_layer_report) { | 967 | if (!enable_per_layer_report) { |
959 | mci->ue_noinfo_count += count; | 968 | mci->ue_noinfo_count += count; |
960 | return; | 969 | return; |
961 | } | 970 | } |
962 | 971 | ||
963 | for (i = 0; i < mci->n_layers; i++) { | 972 | for (i = 0; i < mci->n_layers; i++) { |
964 | if (pos[i] < 0) | 973 | if (pos[i] < 0) |
965 | break; | 974 | break; |
966 | index += pos[i]; | 975 | index += pos[i]; |
967 | mci->ue_per_layer[i][index] += count; | 976 | mci->ue_per_layer[i][index] += count; |
968 | 977 | ||
969 | if (i < mci->n_layers - 1) | 978 | if (i < mci->n_layers - 1) |
970 | index *= mci->layers[i + 1].size; | 979 | index *= mci->layers[i + 1].size; |
971 | } | 980 | } |
972 | } | 981 | } |
973 | 982 | ||
974 | static void edac_ce_error(struct mem_ctl_info *mci, | 983 | static void edac_ce_error(struct mem_ctl_info *mci, |
975 | const u16 error_count, | 984 | const u16 error_count, |
976 | const int pos[EDAC_MAX_LAYERS], | 985 | const int pos[EDAC_MAX_LAYERS], |
977 | const char *msg, | 986 | const char *msg, |
978 | const char *location, | 987 | const char *location, |
979 | const char *label, | 988 | const char *label, |
980 | const char *detail, | 989 | const char *detail, |
981 | const char *other_detail, | 990 | const char *other_detail, |
982 | const bool enable_per_layer_report, | 991 | const bool enable_per_layer_report, |
983 | const unsigned long page_frame_number, | 992 | const unsigned long page_frame_number, |
984 | const unsigned long offset_in_page, | 993 | const unsigned long offset_in_page, |
985 | long grain) | 994 | long grain) |
986 | { | 995 | { |
987 | unsigned long remapped_page; | 996 | unsigned long remapped_page; |
988 | char *msg_aux = ""; | 997 | char *msg_aux = ""; |
989 | 998 | ||
990 | if (*msg) | 999 | if (*msg) |
991 | msg_aux = " "; | 1000 | msg_aux = " "; |
992 | 1001 | ||
993 | if (edac_mc_get_log_ce()) { | 1002 | if (edac_mc_get_log_ce()) { |
994 | if (other_detail && *other_detail) | 1003 | if (other_detail && *other_detail) |
995 | edac_mc_printk(mci, KERN_WARNING, | 1004 | edac_mc_printk(mci, KERN_WARNING, |
996 | "%d CE %s%son %s (%s %s - %s)\n", | 1005 | "%d CE %s%son %s (%s %s - %s)\n", |
997 | error_count, msg, msg_aux, label, | 1006 | error_count, msg, msg_aux, label, |
998 | location, detail, other_detail); | 1007 | location, detail, other_detail); |
999 | else | 1008 | else |
1000 | edac_mc_printk(mci, KERN_WARNING, | 1009 | edac_mc_printk(mci, KERN_WARNING, |
1001 | "%d CE %s%son %s (%s %s)\n", | 1010 | "%d CE %s%son %s (%s %s)\n", |
1002 | error_count, msg, msg_aux, label, | 1011 | error_count, msg, msg_aux, label, |
1003 | location, detail); | 1012 | location, detail); |
1004 | } | 1013 | } |
1005 | edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); | 1014 | edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); |
1006 | 1015 | ||
1007 | if (mci->scrub_mode & SCRUB_SW_SRC) { | 1016 | if (mci->scrub_mode & SCRUB_SW_SRC) { |
1008 | /* | 1017 | /* |
1009 | * Some memory controllers (called MCs below) can remap | 1018 | * Some memory controllers (called MCs below) can remap |
1010 | * memory so that it is still available at a different | 1019 | * memory so that it is still available at a different |
1011 | * address when PCI devices map into memory. | 1020 | * address when PCI devices map into memory. |
1012 | * MCs that can't do this lose the memory where PCI | 1021 | * MCs that can't do this lose the memory where PCI |
1013 | * devices are mapped. This mapping is MC-dependent | 1022 | * devices are mapped. This mapping is MC-dependent |
1014 | * and so we call back into the MC driver for it to | 1023 | * and so we call back into the MC driver for it to |
1015 | * map the MC page to a physical (CPU) page which can | 1024 | * map the MC page to a physical (CPU) page which can |
1016 | * then be mapped to a virtual page - which can then | 1025 | * then be mapped to a virtual page - which can then |
1017 | * be scrubbed. | 1026 | * be scrubbed. |
1018 | */ | 1027 | */ |
1019 | remapped_page = mci->ctl_page_to_phys ? | 1028 | remapped_page = mci->ctl_page_to_phys ? |
1020 | mci->ctl_page_to_phys(mci, page_frame_number) : | 1029 | mci->ctl_page_to_phys(mci, page_frame_number) : |
1021 | page_frame_number; | 1030 | page_frame_number; |
1022 | 1031 | ||
1023 | edac_mc_scrub_block(remapped_page, | 1032 | edac_mc_scrub_block(remapped_page, |
1024 | offset_in_page, grain); | 1033 | offset_in_page, grain); |
1025 | } | 1034 | } |
1026 | } | 1035 | } |
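The remapping hook described in the comment above is driver-supplied; a minimal sketch of wiring one up, where the identity mapping is an assumption for an MC that does not remap memory:

	/* Hypothetical callback: translate an MC-local page to a CPU-visible one. */
	static unsigned long my_ctl_page_to_phys(struct mem_ctl_info *mci,
						 unsigned long page)
	{
		return page;	/* identity mapping: this MC does not remap */
	}

	/* ... in the driver's probe path ... */
		mci->scrub_mode = SCRUB_SW_SRC;
		mci->ctl_page_to_phys = my_ctl_page_to_phys;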
1027 | 1036 | ||
1028 | static void edac_ue_error(struct mem_ctl_info *mci, | 1037 | static void edac_ue_error(struct mem_ctl_info *mci, |
1029 | const u16 error_count, | 1038 | const u16 error_count, |
1030 | const int pos[EDAC_MAX_LAYERS], | 1039 | const int pos[EDAC_MAX_LAYERS], |
1031 | const char *msg, | 1040 | const char *msg, |
1032 | const char *location, | 1041 | const char *location, |
1033 | const char *label, | 1042 | const char *label, |
1034 | const char *detail, | 1043 | const char *detail, |
1035 | const char *other_detail, | 1044 | const char *other_detail, |
1036 | const bool enable_per_layer_report) | 1045 | const bool enable_per_layer_report) |
1037 | { | 1046 | { |
1038 | char *msg_aux = ""; | 1047 | char *msg_aux = ""; |
1039 | 1048 | ||
1040 | if (*msg) | 1049 | if (*msg) |
1041 | msg_aux = " "; | 1050 | msg_aux = " "; |
1042 | 1051 | ||
1043 | if (edac_mc_get_log_ue()) { | 1052 | if (edac_mc_get_log_ue()) { |
1044 | if (other_detail && *other_detail) | 1053 | if (other_detail && *other_detail) |
1045 | edac_mc_printk(mci, KERN_WARNING, | 1054 | edac_mc_printk(mci, KERN_WARNING, |
1046 | "%d UE %s%son %s (%s %s - %s)\n", | 1055 | "%d UE %s%son %s (%s %s - %s)\n", |
1047 | error_count, msg, msg_aux, label, | 1056 | error_count, msg, msg_aux, label, |
1048 | location, detail, other_detail); | 1057 | location, detail, other_detail); |
1049 | else | 1058 | else |
1050 | edac_mc_printk(mci, KERN_WARNING, | 1059 | edac_mc_printk(mci, KERN_WARNING, |
1051 | "%d UE %s%son %s (%s %s)\n", | 1060 | "%d UE %s%son %s (%s %s)\n", |
1052 | error_count, msg, msg_aux, label, | 1061 | error_count, msg, msg_aux, label, |
1053 | location, detail); | 1062 | location, detail); |
1054 | } | 1063 | } |
1055 | 1064 | ||
1056 | if (edac_mc_get_panic_on_ue()) { | 1065 | if (edac_mc_get_panic_on_ue()) { |
1057 | if (other_detail && *other_detail) | 1066 | if (other_detail && *other_detail) |
1058 | panic("UE %s%son %s (%s%s - %s)\n", | 1067 | panic("UE %s%son %s (%s%s - %s)\n", |
1059 | msg, msg_aux, label, location, detail, other_detail); | 1068 | msg, msg_aux, label, location, detail, other_detail); |
1060 | else | 1069 | else |
1061 | panic("UE %s%son %s (%s%s)\n", | 1070 | panic("UE %s%son %s (%s%s)\n", |
1062 | msg, msg_aux, label, location, detail); | 1071 | msg, msg_aux, label, location, detail); |
1063 | } | 1072 | } |
1064 | 1073 | ||
1065 | edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count); | 1074 | edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count); |
1066 | } | 1075 | } |
1067 | 1076 | ||
1068 | /** | 1077 | /** |
1069 | * edac_raw_mc_handle_error - reports a memory event to userspace without doing | 1078 | * edac_raw_mc_handle_error - reports a memory event to userspace without doing |
1070 | * anything to discover the error location | 1079 | * anything to discover the error location |
1071 | * | 1080 | * |
1072 | * @type: severity of the error (CE/UE/Fatal) | 1081 | * @type: severity of the error (CE/UE/Fatal) |
1073 | * @mci: a struct mem_ctl_info pointer | 1082 | * @mci: a struct mem_ctl_info pointer |
1074 | * @e: error description | 1083 | * @e: error description |
1075 | * | 1084 | * |
1076 | * This raw function is used internally by edac_mc_handle_error(). It should | 1085 | * This raw function is used internally by edac_mc_handle_error(). It should |
1077 | * only be called directly when the hardware error comes directly from the | 1086 | * only be called directly when the hardware error comes directly from the |
1078 | * BIOS, as in the case of the APEI GHES driver. | 1087 | * BIOS, as in the case of the APEI GHES driver. |
1079 | */ | 1088 | */ |
1080 | void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type, | 1089 | void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type, |
1081 | struct mem_ctl_info *mci, | 1090 | struct mem_ctl_info *mci, |
1082 | struct edac_raw_error_desc *e) | 1091 | struct edac_raw_error_desc *e) |
1083 | { | 1092 | { |
1084 | char detail[80]; | 1093 | char detail[80]; |
1085 | int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer }; | 1094 | int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer }; |
1086 | 1095 | ||
1087 | /* Memory type dependent details about the error */ | 1096 | /* Memory type dependent details about the error */ |
1088 | if (type == HW_EVENT_ERR_CORRECTED) { | 1097 | if (type == HW_EVENT_ERR_CORRECTED) { |
1089 | snprintf(detail, sizeof(detail), | 1098 | snprintf(detail, sizeof(detail), |
1090 | "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx", | 1099 | "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx", |
1091 | e->page_frame_number, e->offset_in_page, | 1100 | e->page_frame_number, e->offset_in_page, |
1092 | e->grain, e->syndrome); | 1101 | e->grain, e->syndrome); |
1093 | edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label, | 1102 | edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label, |
1094 | detail, e->other_detail, e->enable_per_layer_report, | 1103 | detail, e->other_detail, e->enable_per_layer_report, |
1095 | e->page_frame_number, e->offset_in_page, e->grain); | 1104 | e->page_frame_number, e->offset_in_page, e->grain); |
1096 | } else { | 1105 | } else { |
1097 | snprintf(detail, sizeof(detail), | 1106 | snprintf(detail, sizeof(detail), |
1098 | "page:0x%lx offset:0x%lx grain:%ld", | 1107 | "page:0x%lx offset:0x%lx grain:%ld", |
1099 | e->page_frame_number, e->offset_in_page, e->grain); | 1108 | e->page_frame_number, e->offset_in_page, e->grain); |
1100 | 1109 | ||
1101 | edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label, | 1110 | edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label, |
1102 | detail, e->other_detail, e->enable_per_layer_report); | 1111 | detail, e->other_detail, e->enable_per_layer_report); |
1103 | } | 1112 | } |
1104 | 1113 | ||
1105 | 1114 | ||
1106 | } | 1115 | } |
1107 | EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error); | 1116 | EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error); |
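A caller that already knows the error location fills the descriptor embedded in the mci and hands it over directly; a hedged sketch along the lines of the GHES use case, with the field values being illustrative:

	struct edac_raw_error_desc *e = &mci->error_desc;

	memset(e, 0, sizeof(*e));
	e->error_count = 1;
	e->grain = 1;
	e->top_layer = e->mid_layer = e->low_layer = -1;	/* location unknown */
	e->msg = "firmware-reported memory error";
	strcpy(e->label, "unknown memory");

	edac_raw_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, e);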
1108 | 1117 | ||
1109 | /** | 1118 | /** |
1110 | * edac_mc_handle_error - reports a memory event to userspace | 1119 | * edac_mc_handle_error - reports a memory event to userspace |
1111 | * | 1120 | * |
1112 | * @type: severity of the error (CE/UE/Fatal) | 1121 | * @type: severity of the error (CE/UE/Fatal) |
1113 | * @mci: a struct mem_ctl_info pointer | 1122 | * @mci: a struct mem_ctl_info pointer |
1114 | * @error_count: Number of errors of the same type | 1123 | * @error_count: Number of errors of the same type |
1115 | * @page_frame_number: mem page where the error occurred | 1124 | * @page_frame_number: mem page where the error occurred |
1116 | * @offset_in_page: offset of the error inside the page | 1125 | * @offset_in_page: offset of the error inside the page |
1117 | * @syndrome: ECC syndrome | 1126 | * @syndrome: ECC syndrome |
1118 | * @top_layer: Memory layer[0] position | 1127 | * @top_layer: Memory layer[0] position |
1119 | * @mid_layer: Memory layer[1] position | 1128 | * @mid_layer: Memory layer[1] position |
1120 | * @low_layer: Memory layer[2] position | 1129 | * @low_layer: Memory layer[2] position |
1121 | * @msg: Message meaningful to the end users that | 1130 | * @msg: Message meaningful to the end users that |
1122 | * explains the event | 1131 | * explains the event |
1123 | * @other_detail: Technical details about the event that | 1132 | * @other_detail: Technical details about the event that |
1124 | * may help hardware manufacturers and | 1133 | * may help hardware manufacturers and |
1125 | * EDAC developers to analyse the event | 1134 | * EDAC developers to analyse the event |
1126 | */ | 1135 | */ |
1127 | void edac_mc_handle_error(const enum hw_event_mc_err_type type, | 1136 | void edac_mc_handle_error(const enum hw_event_mc_err_type type, |
1128 | struct mem_ctl_info *mci, | 1137 | struct mem_ctl_info *mci, |
1129 | const u16 error_count, | 1138 | const u16 error_count, |
1130 | const unsigned long page_frame_number, | 1139 | const unsigned long page_frame_number, |
1131 | const unsigned long offset_in_page, | 1140 | const unsigned long offset_in_page, |
1132 | const unsigned long syndrome, | 1141 | const unsigned long syndrome, |
1133 | const int top_layer, | 1142 | const int top_layer, |
1134 | const int mid_layer, | 1143 | const int mid_layer, |
1135 | const int low_layer, | 1144 | const int low_layer, |
1136 | const char *msg, | 1145 | const char *msg, |
1137 | const char *other_detail) | 1146 | const char *other_detail) |
1138 | { | 1147 | { |
1139 | char *p; | 1148 | char *p; |
1140 | int row = -1, chan = -1; | 1149 | int row = -1, chan = -1; |
1141 | int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; | 1150 | int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; |
1142 | int i, n_labels = 0; | 1151 | int i, n_labels = 0; |
1143 | u8 grain_bits; | 1152 | u8 grain_bits; |
1144 | struct edac_raw_error_desc *e = &mci->error_desc; | 1153 | struct edac_raw_error_desc *e = &mci->error_desc; |
1145 | 1154 | ||
1146 | edac_dbg(3, "MC%d\n", mci->mc_idx); | 1155 | edac_dbg(3, "MC%d\n", mci->mc_idx); |
1147 | 1156 | ||
1148 | /* Fills the error report buffer */ | 1157 | /* Fills the error report buffer */ |
1149 | memset(e, 0, sizeof (*e)); | 1158 | memset(e, 0, sizeof (*e)); |
1150 | e->error_count = error_count; | 1159 | e->error_count = error_count; |
1151 | e->top_layer = top_layer; | 1160 | e->top_layer = top_layer; |
1152 | e->mid_layer = mid_layer; | 1161 | e->mid_layer = mid_layer; |
1153 | e->low_layer = low_layer; | 1162 | e->low_layer = low_layer; |
1154 | e->page_frame_number = page_frame_number; | 1163 | e->page_frame_number = page_frame_number; |
1155 | e->offset_in_page = offset_in_page; | 1164 | e->offset_in_page = offset_in_page; |
1156 | e->syndrome = syndrome; | 1165 | e->syndrome = syndrome; |
1157 | e->msg = msg; | 1166 | e->msg = msg; |
1158 | e->other_detail = other_detail; | 1167 | e->other_detail = other_detail; |
1159 | 1168 | ||
1160 | /* | 1169 | /* |
1161 | * Check if the event report is consistent and if the memory | 1170 | * Check if the event report is consistent and if the memory |
1162 | * location is known. If it is known, enable_per_layer_report will be | 1171 | * location is known. If it is known, enable_per_layer_report will be |
1163 | * true, the DIMM(s) label info will be filled and the per-layer | 1172 | * true, the DIMM(s) label info will be filled and the per-layer |
1164 | * error counters will be incremented. | 1173 | * error counters will be incremented. |
1165 | */ | 1174 | */ |
1166 | for (i = 0; i < mci->n_layers; i++) { | 1175 | for (i = 0; i < mci->n_layers; i++) { |
1167 | if (pos[i] >= (int)mci->layers[i].size) { | 1176 | if (pos[i] >= (int)mci->layers[i].size) { |
1168 | 1177 | ||
1169 | edac_mc_printk(mci, KERN_ERR, | 1178 | edac_mc_printk(mci, KERN_ERR, |
1170 | "INTERNAL ERROR: %s value is out of range (%d >= %d)\n", | 1179 | "INTERNAL ERROR: %s value is out of range (%d >= %d)\n", |
1171 | edac_layer_name[mci->layers[i].type], | 1180 | edac_layer_name[mci->layers[i].type], |
1172 | pos[i], mci->layers[i].size); | 1181 | pos[i], mci->layers[i].size); |
1173 | /* | 1182 | /* |
1174 | * Instead of just returning it, let's use what's | 1183 | * Instead of just returning it, let's use what's |
1175 | * known about the error. The increment routines and | 1184 | * known about the error. The increment routines and |
1176 | * the DIMM filter logic will do the right thing by | 1185 | * the DIMM filter logic will do the right thing by |
1177 | * pointing to the likely damaged DIMMs. | 1186 | * pointing to the likely damaged DIMMs. |
1178 | */ | 1187 | */ |
1179 | pos[i] = -1; | 1188 | pos[i] = -1; |
1180 | } | 1189 | } |
1181 | if (pos[i] >= 0) | 1190 | if (pos[i] >= 0) |
1182 | e->enable_per_layer_report = true; | 1191 | e->enable_per_layer_report = true; |
1183 | } | 1192 | } |
1184 | 1193 | ||
1185 | /* | 1194 | /* |
1186 | * Get the dimm label/grain that applies to the match criteria. | 1195 | * Get the dimm label/grain that applies to the match criteria. |
1187 | * As the error algorithm may not be able to point to just one memory | 1196 | * As the error algorithm may not be able to point to just one memory |
1188 | * stick, the logic here will get all possible labels that could | 1197 | * stick, the logic here will get all possible labels that could |
1189 | * potentially be affected by the error. | 1198 | * potentially be affected by the error. |
1190 | * On FB-DIMM memory controllers, for uncorrected errors, it is common | 1199 | * On FB-DIMM memory controllers, for uncorrected errors, it is common |
1191 | * to have only the MC channel and the MC dimm (also called "branch") | 1200 | * to have only the MC channel and the MC dimm (also called "branch") |
1192 | * but the channel is not known, as the memory is arranged in pairs, | 1201 | * but the channel is not known, as the memory is arranged in pairs, |
1193 | * where each DIMM belongs to a separate channel within the same | 1202 | * where each DIMM belongs to a separate channel within the same |
1194 | * branch. | 1203 | * branch. |
1195 | */ | 1204 | */ |
1196 | p = e->label; | 1205 | p = e->label; |
1197 | *p = '\0'; | 1206 | *p = '\0'; |
1198 | 1207 | ||
1199 | for (i = 0; i < mci->tot_dimms; i++) { | 1208 | for (i = 0; i < mci->tot_dimms; i++) { |
1200 | struct dimm_info *dimm = mci->dimms[i]; | 1209 | struct dimm_info *dimm = mci->dimms[i]; |
1201 | 1210 | ||
1202 | if (top_layer >= 0 && top_layer != dimm->location[0]) | 1211 | if (top_layer >= 0 && top_layer != dimm->location[0]) |
1203 | continue; | 1212 | continue; |
1204 | if (mid_layer >= 0 && mid_layer != dimm->location[1]) | 1213 | if (mid_layer >= 0 && mid_layer != dimm->location[1]) |
1205 | continue; | 1214 | continue; |
1206 | if (low_layer >= 0 && low_layer != dimm->location[2]) | 1215 | if (low_layer >= 0 && low_layer != dimm->location[2]) |
1207 | continue; | 1216 | continue; |
1208 | 1217 | ||
1209 | /* get the max grain, over the error match range */ | 1218 | /* get the max grain, over the error match range */ |
1210 | if (dimm->grain > e->grain) | 1219 | if (dimm->grain > e->grain) |
1211 | e->grain = dimm->grain; | 1220 | e->grain = dimm->grain; |
1212 | 1221 | ||
1213 | /* | 1222 | /* |
1214 | * If the error is memory-controller wide, there's no need to | 1223 | * If the error is memory-controller wide, there's no need to |
1215 | * seek for the affected DIMMs because the whole | 1224 | * seek for the affected DIMMs because the whole |
1216 | * channel/memory controller/... may be affected. | 1225 | * channel/memory controller/... may be affected. |
1217 | * Also, don't show errors for empty DIMM slots. | 1226 | * Also, don't show errors for empty DIMM slots. |
1218 | */ | 1227 | */ |
1219 | if (e->enable_per_layer_report && dimm->nr_pages) { | 1228 | if (e->enable_per_layer_report && dimm->nr_pages) { |
1220 | if (n_labels >= EDAC_MAX_LABELS) { | 1229 | if (n_labels >= EDAC_MAX_LABELS) { |
1221 | e->enable_per_layer_report = false; | 1230 | e->enable_per_layer_report = false; |
1222 | break; | 1231 | break; |
1223 | } | 1232 | } |
1224 | n_labels++; | 1233 | n_labels++; |
1225 | if (p != e->label) { | 1234 | if (p != e->label) { |
1226 | strcpy(p, OTHER_LABEL); | 1235 | strcpy(p, OTHER_LABEL); |
1227 | p += strlen(OTHER_LABEL); | 1236 | p += strlen(OTHER_LABEL); |
1228 | } | 1237 | } |
1229 | strcpy(p, dimm->label); | 1238 | strcpy(p, dimm->label); |
1230 | p += strlen(p); | 1239 | p += strlen(p); |
1231 | *p = '\0'; | 1240 | *p = '\0'; |
1232 | 1241 | ||
1233 | /* | 1242 | /* |
1234 | * get csrow/channel of the DIMM, in order to allow | 1243 | * get csrow/channel of the DIMM, in order to allow |
1235 | * incrementing the compat API counters | 1244 | * incrementing the compat API counters |
1236 | */ | 1245 | */ |
1237 | edac_dbg(4, "%s csrows map: (%d,%d)\n", | 1246 | edac_dbg(4, "%s csrows map: (%d,%d)\n", |
1238 | mci->csbased ? "rank" : "dimm", | 1247 | mci->csbased ? "rank" : "dimm", |
1239 | dimm->csrow, dimm->cschannel); | 1248 | dimm->csrow, dimm->cschannel); |
1240 | if (row == -1) | 1249 | if (row == -1) |
1241 | row = dimm->csrow; | 1250 | row = dimm->csrow; |
1242 | else if (row >= 0 && row != dimm->csrow) | 1251 | else if (row >= 0 && row != dimm->csrow) |
1243 | row = -2; | 1252 | row = -2; |
1244 | 1253 | ||
1245 | if (chan == -1) | 1254 | if (chan == -1) |
1246 | chan = dimm->cschannel; | 1255 | chan = dimm->cschannel; |
1247 | else if (chan >= 0 && chan != dimm->cschannel) | 1256 | else if (chan >= 0 && chan != dimm->cschannel) |
1248 | chan = -2; | 1257 | chan = -2; |
1249 | } | 1258 | } |
1250 | } | 1259 | } |
1251 | 1260 | ||
1252 | if (!e->enable_per_layer_report) { | 1261 | if (!e->enable_per_layer_report) { |
1253 | strcpy(e->label, "any memory"); | 1262 | strcpy(e->label, "any memory"); |
1254 | } else { | 1263 | } else { |
1255 | edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); | 1264 | edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); |
1256 | if (p == e->label) | 1265 | if (p == e->label) |
1257 | strcpy(e->label, "unknown memory"); | 1266 | strcpy(e->label, "unknown memory"); |
1258 | if (type == HW_EVENT_ERR_CORRECTED) { | 1267 | if (type == HW_EVENT_ERR_CORRECTED) { |
1259 | if (row >= 0) { | 1268 | if (row >= 0) { |
1260 | mci->csrows[row]->ce_count += error_count; | 1269 | mci->csrows[row]->ce_count += error_count; |
1261 | if (chan >= 0) | 1270 | if (chan >= 0) |
1262 | mci->csrows[row]->channels[chan]->ce_count += error_count; | 1271 | mci->csrows[row]->channels[chan]->ce_count += error_count; |
1263 | } | 1272 | } |
1264 | } else | 1273 | } else |
1265 | if (row >= 0) | 1274 | if (row >= 0) |
1266 | mci->csrows[row]->ue_count += error_count; | 1275 | mci->csrows[row]->ue_count += error_count; |
1267 | } | 1276 | } |
1268 | 1277 | ||
1269 | /* Fill the RAM location data */ | 1278 | /* Fill the RAM location data */ |
1270 | p = e->location; | 1279 | p = e->location; |
1271 | 1280 | ||
1272 | for (i = 0; i < mci->n_layers; i++) { | 1281 | for (i = 0; i < mci->n_layers; i++) { |
1273 | if (pos[i] < 0) | 1282 | if (pos[i] < 0) |
1274 | continue; | 1283 | continue; |
1275 | 1284 | ||
1276 | p += sprintf(p, "%s:%d ", | 1285 | p += sprintf(p, "%s:%d ", |
1277 | edac_layer_name[mci->layers[i].type], | 1286 | edac_layer_name[mci->layers[i].type], |
1278 | pos[i]); | 1287 | pos[i]); |
1279 | } | 1288 | } |
1280 | if (p > e->location) | 1289 | if (p > e->location) |
1281 | *(p - 1) = '\0'; | 1290 | *(p - 1) = '\0'; |
1282 | 1291 | ||
1283 | /* Report the error via the trace interface */ | 1292 | /* Report the error via the trace interface */ |
1284 | grain_bits = fls_long(e->grain) + 1; | 1293 | grain_bits = fls_long(e->grain) + 1; |
1285 | trace_mc_event(type, e->msg, e->label, e->error_count, | 1294 | trace_mc_event(type, e->msg, e->label, e->error_count, |
1286 | mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, | 1295 | mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, |
1287 | PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page, | 1296 | PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page, |
1288 | grain_bits, e->syndrome, e->other_detail); | 1297 | grain_bits, e->syndrome, e->other_detail); |
1289 | 1298 | ||
1290 | edac_raw_mc_handle_error(type, mci, e); | 1299 | edac_raw_mc_handle_error(type, mci, e); |
1291 | } | 1300 | } |
1292 | EXPORT_SYMBOL_GPL(edac_mc_handle_error); | 1301 | EXPORT_SYMBOL_GPL(edac_mc_handle_error); |
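Taken together with the kernel-doc above, a driver's error-decode path typically ends in a single call; a minimal sketch, where the layer positions depend on how the driver registered its layers and -1 marks an unknown position:

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     1,			/* error_count */
			     err_pfn, err_offset, syndrome,
			     channel, slot, -1,	/* layer positions */
			     "read data ECC error", "");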
1293 | 1302 |
drivers/edac/edac_mc_sysfs.c
1 | /* | 1 | /* |
2 | * edac_mc kernel module | 2 | * edac_mc kernel module |
3 | * (C) 2005-2007 Linux Networx (http://lnxi.com) | 3 | * (C) 2005-2007 Linux Networx (http://lnxi.com) |
4 | * | 4 | * |
5 | * This file may be distributed under the terms of the | 5 | * This file may be distributed under the terms of the |
6 | * GNU General Public License. | 6 | * GNU General Public License. |
7 | * | 7 | * |
8 | * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com | 8 | * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com |
9 | * | 9 | * |
10 | * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com> | 10 | * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com> |
11 | * The entire API was rewritten and ported to use struct device | 11 | * The entire API was rewritten and ported to use struct device |
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/edac.h> | 17 | #include <linux/edac.h> |
18 | #include <linux/bug.h> | 18 | #include <linux/bug.h> |
19 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
20 | #include <linux/uaccess.h> | 20 | #include <linux/uaccess.h> |
21 | 21 | ||
22 | #include "edac_core.h" | 22 | #include "edac_core.h" |
23 | #include "edac_module.h" | 23 | #include "edac_module.h" |
24 | 24 | ||
25 | /* MC EDAC controls, settable by module parameter and sysfs */ | 25 | /* MC EDAC controls, settable by module parameter and sysfs */ |
26 | static int edac_mc_log_ue = 1; | 26 | static int edac_mc_log_ue = 1; |
27 | static int edac_mc_log_ce = 1; | 27 | static int edac_mc_log_ce = 1; |
28 | static int edac_mc_panic_on_ue; | 28 | static int edac_mc_panic_on_ue; |
29 | static int edac_mc_poll_msec = 1000; | 29 | static int edac_mc_poll_msec = 1000; |
30 | 30 | ||
31 | /* Getter functions for above */ | 31 | /* Getter functions for above */ |
32 | int edac_mc_get_log_ue(void) | 32 | int edac_mc_get_log_ue(void) |
33 | { | 33 | { |
34 | return edac_mc_log_ue; | 34 | return edac_mc_log_ue; |
35 | } | 35 | } |
36 | 36 | ||
37 | int edac_mc_get_log_ce(void) | 37 | int edac_mc_get_log_ce(void) |
38 | { | 38 | { |
39 | return edac_mc_log_ce; | 39 | return edac_mc_log_ce; |
40 | } | 40 | } |
41 | 41 | ||
42 | int edac_mc_get_panic_on_ue(void) | 42 | int edac_mc_get_panic_on_ue(void) |
43 | { | 43 | { |
44 | return edac_mc_panic_on_ue; | 44 | return edac_mc_panic_on_ue; |
45 | } | 45 | } |
46 | 46 | ||
47 | /* this is temporary */ | 47 | /* this is temporary */ |
48 | int edac_mc_get_poll_msec(void) | 48 | int edac_mc_get_poll_msec(void) |
49 | { | 49 | { |
50 | return edac_mc_poll_msec; | 50 | return edac_mc_poll_msec; |
51 | } | 51 | } |
52 | 52 | ||
53 | static int edac_set_poll_msec(const char *val, struct kernel_param *kp) | 53 | static int edac_set_poll_msec(const char *val, struct kernel_param *kp) |
54 | { | 54 | { |
55 | long l; | 55 | long l; |
56 | int ret; | 56 | int ret; |
57 | 57 | ||
58 | if (!val) | 58 | if (!val) |
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | 60 | ||
61 | ret = strict_strtol(val, 0, &l); | 61 | ret = strict_strtol(val, 0, &l); |
62 | if (ret == -EINVAL || ((int)l != l)) | 62 | if (ret == -EINVAL || ((int)l != l)) |
63 | return -EINVAL; | 63 | return -EINVAL; |
64 | *((int *)kp->arg) = l; | 64 | *((int *)kp->arg) = l; |
65 | 65 | ||
66 | /* notify edac_mc engine to reset the poll period */ | 66 | /* notify edac_mc engine to reset the poll period */ |
67 | edac_mc_reset_delay_period(l); | 67 | edac_mc_reset_delay_period(l); |
68 | 68 | ||
69 | return 0; | 69 | return 0; |
70 | } | 70 | } |
71 | 71 | ||
72 | /* Parameter declarations for above */ | 72 | /* Parameter declarations for above */ |
73 | module_param(edac_mc_panic_on_ue, int, 0644); | 73 | module_param(edac_mc_panic_on_ue, int, 0644); |
74 | MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); | 74 | MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); |
75 | module_param(edac_mc_log_ue, int, 0644); | 75 | module_param(edac_mc_log_ue, int, 0644); |
76 | MODULE_PARM_DESC(edac_mc_log_ue, | 76 | MODULE_PARM_DESC(edac_mc_log_ue, |
77 | "Log uncorrectable error to console: 0=off 1=on"); | 77 | "Log uncorrectable error to console: 0=off 1=on"); |
78 | module_param(edac_mc_log_ce, int, 0644); | 78 | module_param(edac_mc_log_ce, int, 0644); |
79 | MODULE_PARM_DESC(edac_mc_log_ce, | 79 | MODULE_PARM_DESC(edac_mc_log_ce, |
80 | "Log correctable error to console: 0=off 1=on"); | 80 | "Log correctable error to console: 0=off 1=on"); |
81 | module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int, | 81 | module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int, |
82 | &edac_mc_poll_msec, 0644); | 82 | &edac_mc_poll_msec, 0644); |
83 | MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); | 83 | MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); |
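Because all four parameters are registered with mode 0644 they are also writable at runtime; assuming the mainline module name edac_core, a write to /sys/module/edac_core/parameters/edac_mc_poll_msec is routed through edac_set_poll_msec() above, so edac_mc_reset_delay_period() re-arms the polling work with the new period immediately rather than waiting for the old interval to elapse.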
84 | 84 | ||
85 | static struct device *mci_pdev; | 85 | static struct device *mci_pdev; |
86 | 86 | ||
87 | /* | 87 | /* |
88 | * various constants for Memory Controllers | 88 | * various constants for Memory Controllers |
89 | */ | 89 | */ |
90 | static const char * const mem_types[] = { | 90 | static const char * const mem_types[] = { |
91 | [MEM_EMPTY] = "Empty", | 91 | [MEM_EMPTY] = "Empty", |
92 | [MEM_RESERVED] = "Reserved", | 92 | [MEM_RESERVED] = "Reserved", |
93 | [MEM_UNKNOWN] = "Unknown", | 93 | [MEM_UNKNOWN] = "Unknown", |
94 | [MEM_FPM] = "FPM", | 94 | [MEM_FPM] = "FPM", |
95 | [MEM_EDO] = "EDO", | 95 | [MEM_EDO] = "EDO", |
96 | [MEM_BEDO] = "BEDO", | 96 | [MEM_BEDO] = "BEDO", |
97 | [MEM_SDR] = "Unbuffered-SDR", | 97 | [MEM_SDR] = "Unbuffered-SDR", |
98 | [MEM_RDR] = "Registered-SDR", | 98 | [MEM_RDR] = "Registered-SDR", |
99 | [MEM_DDR] = "Unbuffered-DDR", | 99 | [MEM_DDR] = "Unbuffered-DDR", |
100 | [MEM_RDDR] = "Registered-DDR", | 100 | [MEM_RDDR] = "Registered-DDR", |
101 | [MEM_RMBS] = "RMBS", | 101 | [MEM_RMBS] = "RMBS", |
102 | [MEM_DDR2] = "Unbuffered-DDR2", | 102 | [MEM_DDR2] = "Unbuffered-DDR2", |
103 | [MEM_FB_DDR2] = "FullyBuffered-DDR2", | 103 | [MEM_FB_DDR2] = "FullyBuffered-DDR2", |
104 | [MEM_RDDR2] = "Registered-DDR2", | 104 | [MEM_RDDR2] = "Registered-DDR2", |
105 | [MEM_XDR] = "XDR", | 105 | [MEM_XDR] = "XDR", |
106 | [MEM_DDR3] = "Unbuffered-DDR3", | 106 | [MEM_DDR3] = "Unbuffered-DDR3", |
107 | [MEM_RDDR3] = "Registered-DDR3" | 107 | [MEM_RDDR3] = "Registered-DDR3" |
108 | }; | 108 | }; |
109 | 109 | ||
110 | static const char * const dev_types[] = { | 110 | static const char * const dev_types[] = { |
111 | [DEV_UNKNOWN] = "Unknown", | 111 | [DEV_UNKNOWN] = "Unknown", |
112 | [DEV_X1] = "x1", | 112 | [DEV_X1] = "x1", |
113 | [DEV_X2] = "x2", | 113 | [DEV_X2] = "x2", |
114 | [DEV_X4] = "x4", | 114 | [DEV_X4] = "x4", |
115 | [DEV_X8] = "x8", | 115 | [DEV_X8] = "x8", |
116 | [DEV_X16] = "x16", | 116 | [DEV_X16] = "x16", |
117 | [DEV_X32] = "x32", | 117 | [DEV_X32] = "x32", |
118 | [DEV_X64] = "x64" | 118 | [DEV_X64] = "x64" |
119 | }; | 119 | }; |
120 | 120 | ||
121 | static const char * const edac_caps[] = { | 121 | static const char * const edac_caps[] = { |
122 | [EDAC_UNKNOWN] = "Unknown", | 122 | [EDAC_UNKNOWN] = "Unknown", |
123 | [EDAC_NONE] = "None", | 123 | [EDAC_NONE] = "None", |
124 | [EDAC_RESERVED] = "Reserved", | 124 | [EDAC_RESERVED] = "Reserved", |
125 | [EDAC_PARITY] = "PARITY", | 125 | [EDAC_PARITY] = "PARITY", |
126 | [EDAC_EC] = "EC", | 126 | [EDAC_EC] = "EC", |
127 | [EDAC_SECDED] = "SECDED", | 127 | [EDAC_SECDED] = "SECDED", |
128 | [EDAC_S2ECD2ED] = "S2ECD2ED", | 128 | [EDAC_S2ECD2ED] = "S2ECD2ED", |
129 | [EDAC_S4ECD4ED] = "S4ECD4ED", | 129 | [EDAC_S4ECD4ED] = "S4ECD4ED", |
130 | [EDAC_S8ECD8ED] = "S8ECD8ED", | 130 | [EDAC_S8ECD8ED] = "S8ECD8ED", |
131 | [EDAC_S16ECD16ED] = "S16ECD16ED" | 131 | [EDAC_S16ECD16ED] = "S16ECD16ED" |
132 | }; | 132 | }; |
133 | 133 | ||
134 | #ifdef CONFIG_EDAC_LEGACY_SYSFS | 134 | #ifdef CONFIG_EDAC_LEGACY_SYSFS |
135 | /* | 135 | /* |
136 | * EDAC sysfs CSROW data structures and methods | 136 | * EDAC sysfs CSROW data structures and methods |
137 | */ | 137 | */ |
138 | 138 | ||
139 | #define to_csrow(k) container_of(k, struct csrow_info, dev) | 139 | #define to_csrow(k) container_of(k, struct csrow_info, dev) |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * We need it to avoid namespace conflicts between the legacy API | 142 | * We need it to avoid namespace conflicts between the legacy API |
143 | * and the per-dimm/per-rank one | 143 | * and the per-dimm/per-rank one |
144 | */ | 144 | */ |
145 | #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ | 145 | #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ |
146 | static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) | 146 | static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) |
147 | 147 | ||
148 | struct dev_ch_attribute { | 148 | struct dev_ch_attribute { |
149 | struct device_attribute attr; | 149 | struct device_attribute attr; |
150 | int channel; | 150 | int channel; |
151 | }; | 151 | }; |
152 | 152 | ||
153 | #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ | 153 | #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ |
154 | struct dev_ch_attribute dev_attr_legacy_##_name = \ | 154 | struct dev_ch_attribute dev_attr_legacy_##_name = \ |
155 | { __ATTR(_name, _mode, _show, _store), (_var) } | 155 | { __ATTR(_name, _mode, _show, _store), (_var) } |
156 | 156 | ||
157 | #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) | 157 | #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) |
158 | 158 | ||
159 | /* Default csrow<id> attribute show/store functions */ | 159 | /* Default csrow<id> attribute show/store functions */ |
160 | static ssize_t csrow_ue_count_show(struct device *dev, | 160 | static ssize_t csrow_ue_count_show(struct device *dev, |
161 | struct device_attribute *mattr, char *data) | 161 | struct device_attribute *mattr, char *data) |
162 | { | 162 | { |
163 | struct csrow_info *csrow = to_csrow(dev); | 163 | struct csrow_info *csrow = to_csrow(dev); |
164 | 164 | ||
165 | return sprintf(data, "%u\n", csrow->ue_count); | 165 | return sprintf(data, "%u\n", csrow->ue_count); |
166 | } | 166 | } |
167 | 167 | ||
168 | static ssize_t csrow_ce_count_show(struct device *dev, | 168 | static ssize_t csrow_ce_count_show(struct device *dev, |
169 | struct device_attribute *mattr, char *data) | 169 | struct device_attribute *mattr, char *data) |
170 | { | 170 | { |
171 | struct csrow_info *csrow = to_csrow(dev); | 171 | struct csrow_info *csrow = to_csrow(dev); |
172 | 172 | ||
173 | return sprintf(data, "%u\n", csrow->ce_count); | 173 | return sprintf(data, "%u\n", csrow->ce_count); |
174 | } | 174 | } |
175 | 175 | ||
176 | static ssize_t csrow_size_show(struct device *dev, | 176 | static ssize_t csrow_size_show(struct device *dev, |
177 | struct device_attribute *mattr, char *data) | 177 | struct device_attribute *mattr, char *data) |
178 | { | 178 | { |
179 | struct csrow_info *csrow = to_csrow(dev); | 179 | struct csrow_info *csrow = to_csrow(dev); |
180 | int i; | 180 | int i; |
181 | u32 nr_pages = 0; | 181 | u32 nr_pages = 0; |
182 | 182 | ||
183 | for (i = 0; i < csrow->nr_channels; i++) | 183 | for (i = 0; i < csrow->nr_channels; i++) |
184 | nr_pages += csrow->channels[i]->dimm->nr_pages; | 184 | nr_pages += csrow->channels[i]->dimm->nr_pages; |
185 | return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); | 185 | return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); |
186 | } | 186 | } |
187 | 187 | ||
188 | static ssize_t csrow_mem_type_show(struct device *dev, | 188 | static ssize_t csrow_mem_type_show(struct device *dev, |
189 | struct device_attribute *mattr, char *data) | 189 | struct device_attribute *mattr, char *data) |
190 | { | 190 | { |
191 | struct csrow_info *csrow = to_csrow(dev); | 191 | struct csrow_info *csrow = to_csrow(dev); |
192 | 192 | ||
193 | return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]); | 193 | return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]); |
194 | } | 194 | } |
195 | 195 | ||
196 | static ssize_t csrow_dev_type_show(struct device *dev, | 196 | static ssize_t csrow_dev_type_show(struct device *dev, |
197 | struct device_attribute *mattr, char *data) | 197 | struct device_attribute *mattr, char *data) |
198 | { | 198 | { |
199 | struct csrow_info *csrow = to_csrow(dev); | 199 | struct csrow_info *csrow = to_csrow(dev); |
200 | 200 | ||
201 | return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]); | 201 | return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]); |
202 | } | 202 | } |
203 | 203 | ||
204 | static ssize_t csrow_edac_mode_show(struct device *dev, | 204 | static ssize_t csrow_edac_mode_show(struct device *dev, |
205 | struct device_attribute *mattr, | 205 | struct device_attribute *mattr, |
206 | char *data) | 206 | char *data) |
207 | { | 207 | { |
208 | struct csrow_info *csrow = to_csrow(dev); | 208 | struct csrow_info *csrow = to_csrow(dev); |
209 | 209 | ||
210 | return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]); | 210 | return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]); |
211 | } | 211 | } |
212 | 212 | ||
213 | /* show/store functions for DIMM Label attributes */ | 213 | /* show/store functions for DIMM Label attributes */ |
214 | static ssize_t channel_dimm_label_show(struct device *dev, | 214 | static ssize_t channel_dimm_label_show(struct device *dev, |
215 | struct device_attribute *mattr, | 215 | struct device_attribute *mattr, |
216 | char *data) | 216 | char *data) |
217 | { | 217 | { |
218 | struct csrow_info *csrow = to_csrow(dev); | 218 | struct csrow_info *csrow = to_csrow(dev); |
219 | unsigned chan = to_channel(mattr); | 219 | unsigned chan = to_channel(mattr); |
220 | struct rank_info *rank = csrow->channels[chan]; | 220 | struct rank_info *rank = csrow->channels[chan]; |
221 | 221 | ||
222 | /* if field has not been initialized, there is nothing to send */ | 222 | /* if field has not been initialized, there is nothing to send */ |
223 | if (!rank->dimm->label[0]) | 223 | if (!rank->dimm->label[0]) |
224 | return 0; | 224 | return 0; |
225 | 225 | ||
226 | return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", | 226 | return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", |
227 | rank->dimm->label); | 227 | rank->dimm->label); |
228 | } | 228 | } |
229 | 229 | ||
230 | static ssize_t channel_dimm_label_store(struct device *dev, | 230 | static ssize_t channel_dimm_label_store(struct device *dev, |
231 | struct device_attribute *mattr, | 231 | struct device_attribute *mattr, |
232 | const char *data, size_t count) | 232 | const char *data, size_t count) |
233 | { | 233 | { |
234 | struct csrow_info *csrow = to_csrow(dev); | 234 | struct csrow_info *csrow = to_csrow(dev); |
235 | unsigned chan = to_channel(mattr); | 235 | unsigned chan = to_channel(mattr); |
236 | struct rank_info *rank = csrow->channels[chan]; | 236 | struct rank_info *rank = csrow->channels[chan]; |
237 | 237 | ||
238 | ssize_t max_size = 0; | 238 | ssize_t max_size = 0; |
239 | 239 | ||
240 | max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); | 240 | max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); |
241 | strncpy(rank->dimm->label, data, max_size); | 241 | strncpy(rank->dimm->label, data, max_size); |
242 | rank->dimm->label[max_size] = '\0'; | 242 | rank->dimm->label[max_size] = '\0'; |
243 | 243 | ||
244 | return max_size; | 244 | return max_size; |
245 | } | 245 | } |
246 | 246 | ||
247 | /* show function for dynamic chX_ce_count attribute */ | 247 | /* show function for dynamic chX_ce_count attribute */ |
248 | static ssize_t channel_ce_count_show(struct device *dev, | 248 | static ssize_t channel_ce_count_show(struct device *dev, |
249 | struct device_attribute *mattr, char *data) | 249 | struct device_attribute *mattr, char *data) |
250 | { | 250 | { |
251 | struct csrow_info *csrow = to_csrow(dev); | 251 | struct csrow_info *csrow = to_csrow(dev); |
252 | unsigned chan = to_channel(mattr); | 252 | unsigned chan = to_channel(mattr); |
253 | struct rank_info *rank = csrow->channels[chan]; | 253 | struct rank_info *rank = csrow->channels[chan]; |
254 | 254 | ||
255 | return sprintf(data, "%u\n", rank->ce_count); | 255 | return sprintf(data, "%u\n", rank->ce_count); |
256 | } | 256 | } |
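When CONFIG_EDAC_LEGACY_SYSFS is set, these show/store handlers back per-csrow files such as /sys/devices/system/edac/mc/mc0/csrow0/ch0_dimm_label (the exact path depends on the platform's MC numbering): writing a string there lands in channel_dimm_label_store(), which truncates it to EDAC_MC_LABEL_LEN - 1 bytes, and reading ch0_ce_count reports the per-rank corrected-error count.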
257 | 257 | ||
258 | /* csrow<id>/attribute files */ | 258 | /* csrow<id>/attribute files */ |
259 | DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL); | 259 | DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL); |
260 | DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL); | 260 | DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL); |
261 | DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL); | 261 | DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL); |
262 | DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL); | 262 | DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL); |
263 | DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL); | 263 | DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL); |
264 | DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL); | 264 | DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL); |
265 | 265 | ||
266 | /* default attributes of the CSROW<id> object */ | 266 | /* default attributes of the CSROW<id> object */ |
267 | static struct attribute *csrow_attrs[] = { | 267 | static struct attribute *csrow_attrs[] = { |
268 | &dev_attr_legacy_dev_type.attr, | 268 | &dev_attr_legacy_dev_type.attr, |
269 | &dev_attr_legacy_mem_type.attr, | 269 | &dev_attr_legacy_mem_type.attr, |
270 | &dev_attr_legacy_edac_mode.attr, | 270 | &dev_attr_legacy_edac_mode.attr, |
271 | &dev_attr_legacy_size_mb.attr, | 271 | &dev_attr_legacy_size_mb.attr, |
272 | &dev_attr_legacy_ue_count.attr, | 272 | &dev_attr_legacy_ue_count.attr, |
273 | &dev_attr_legacy_ce_count.attr, | 273 | &dev_attr_legacy_ce_count.attr, |
274 | NULL, | 274 | NULL, |
275 | }; | 275 | }; |
276 | 276 | ||
277 | static struct attribute_group csrow_attr_grp = { | 277 | static struct attribute_group csrow_attr_grp = { |
278 | .attrs = csrow_attrs, | 278 | .attrs = csrow_attrs, |
279 | }; | 279 | }; |
280 | 280 | ||
281 | static const struct attribute_group *csrow_attr_groups[] = { | 281 | static const struct attribute_group *csrow_attr_groups[] = { |
282 | &csrow_attr_grp, | 282 | &csrow_attr_grp, |
283 | NULL | 283 | NULL |
284 | }; | 284 | }; |
285 | 285 | ||
286 | static void csrow_attr_release(struct device *dev) | 286 | static void csrow_attr_release(struct device *dev) |
287 | { | 287 | { |
288 | struct csrow_info *csrow = container_of(dev, struct csrow_info, dev); | 288 | struct csrow_info *csrow = container_of(dev, struct csrow_info, dev); |
289 | 289 | ||
290 | edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); | 290 | edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); |
291 | kfree(csrow); | 291 | kfree(csrow); |
292 | } | 292 | } |
293 | 293 | ||
294 | static struct device_type csrow_attr_type = { | 294 | static struct device_type csrow_attr_type = { |
295 | .groups = csrow_attr_groups, | 295 | .groups = csrow_attr_groups, |
296 | .release = csrow_attr_release, | 296 | .release = csrow_attr_release, |
297 | }; | 297 | }; |
298 | 298 | ||
299 | /* | 299 | /* |
300 | * possible dynamic channel DIMM Label attribute files | 300 | * possible dynamic channel DIMM Label attribute files |
301 | * | 301 | * |
302 | */ | 302 | */ |
303 | 303 | ||
304 | #define EDAC_NR_CHANNELS 6 | 304 | #define EDAC_NR_CHANNELS 6 |
305 | 305 | ||
306 | DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, | 306 | DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, |
307 | channel_dimm_label_show, channel_dimm_label_store, 0); | 307 | channel_dimm_label_show, channel_dimm_label_store, 0); |
308 | DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, | 308 | DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, |
309 | channel_dimm_label_show, channel_dimm_label_store, 1); | 309 | channel_dimm_label_show, channel_dimm_label_store, 1); |
310 | DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR, | 310 | DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR, |
311 | channel_dimm_label_show, channel_dimm_label_store, 2); | 311 | channel_dimm_label_show, channel_dimm_label_store, 2); |
312 | DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR, | 312 | DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR, |
313 | channel_dimm_label_show, channel_dimm_label_store, 3); | 313 | channel_dimm_label_show, channel_dimm_label_store, 3); |
314 | DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, | 314 | DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, |
315 | channel_dimm_label_show, channel_dimm_label_store, 4); | 315 | channel_dimm_label_show, channel_dimm_label_store, 4); |
316 | DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, | 316 | DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, |
317 | channel_dimm_label_show, channel_dimm_label_store, 5); | 317 | channel_dimm_label_show, channel_dimm_label_store, 5); |
318 | 318 | ||
319 | /* Total possible dynamic DIMM Label attribute file table */ | 319 | /* Total possible dynamic DIMM Label attribute file table */ |
320 | static struct device_attribute *dynamic_csrow_dimm_attr[] = { | 320 | static struct device_attribute *dynamic_csrow_dimm_attr[] = { |
321 | &dev_attr_legacy_ch0_dimm_label.attr, | 321 | &dev_attr_legacy_ch0_dimm_label.attr, |
322 | &dev_attr_legacy_ch1_dimm_label.attr, | 322 | &dev_attr_legacy_ch1_dimm_label.attr, |
323 | &dev_attr_legacy_ch2_dimm_label.attr, | 323 | &dev_attr_legacy_ch2_dimm_label.attr, |
324 | &dev_attr_legacy_ch3_dimm_label.attr, | 324 | &dev_attr_legacy_ch3_dimm_label.attr, |
325 | &dev_attr_legacy_ch4_dimm_label.attr, | 325 | &dev_attr_legacy_ch4_dimm_label.attr, |
326 | &dev_attr_legacy_ch5_dimm_label.attr | 326 | &dev_attr_legacy_ch5_dimm_label.attr |
327 | }; | 327 | }; |
328 | 328 | ||
329 | /* possible dynamic channel ce_count attribute files */ | 329 | /* possible dynamic channel ce_count attribute files */ |
330 | DEVICE_CHANNEL(ch0_ce_count, S_IRUGO, | 330 | DEVICE_CHANNEL(ch0_ce_count, S_IRUGO, |
331 | channel_ce_count_show, NULL, 0); | 331 | channel_ce_count_show, NULL, 0); |
332 | DEVICE_CHANNEL(ch1_ce_count, S_IRUGO, | 332 | DEVICE_CHANNEL(ch1_ce_count, S_IRUGO, |
333 | channel_ce_count_show, NULL, 1); | 333 | channel_ce_count_show, NULL, 1); |
334 | DEVICE_CHANNEL(ch2_ce_count, S_IRUGO, | 334 | DEVICE_CHANNEL(ch2_ce_count, S_IRUGO, |
335 | channel_ce_count_show, NULL, 2); | 335 | channel_ce_count_show, NULL, 2); |
336 | DEVICE_CHANNEL(ch3_ce_count, S_IRUGO, | 336 | DEVICE_CHANNEL(ch3_ce_count, S_IRUGO, |
337 | channel_ce_count_show, NULL, 3); | 337 | channel_ce_count_show, NULL, 3); |
338 | DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, | 338 | DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, |
339 | channel_ce_count_show, NULL, 4); | 339 | channel_ce_count_show, NULL, 4); |
340 | DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, | 340 | DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, |
341 | channel_ce_count_show, NULL, 5); | 341 | channel_ce_count_show, NULL, 5); |
342 | 342 | ||
343 | /* Total possible dynamic ce_count attribute file table */ | 343 | /* Total possible dynamic ce_count attribute file table */ |
344 | static struct device_attribute *dynamic_csrow_ce_count_attr[] = { | 344 | static struct device_attribute *dynamic_csrow_ce_count_attr[] = { |
345 | &dev_attr_legacy_ch0_ce_count.attr, | 345 | &dev_attr_legacy_ch0_ce_count.attr, |
346 | &dev_attr_legacy_ch1_ce_count.attr, | 346 | &dev_attr_legacy_ch1_ce_count.attr, |
347 | &dev_attr_legacy_ch2_ce_count.attr, | 347 | &dev_attr_legacy_ch2_ce_count.attr, |
348 | &dev_attr_legacy_ch3_ce_count.attr, | 348 | &dev_attr_legacy_ch3_ce_count.attr, |
349 | &dev_attr_legacy_ch4_ce_count.attr, | 349 | &dev_attr_legacy_ch4_ce_count.attr, |
350 | &dev_attr_legacy_ch5_ce_count.attr | 350 | &dev_attr_legacy_ch5_ce_count.attr |
351 | }; | 351 | }; |
352 | 352 | ||
353 | static inline int nr_pages_per_csrow(struct csrow_info *csrow) | 353 | static inline int nr_pages_per_csrow(struct csrow_info *csrow) |
354 | { | 354 | { |
355 | int chan, nr_pages = 0; | 355 | int chan, nr_pages = 0; |
356 | 356 | ||
357 | for (chan = 0; chan < csrow->nr_channels; chan++) | 357 | for (chan = 0; chan < csrow->nr_channels; chan++) |
358 | nr_pages += csrow->channels[chan]->dimm->nr_pages; | 358 | nr_pages += csrow->channels[chan]->dimm->nr_pages; |
359 | 359 | ||
360 | return nr_pages; | 360 | return nr_pages; |
361 | } | 361 | } |
362 | 362 | ||
363 | /* Create a CSROW object under the specified edac_mc_device */ | 363 | /* Create a CSROW object under the specified edac_mc_device */ |
364 | static int edac_create_csrow_object(struct mem_ctl_info *mci, | 364 | static int edac_create_csrow_object(struct mem_ctl_info *mci, |
365 | struct csrow_info *csrow, int index) | 365 | struct csrow_info *csrow, int index) |
366 | { | 366 | { |
367 | int err, chan; | 367 | int err, chan; |
368 | 368 | ||
369 | if (csrow->nr_channels >= EDAC_NR_CHANNELS) | 369 | if (csrow->nr_channels >= EDAC_NR_CHANNELS) |
370 | return -ENODEV; | 370 | return -ENODEV; |
371 | 371 | ||
372 | csrow->dev.type = &csrow_attr_type; | 372 | csrow->dev.type = &csrow_attr_type; |
373 | csrow->dev.bus = &mci->bus; | 373 | csrow->dev.bus = mci->bus; |
374 | device_initialize(&csrow->dev); | 374 | device_initialize(&csrow->dev); |
375 | csrow->dev.parent = &mci->dev; | 375 | csrow->dev.parent = &mci->dev; |
376 | csrow->mci = mci; | 376 | csrow->mci = mci; |
377 | dev_set_name(&csrow->dev, "csrow%d", index); | 377 | dev_set_name(&csrow->dev, "csrow%d", index); |
378 | dev_set_drvdata(&csrow->dev, csrow); | 378 | dev_set_drvdata(&csrow->dev, csrow); |
379 | 379 | ||
380 | edac_dbg(0, "creating (virtual) csrow node %s\n", | 380 | edac_dbg(0, "creating (virtual) csrow node %s\n", |
381 | dev_name(&csrow->dev)); | 381 | dev_name(&csrow->dev)); |
382 | 382 | ||
383 | err = device_add(&csrow->dev); | 383 | err = device_add(&csrow->dev); |
384 | if (err < 0) | 384 | if (err < 0) |
385 | return err; | 385 | return err; |
386 | 386 | ||
387 | for (chan = 0; chan < csrow->nr_channels; chan++) { | 387 | for (chan = 0; chan < csrow->nr_channels; chan++) { |
388 | /* Only expose populated DIMMs */ | 388 | /* Only expose populated DIMMs */ |
389 | if (!csrow->channels[chan]->dimm->nr_pages) | 389 | if (!csrow->channels[chan]->dimm->nr_pages) |
390 | continue; | 390 | continue; |
391 | err = device_create_file(&csrow->dev, | 391 | err = device_create_file(&csrow->dev, |
392 | dynamic_csrow_dimm_attr[chan]); | 392 | dynamic_csrow_dimm_attr[chan]); |
393 | if (err < 0) | 393 | if (err < 0) |
394 | goto error; | 394 | goto error; |
395 | err = device_create_file(&csrow->dev, | 395 | err = device_create_file(&csrow->dev, |
396 | dynamic_csrow_ce_count_attr[chan]); | 396 | dynamic_csrow_ce_count_attr[chan]); |
397 | if (err < 0) { | 397 | if (err < 0) { |
398 | device_remove_file(&csrow->dev, | 398 | device_remove_file(&csrow->dev, |
399 | dynamic_csrow_dimm_attr[chan]); | 399 | dynamic_csrow_dimm_attr[chan]); |
400 | goto error; | 400 | goto error; |
401 | } | 401 | } |
402 | } | 402 | } |
403 | 403 | ||
404 | return 0; | 404 | return 0; |
405 | 405 | ||
406 | error: | 406 | error: |
407 | for (--chan; chan >= 0; chan--) { | 407 | for (--chan; chan >= 0; chan--) { |
408 | device_remove_file(&csrow->dev, | 408 | device_remove_file(&csrow->dev, |
409 | dynamic_csrow_dimm_attr[chan]); | 409 | dynamic_csrow_dimm_attr[chan]); |
410 | device_remove_file(&csrow->dev, | 410 | device_remove_file(&csrow->dev, |
411 | dynamic_csrow_ce_count_attr[chan]); | 411 | dynamic_csrow_ce_count_attr[chan]); |
412 | } | 412 | } |
413 | put_device(&csrow->dev); | 413 | put_device(&csrow->dev); |
414 | 414 | ||
415 | return err; | 415 | return err; |
416 | } | 416 | } |
417 | 417 | ||
418 | /* Create CSROW objects under specified edac_mc_device */ | 418 | /* Create CSROW objects under specified edac_mc_device */ |
419 | static int edac_create_csrow_objects(struct mem_ctl_info *mci) | 419 | static int edac_create_csrow_objects(struct mem_ctl_info *mci) |
420 | { | 420 | { |
421 | int err, i, chan; | 421 | int err, i, chan; |
422 | struct csrow_info *csrow; | 422 | struct csrow_info *csrow; |
423 | 423 | ||
424 | for (i = 0; i < mci->nr_csrows; i++) { | 424 | for (i = 0; i < mci->nr_csrows; i++) { |
425 | csrow = mci->csrows[i]; | 425 | csrow = mci->csrows[i]; |
426 | if (!nr_pages_per_csrow(csrow)) | 426 | if (!nr_pages_per_csrow(csrow)) |
427 | continue; | 427 | continue; |
428 | err = edac_create_csrow_object(mci, mci->csrows[i], i); | 428 | err = edac_create_csrow_object(mci, mci->csrows[i], i); |
429 | if (err < 0) { | 429 | if (err < 0) { |
430 | edac_dbg(1, | 430 | edac_dbg(1, |
431 | "failure: create csrow objects for csrow %d\n", | 431 | "failure: create csrow objects for csrow %d\n", |
432 | i); | 432 | i); |
433 | goto error; | 433 | goto error; |
434 | } | 434 | } |
435 | } | 435 | } |
436 | return 0; | 436 | return 0; |
437 | 437 | ||
438 | error: | 438 | error: |
439 | for (--i; i >= 0; i--) { | 439 | for (--i; i >= 0; i--) { |
440 | csrow = mci->csrows[i]; | 440 | csrow = mci->csrows[i]; |
441 | if (!nr_pages_per_csrow(csrow)) | 441 | if (!nr_pages_per_csrow(csrow)) |
442 | continue; | 442 | continue; |
443 | for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { | 443 | for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { |
444 | if (!csrow->channels[chan]->dimm->nr_pages) | 444 | if (!csrow->channels[chan]->dimm->nr_pages) |
445 | continue; | 445 | continue; |
446 | device_remove_file(&csrow->dev, | 446 | device_remove_file(&csrow->dev, |
447 | dynamic_csrow_dimm_attr[chan]); | 447 | dynamic_csrow_dimm_attr[chan]); |
448 | device_remove_file(&csrow->dev, | 448 | device_remove_file(&csrow->dev, |
449 | dynamic_csrow_ce_count_attr[chan]); | 449 | dynamic_csrow_ce_count_attr[chan]); |
450 | } | 450 | } |
451 | put_device(&mci->csrows[i]->dev); | 451 | put_device(&mci->csrows[i]->dev); |
452 | } | 452 | } |
453 | 453 | ||
454 | return err; | 454 | return err; |
455 | } | 455 | } |
456 | 456 | ||
457 | static void edac_delete_csrow_objects(struct mem_ctl_info *mci) | 457 | static void edac_delete_csrow_objects(struct mem_ctl_info *mci) |
458 | { | 458 | { |
459 | int i, chan; | 459 | int i, chan; |
460 | struct csrow_info *csrow; | 460 | struct csrow_info *csrow; |
461 | 461 | ||
462 | for (i = mci->nr_csrows - 1; i >= 0; i--) { | 462 | for (i = mci->nr_csrows - 1; i >= 0; i--) { |
463 | csrow = mci->csrows[i]; | 463 | csrow = mci->csrows[i]; |
464 | if (!nr_pages_per_csrow(csrow)) | 464 | if (!nr_pages_per_csrow(csrow)) |
465 | continue; | 465 | continue; |
466 | for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { | 466 | for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { |
467 | if (!csrow->channels[chan]->dimm->nr_pages) | 467 | if (!csrow->channels[chan]->dimm->nr_pages) |
468 | continue; | 468 | continue; |
469 | edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n", | 469 | edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n", |
470 | i, chan); | 470 | i, chan); |
471 | device_remove_file(&csrow->dev, | 471 | device_remove_file(&csrow->dev, |
472 | dynamic_csrow_dimm_attr[chan]); | 472 | dynamic_csrow_dimm_attr[chan]); |
473 | device_remove_file(&csrow->dev, | 473 | device_remove_file(&csrow->dev, |
474 | dynamic_csrow_ce_count_attr[chan]); | 474 | dynamic_csrow_ce_count_attr[chan]); |
475 | } | 475 | } |
476 | device_unregister(&mci->csrows[i]->dev); | 476 | device_unregister(&mci->csrows[i]->dev); |
477 | } | 477 | } |
478 | } | 478 | } |
479 | #endif | 479 | #endif |
480 | 480 | ||
481 | /* | 481 | /* |
482 | * Per-dimm (or per-rank) devices | 482 | * Per-dimm (or per-rank) devices |
483 | */ | 483 | */ |
484 | 484 | ||
485 | #define to_dimm(k) container_of(k, struct dimm_info, dev) | 485 | #define to_dimm(k) container_of(k, struct dimm_info, dev) |
486 | 486 | ||
487 | /* show/store functions for DIMM Label attributes */ | 487 | /* show/store functions for DIMM Label attributes */ |
488 | static ssize_t dimmdev_location_show(struct device *dev, | 488 | static ssize_t dimmdev_location_show(struct device *dev, |
489 | struct device_attribute *mattr, char *data) | 489 | struct device_attribute *mattr, char *data) |
490 | { | 490 | { |
491 | struct dimm_info *dimm = to_dimm(dev); | 491 | struct dimm_info *dimm = to_dimm(dev); |
492 | 492 | ||
493 | return edac_dimm_info_location(dimm, data, PAGE_SIZE); | 493 | return edac_dimm_info_location(dimm, data, PAGE_SIZE); |
494 | } | 494 | } |
495 | 495 | ||
496 | static ssize_t dimmdev_label_show(struct device *dev, | 496 | static ssize_t dimmdev_label_show(struct device *dev, |
497 | struct device_attribute *mattr, char *data) | 497 | struct device_attribute *mattr, char *data) |
498 | { | 498 | { |
499 | struct dimm_info *dimm = to_dimm(dev); | 499 | struct dimm_info *dimm = to_dimm(dev); |
500 | 500 | ||
501 | /* if field has not been initialized, there is nothing to send */ | 501 | /* if field has not been initialized, there is nothing to send */ |
502 | if (!dimm->label[0]) | 502 | if (!dimm->label[0]) |
503 | return 0; | 503 | return 0; |
504 | 504 | ||
505 | return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label); | 505 | return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label); |
506 | } | 506 | } |
507 | 507 | ||
508 | static ssize_t dimmdev_label_store(struct device *dev, | 508 | static ssize_t dimmdev_label_store(struct device *dev, |
509 | struct device_attribute *mattr, | 509 | struct device_attribute *mattr, |
510 | const char *data, | 510 | const char *data, |
511 | size_t count) | 511 | size_t count) |
512 | { | 512 | { |
513 | struct dimm_info *dimm = to_dimm(dev); | 513 | struct dimm_info *dimm = to_dimm(dev); |
514 | 514 | ||
515 | ssize_t max_size = 0; | 515 | ssize_t max_size = 0; |
516 | 516 | ||
517 | max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); | 517 | max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); |
518 | strncpy(dimm->label, data, max_size); | 518 | strncpy(dimm->label, data, max_size); |
519 | dimm->label[max_size] = '\0'; | 519 | dimm->label[max_size] = '\0'; |
520 | 520 | ||
521 | return max_size; | 521 | return max_size; |
522 | } | 522 | } |
523 | 523 | ||
524 | static ssize_t dimmdev_size_show(struct device *dev, | 524 | static ssize_t dimmdev_size_show(struct device *dev, |
525 | struct device_attribute *mattr, char *data) | 525 | struct device_attribute *mattr, char *data) |
526 | { | 526 | { |
527 | struct dimm_info *dimm = to_dimm(dev); | 527 | struct dimm_info *dimm = to_dimm(dev); |
528 | 528 | ||
529 | return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); | 529 | return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); |
530 | } | 530 | } |
531 | 531 | ||
532 | static ssize_t dimmdev_mem_type_show(struct device *dev, | 532 | static ssize_t dimmdev_mem_type_show(struct device *dev, |
533 | struct device_attribute *mattr, char *data) | 533 | struct device_attribute *mattr, char *data) |
534 | { | 534 | { |
535 | struct dimm_info *dimm = to_dimm(dev); | 535 | struct dimm_info *dimm = to_dimm(dev); |
536 | 536 | ||
537 | return sprintf(data, "%s\n", mem_types[dimm->mtype]); | 537 | return sprintf(data, "%s\n", mem_types[dimm->mtype]); |
538 | } | 538 | } |
539 | 539 | ||
540 | static ssize_t dimmdev_dev_type_show(struct device *dev, | 540 | static ssize_t dimmdev_dev_type_show(struct device *dev, |
541 | struct device_attribute *mattr, char *data) | 541 | struct device_attribute *mattr, char *data) |
542 | { | 542 | { |
543 | struct dimm_info *dimm = to_dimm(dev); | 543 | struct dimm_info *dimm = to_dimm(dev); |
544 | 544 | ||
545 | return sprintf(data, "%s\n", dev_types[dimm->dtype]); | 545 | return sprintf(data, "%s\n", dev_types[dimm->dtype]); |
546 | } | 546 | } |
547 | 547 | ||
548 | static ssize_t dimmdev_edac_mode_show(struct device *dev, | 548 | static ssize_t dimmdev_edac_mode_show(struct device *dev, |
549 | struct device_attribute *mattr, | 549 | struct device_attribute *mattr, |
550 | char *data) | 550 | char *data) |
551 | { | 551 | { |
552 | struct dimm_info *dimm = to_dimm(dev); | 552 | struct dimm_info *dimm = to_dimm(dev); |
553 | 553 | ||
554 | return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]); | 554 | return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]); |
555 | } | 555 | } |
556 | 556 | ||
557 | /* dimm/rank attribute files */ | 557 | /* dimm/rank attribute files */ |
558 | static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR, | 558 | static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR, |
559 | dimmdev_label_show, dimmdev_label_store); | 559 | dimmdev_label_show, dimmdev_label_store); |
560 | static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL); | 560 | static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL); |
561 | static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL); | 561 | static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL); |
562 | static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL); | 562 | static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL); |
563 | static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL); | 563 | static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL); |
564 | static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL); | 564 | static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL); |
565 | 565 | ||
566 | /* attributes of the dimm<id>/rank<id> object */ | 566 | /* attributes of the dimm<id>/rank<id> object */ |
567 | static struct attribute *dimm_attrs[] = { | 567 | static struct attribute *dimm_attrs[] = { |
568 | &dev_attr_dimm_label.attr, | 568 | &dev_attr_dimm_label.attr, |
569 | &dev_attr_dimm_location.attr, | 569 | &dev_attr_dimm_location.attr, |
570 | &dev_attr_size.attr, | 570 | &dev_attr_size.attr, |
571 | &dev_attr_dimm_mem_type.attr, | 571 | &dev_attr_dimm_mem_type.attr, |
572 | &dev_attr_dimm_dev_type.attr, | 572 | &dev_attr_dimm_dev_type.attr, |
573 | &dev_attr_dimm_edac_mode.attr, | 573 | &dev_attr_dimm_edac_mode.attr, |
574 | NULL, | 574 | NULL, |
575 | }; | 575 | }; |
576 | 576 | ||
577 | static struct attribute_group dimm_attr_grp = { | 577 | static struct attribute_group dimm_attr_grp = { |
578 | .attrs = dimm_attrs, | 578 | .attrs = dimm_attrs, |
579 | }; | 579 | }; |
580 | 580 | ||
581 | static const struct attribute_group *dimm_attr_groups[] = { | 581 | static const struct attribute_group *dimm_attr_groups[] = { |
582 | &dimm_attr_grp, | 582 | &dimm_attr_grp, |
583 | NULL | 583 | NULL |
584 | }; | 584 | }; |
585 | 585 | ||
586 | static void dimm_attr_release(struct device *dev) | 586 | static void dimm_attr_release(struct device *dev) |
587 | { | 587 | { |
588 | struct dimm_info *dimm = container_of(dev, struct dimm_info, dev); | 588 | struct dimm_info *dimm = container_of(dev, struct dimm_info, dev); |
589 | 589 | ||
590 | edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev)); | 590 | edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev)); |
591 | kfree(dimm); | 591 | kfree(dimm); |
592 | } | 592 | } |
593 | 593 | ||
594 | static struct device_type dimm_attr_type = { | 594 | static struct device_type dimm_attr_type = { |
595 | .groups = dimm_attr_groups, | 595 | .groups = dimm_attr_groups, |
596 | .release = dimm_attr_release, | 596 | .release = dimm_attr_release, |
597 | }; | 597 | }; |
598 | 598 | ||
599 | /* Create a DIMM object under specified memory controller device */ | 599 | /* Create a DIMM object under specified memory controller device */ |
600 | static int edac_create_dimm_object(struct mem_ctl_info *mci, | 600 | static int edac_create_dimm_object(struct mem_ctl_info *mci, |
601 | struct dimm_info *dimm, | 601 | struct dimm_info *dimm, |
602 | int index) | 602 | int index) |
603 | { | 603 | { |
604 | int err; | 604 | int err; |
605 | dimm->mci = mci; | 605 | dimm->mci = mci; |
606 | 606 | ||
607 | dimm->dev.type = &dimm_attr_type; | 607 | dimm->dev.type = &dimm_attr_type; |
608 | dimm->dev.bus = &mci->bus; | 608 | dimm->dev.bus = mci->bus; |
609 | device_initialize(&dimm->dev); | 609 | device_initialize(&dimm->dev); |
610 | 610 | ||
611 | dimm->dev.parent = &mci->dev; | 611 | dimm->dev.parent = &mci->dev; |
612 | if (mci->csbased) | 612 | if (mci->csbased) |
613 | dev_set_name(&dimm->dev, "rank%d", index); | 613 | dev_set_name(&dimm->dev, "rank%d", index); |
614 | else | 614 | else |
615 | dev_set_name(&dimm->dev, "dimm%d", index); | 615 | dev_set_name(&dimm->dev, "dimm%d", index); |
616 | dev_set_drvdata(&dimm->dev, dimm); | 616 | dev_set_drvdata(&dimm->dev, dimm); |
617 | pm_runtime_forbid(&mci->dev); | 617 | pm_runtime_forbid(&mci->dev); |
618 | 618 | ||
619 | err = device_add(&dimm->dev); | 619 | err = device_add(&dimm->dev); |
620 | 620 | ||
621 | edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev)); | 621 | edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev)); |
622 | 622 | ||
623 | return err; | 623 | return err; |
624 | } | 624 | } |
625 | 625 | ||
626 | /* | 626 | /* |
627 | * Memory controller device | 627 | * Memory controller device |
628 | */ | 628 | */ |
629 | 629 | ||
630 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) | 630 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) |
631 | 631 | ||
632 | static ssize_t mci_reset_counters_store(struct device *dev, | 632 | static ssize_t mci_reset_counters_store(struct device *dev, |
633 | struct device_attribute *mattr, | 633 | struct device_attribute *mattr, |
634 | const char *data, size_t count) | 634 | const char *data, size_t count) |
635 | { | 635 | { |
636 | struct mem_ctl_info *mci = to_mci(dev); | 636 | struct mem_ctl_info *mci = to_mci(dev); |
637 | int cnt, row, chan, i; | 637 | int cnt, row, chan, i; |
638 | mci->ue_mc = 0; | 638 | mci->ue_mc = 0; |
639 | mci->ce_mc = 0; | 639 | mci->ce_mc = 0; |
640 | mci->ue_noinfo_count = 0; | 640 | mci->ue_noinfo_count = 0; |
641 | mci->ce_noinfo_count = 0; | 641 | mci->ce_noinfo_count = 0; |
642 | 642 | ||
643 | for (row = 0; row < mci->nr_csrows; row++) { | 643 | for (row = 0; row < mci->nr_csrows; row++) { |
644 | struct csrow_info *ri = mci->csrows[row]; | 644 | struct csrow_info *ri = mci->csrows[row]; |
645 | 645 | ||
646 | ri->ue_count = 0; | 646 | ri->ue_count = 0; |
647 | ri->ce_count = 0; | 647 | ri->ce_count = 0; |
648 | 648 | ||
649 | for (chan = 0; chan < ri->nr_channels; chan++) | 649 | for (chan = 0; chan < ri->nr_channels; chan++) |
650 | ri->channels[chan]->ce_count = 0; | 650 | ri->channels[chan]->ce_count = 0; |
651 | } | 651 | } |
652 | 652 | ||
653 | cnt = 1; | 653 | cnt = 1; |
654 | for (i = 0; i < mci->n_layers; i++) { | 654 | for (i = 0; i < mci->n_layers; i++) { |
655 | cnt *= mci->layers[i].size; | 655 | cnt *= mci->layers[i].size; |
656 | memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32)); | 656 | memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32)); |
657 | memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32)); | 657 | memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32)); |
658 | } | 658 | } |
659 | 659 | ||
660 | mci->start_time = jiffies; | 660 | mci->start_time = jiffies; |
661 | return count; | 661 | return count; |
662 | } | 662 | } |
663 | 663 | ||
664 | /* Memory scrubbing interface: | 664 | /* Memory scrubbing interface: |
665 | * | 665 | * |
666 | * An MC driver can limit the scrubbing bandwidth based on the CPU type. | 666 | * An MC driver can limit the scrubbing bandwidth based on the CPU type. |
667 | * Therefore, ->set_sdram_scrub_rate should be made to return the actual | 667 | * Therefore, ->set_sdram_scrub_rate should be made to return the actual |
668 | * bandwidth that is accepted or 0 when scrubbing is to be disabled. | 668 | * bandwidth that is accepted or 0 when scrubbing is to be disabled. |
669 | * | 669 | * |
670 | * A negative value still means that an error has occurred while setting | 670 | * A negative value still means that an error has occurred while setting |
671 | * the scrub rate. | 671 | * the scrub rate. |
672 | */ | 672 | */ |
673 | static ssize_t mci_sdram_scrub_rate_store(struct device *dev, | 673 | static ssize_t mci_sdram_scrub_rate_store(struct device *dev, |
674 | struct device_attribute *mattr, | 674 | struct device_attribute *mattr, |
675 | const char *data, size_t count) | 675 | const char *data, size_t count) |
676 | { | 676 | { |
677 | struct mem_ctl_info *mci = to_mci(dev); | 677 | struct mem_ctl_info *mci = to_mci(dev); |
678 | unsigned long bandwidth = 0; | 678 | unsigned long bandwidth = 0; |
679 | int new_bw = 0; | 679 | int new_bw = 0; |
680 | 680 | ||
681 | if (kstrtoul(data, 10, &bandwidth) < 0) | 681 | if (kstrtoul(data, 10, &bandwidth) < 0) |
682 | return -EINVAL; | 682 | return -EINVAL; |
683 | 683 | ||
684 | new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); | 684 | new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); |
685 | if (new_bw < 0) { | 685 | if (new_bw < 0) { |
686 | edac_printk(KERN_WARNING, EDAC_MC, | 686 | edac_printk(KERN_WARNING, EDAC_MC, |
687 | "Error setting scrub rate to: %lu\n", bandwidth); | 687 | "Error setting scrub rate to: %lu\n", bandwidth); |
688 | return -EINVAL; | 688 | return -EINVAL; |
689 | } | 689 | } |
690 | 690 | ||
691 | return count; | 691 | return count; |
692 | } | 692 | } |
693 | 693 | ||
694 | /* | 694 | /* |
695 | * ->get_sdram_scrub_rate() return value semantics same as above. | 695 | * ->get_sdram_scrub_rate() return value semantics same as above. |
696 | */ | 696 | */ |
697 | static ssize_t mci_sdram_scrub_rate_show(struct device *dev, | 697 | static ssize_t mci_sdram_scrub_rate_show(struct device *dev, |
698 | struct device_attribute *mattr, | 698 | struct device_attribute *mattr, |
699 | char *data) | 699 | char *data) |
700 | { | 700 | { |
701 | struct mem_ctl_info *mci = to_mci(dev); | 701 | struct mem_ctl_info *mci = to_mci(dev); |
702 | int bandwidth = 0; | 702 | int bandwidth = 0; |
703 | 703 | ||
704 | bandwidth = mci->get_sdram_scrub_rate(mci); | 704 | bandwidth = mci->get_sdram_scrub_rate(mci); |
705 | if (bandwidth < 0) { | 705 | if (bandwidth < 0) { |
706 | edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); | 706 | edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); |
707 | return bandwidth; | 707 | return bandwidth; |
708 | } | 708 | } |
709 | 709 | ||
710 | return sprintf(data, "%d\n", bandwidth); | 710 | return sprintf(data, "%d\n", bandwidth); |
711 | } | 711 | } |
712 | 712 | ||
713 | /* default attribute files for the MCI object */ | 713 | /* default attribute files for the MCI object */ |
714 | static ssize_t mci_ue_count_show(struct device *dev, | 714 | static ssize_t mci_ue_count_show(struct device *dev, |
715 | struct device_attribute *mattr, | 715 | struct device_attribute *mattr, |
716 | char *data) | 716 | char *data) |
717 | { | 717 | { |
718 | struct mem_ctl_info *mci = to_mci(dev); | 718 | struct mem_ctl_info *mci = to_mci(dev); |
719 | 719 | ||
720 | return sprintf(data, "%d\n", mci->ue_mc); | 720 | return sprintf(data, "%d\n", mci->ue_mc); |
721 | } | 721 | } |
722 | 722 | ||
723 | static ssize_t mci_ce_count_show(struct device *dev, | 723 | static ssize_t mci_ce_count_show(struct device *dev, |
724 | struct device_attribute *mattr, | 724 | struct device_attribute *mattr, |
725 | char *data) | 725 | char *data) |
726 | { | 726 | { |
727 | struct mem_ctl_info *mci = to_mci(dev); | 727 | struct mem_ctl_info *mci = to_mci(dev); |
728 | 728 | ||
729 | return sprintf(data, "%d\n", mci->ce_mc); | 729 | return sprintf(data, "%d\n", mci->ce_mc); |
730 | } | 730 | } |
731 | 731 | ||
732 | static ssize_t mci_ce_noinfo_show(struct device *dev, | 732 | static ssize_t mci_ce_noinfo_show(struct device *dev, |
733 | struct device_attribute *mattr, | 733 | struct device_attribute *mattr, |
734 | char *data) | 734 | char *data) |
735 | { | 735 | { |
736 | struct mem_ctl_info *mci = to_mci(dev); | 736 | struct mem_ctl_info *mci = to_mci(dev); |
737 | 737 | ||
738 | return sprintf(data, "%d\n", mci->ce_noinfo_count); | 738 | return sprintf(data, "%d\n", mci->ce_noinfo_count); |
739 | } | 739 | } |
740 | 740 | ||
741 | static ssize_t mci_ue_noinfo_show(struct device *dev, | 741 | static ssize_t mci_ue_noinfo_show(struct device *dev, |
742 | struct device_attribute *mattr, | 742 | struct device_attribute *mattr, |
743 | char *data) | 743 | char *data) |
744 | { | 744 | { |
745 | struct mem_ctl_info *mci = to_mci(dev); | 745 | struct mem_ctl_info *mci = to_mci(dev); |
746 | 746 | ||
747 | return sprintf(data, "%d\n", mci->ue_noinfo_count); | 747 | return sprintf(data, "%d\n", mci->ue_noinfo_count); |
748 | } | 748 | } |
749 | 749 | ||
750 | static ssize_t mci_seconds_show(struct device *dev, | 750 | static ssize_t mci_seconds_show(struct device *dev, |
751 | struct device_attribute *mattr, | 751 | struct device_attribute *mattr, |
752 | char *data) | 752 | char *data) |
753 | { | 753 | { |
754 | struct mem_ctl_info *mci = to_mci(dev); | 754 | struct mem_ctl_info *mci = to_mci(dev); |
755 | 755 | ||
756 | return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); | 756 | return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); |
757 | } | 757 | } |
758 | 758 | ||
759 | static ssize_t mci_ctl_name_show(struct device *dev, | 759 | static ssize_t mci_ctl_name_show(struct device *dev, |
760 | struct device_attribute *mattr, | 760 | struct device_attribute *mattr, |
761 | char *data) | 761 | char *data) |
762 | { | 762 | { |
763 | struct mem_ctl_info *mci = to_mci(dev); | 763 | struct mem_ctl_info *mci = to_mci(dev); |
764 | 764 | ||
765 | return sprintf(data, "%s\n", mci->ctl_name); | 765 | return sprintf(data, "%s\n", mci->ctl_name); |
766 | } | 766 | } |
767 | 767 | ||
768 | static ssize_t mci_size_mb_show(struct device *dev, | 768 | static ssize_t mci_size_mb_show(struct device *dev, |
769 | struct device_attribute *mattr, | 769 | struct device_attribute *mattr, |
770 | char *data) | 770 | char *data) |
771 | { | 771 | { |
772 | struct mem_ctl_info *mci = to_mci(dev); | 772 | struct mem_ctl_info *mci = to_mci(dev); |
773 | int total_pages = 0, csrow_idx, j; | 773 | int total_pages = 0, csrow_idx, j; |
774 | 774 | ||
775 | for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { | 775 | for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { |
776 | struct csrow_info *csrow = mci->csrows[csrow_idx]; | 776 | struct csrow_info *csrow = mci->csrows[csrow_idx]; |
777 | 777 | ||
778 | for (j = 0; j < csrow->nr_channels; j++) { | 778 | for (j = 0; j < csrow->nr_channels; j++) { |
779 | struct dimm_info *dimm = csrow->channels[j]->dimm; | 779 | struct dimm_info *dimm = csrow->channels[j]->dimm; |
780 | 780 | ||
781 | total_pages += dimm->nr_pages; | 781 | total_pages += dimm->nr_pages; |
782 | } | 782 | } |
783 | } | 783 | } |
784 | 784 | ||
785 | return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); | 785 | return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); |
786 | } | 786 | } |
787 | 787 | ||
788 | static ssize_t mci_max_location_show(struct device *dev, | 788 | static ssize_t mci_max_location_show(struct device *dev, |
789 | struct device_attribute *mattr, | 789 | struct device_attribute *mattr, |
790 | char *data) | 790 | char *data) |
791 | { | 791 | { |
792 | struct mem_ctl_info *mci = to_mci(dev); | 792 | struct mem_ctl_info *mci = to_mci(dev); |
793 | int i; | 793 | int i; |
794 | char *p = data; | 794 | char *p = data; |
795 | 795 | ||
796 | for (i = 0; i < mci->n_layers; i++) { | 796 | for (i = 0; i < mci->n_layers; i++) { |
797 | p += sprintf(p, "%s %d ", | 797 | p += sprintf(p, "%s %d ", |
798 | edac_layer_name[mci->layers[i].type], | 798 | edac_layer_name[mci->layers[i].type], |
799 | mci->layers[i].size - 1); | 799 | mci->layers[i].size - 1); |
800 | } | 800 | } |
801 | 801 | ||
802 | return p - data; | 802 | return p - data; |
803 | } | 803 | } |
804 | 804 | ||
805 | #ifdef CONFIG_EDAC_DEBUG | 805 | #ifdef CONFIG_EDAC_DEBUG |
806 | static ssize_t edac_fake_inject_write(struct file *file, | 806 | static ssize_t edac_fake_inject_write(struct file *file, |
807 | const char __user *data, | 807 | const char __user *data, |
808 | size_t count, loff_t *ppos) | 808 | size_t count, loff_t *ppos) |
809 | { | 809 | { |
810 | struct device *dev = file->private_data; | 810 | struct device *dev = file->private_data; |
811 | struct mem_ctl_info *mci = to_mci(dev); | 811 | struct mem_ctl_info *mci = to_mci(dev); |
812 | static enum hw_event_mc_err_type type; | 812 | static enum hw_event_mc_err_type type; |
813 | u16 errcount = mci->fake_inject_count; | 813 | u16 errcount = mci->fake_inject_count; |
814 | 814 | ||
815 | if (!errcount) | 815 | if (!errcount) |
816 | errcount = 1; | 816 | errcount = 1; |
817 | 817 | ||
818 | type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED | 818 | type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED |
819 | : HW_EVENT_ERR_CORRECTED; | 819 | : HW_EVENT_ERR_CORRECTED; |
820 | 820 | ||
821 | printk(KERN_DEBUG | 821 | printk(KERN_DEBUG |
822 | "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n", | 822 | "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n", |
823 | errcount, | 823 | errcount, |
824 | (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE", | 824 | (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE", |
825 | errcount > 1 ? "s" : "", | 825 | errcount > 1 ? "s" : "", |
826 | mci->fake_inject_layer[0], | 826 | mci->fake_inject_layer[0], |
827 | mci->fake_inject_layer[1], | 827 | mci->fake_inject_layer[1], |
828 | mci->fake_inject_layer[2] | 828 | mci->fake_inject_layer[2] |
829 | ); | 829 | ); |
830 | edac_mc_handle_error(type, mci, errcount, 0, 0, 0, | 830 | edac_mc_handle_error(type, mci, errcount, 0, 0, 0, |
831 | mci->fake_inject_layer[0], | 831 | mci->fake_inject_layer[0], |
832 | mci->fake_inject_layer[1], | 832 | mci->fake_inject_layer[1], |
833 | mci->fake_inject_layer[2], | 833 | mci->fake_inject_layer[2], |
834 | "FAKE ERROR", "for EDAC testing only"); | 834 | "FAKE ERROR", "for EDAC testing only"); |
835 | 835 | ||
836 | return count; | 836 | return count; |
837 | } | 837 | } |
838 | 838 | ||
839 | static const struct file_operations debug_fake_inject_fops = { | 839 | static const struct file_operations debug_fake_inject_fops = { |
840 | .open = simple_open, | 840 | .open = simple_open, |
841 | .write = edac_fake_inject_write, | 841 | .write = edac_fake_inject_write, |
842 | .llseek = generic_file_llseek, | 842 | .llseek = generic_file_llseek, |
843 | }; | 843 | }; |
844 | #endif | 844 | #endif |
845 | 845 | ||
846 | /* default Control file */ | 846 | /* default Control file */ |
847 | DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); | 847 | DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); |
848 | 848 | ||
849 | /* default Attribute files */ | 849 | /* default Attribute files */ |
850 | DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); | 850 | DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); |
851 | DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); | 851 | DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); |
852 | DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); | 852 | DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); |
853 | DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); | 853 | DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); |
854 | DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); | 854 | DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); |
855 | DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); | 855 | DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); |
856 | DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); | 856 | DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); |
857 | DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL); | 857 | DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL); |
858 | 858 | ||
859 | /* memory scrubber attribute file */ | 859 | /* memory scrubber attribute file */ |
860 | DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL); | 860 | DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL); |
861 | 861 | ||
862 | static struct attribute *mci_attrs[] = { | 862 | static struct attribute *mci_attrs[] = { |
863 | &dev_attr_reset_counters.attr, | 863 | &dev_attr_reset_counters.attr, |
864 | &dev_attr_mc_name.attr, | 864 | &dev_attr_mc_name.attr, |
865 | &dev_attr_size_mb.attr, | 865 | &dev_attr_size_mb.attr, |
866 | &dev_attr_seconds_since_reset.attr, | 866 | &dev_attr_seconds_since_reset.attr, |
867 | &dev_attr_ue_noinfo_count.attr, | 867 | &dev_attr_ue_noinfo_count.attr, |
868 | &dev_attr_ce_noinfo_count.attr, | 868 | &dev_attr_ce_noinfo_count.attr, |
869 | &dev_attr_ue_count.attr, | 869 | &dev_attr_ue_count.attr, |
870 | &dev_attr_ce_count.attr, | 870 | &dev_attr_ce_count.attr, |
871 | &dev_attr_max_location.attr, | 871 | &dev_attr_max_location.attr, |
872 | NULL | 872 | NULL |
873 | }; | 873 | }; |
874 | 874 | ||
875 | static struct attribute_group mci_attr_grp = { | 875 | static struct attribute_group mci_attr_grp = { |
876 | .attrs = mci_attrs, | 876 | .attrs = mci_attrs, |
877 | }; | 877 | }; |
878 | 878 | ||
879 | static const struct attribute_group *mci_attr_groups[] = { | 879 | static const struct attribute_group *mci_attr_groups[] = { |
880 | &mci_attr_grp, | 880 | &mci_attr_grp, |
881 | NULL | 881 | NULL |
882 | }; | 882 | }; |
883 | 883 | ||
884 | static void mci_attr_release(struct device *dev) | 884 | static void mci_attr_release(struct device *dev) |
885 | { | 885 | { |
886 | struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); | 886 | struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); |
887 | 887 | ||
888 | edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); | 888 | edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); |
889 | kfree(mci); | 889 | kfree(mci); |
890 | } | 890 | } |
891 | 891 | ||
892 | static struct device_type mci_attr_type = { | 892 | static struct device_type mci_attr_type = { |
893 | .groups = mci_attr_groups, | 893 | .groups = mci_attr_groups, |
894 | .release = mci_attr_release, | 894 | .release = mci_attr_release, |
895 | }; | 895 | }; |
896 | 896 | ||
897 | #ifdef CONFIG_EDAC_DEBUG | 897 | #ifdef CONFIG_EDAC_DEBUG |
898 | static struct dentry *edac_debugfs; | 898 | static struct dentry *edac_debugfs; |
899 | 899 | ||
900 | int __init edac_debugfs_init(void) | 900 | int __init edac_debugfs_init(void) |
901 | { | 901 | { |
902 | edac_debugfs = debugfs_create_dir("edac", NULL); | 902 | edac_debugfs = debugfs_create_dir("edac", NULL); |
903 | if (IS_ERR(edac_debugfs)) { | 903 | if (IS_ERR(edac_debugfs)) { |
904 | edac_debugfs = NULL; | 904 | edac_debugfs = NULL; |
905 | return -ENOMEM; | 905 | return -ENOMEM; |
906 | } | 906 | } |
907 | return 0; | 907 | return 0; |
908 | } | 908 | } |
909 | 909 | ||
910 | void __exit edac_debugfs_exit(void) | 910 | void __exit edac_debugfs_exit(void) |
911 | { | 911 | { |
912 | debugfs_remove(edac_debugfs); | 912 | debugfs_remove(edac_debugfs); |
913 | } | 913 | } |
914 | 914 | ||
915 | int edac_create_debug_nodes(struct mem_ctl_info *mci) | 915 | int edac_create_debug_nodes(struct mem_ctl_info *mci) |
916 | { | 916 | { |
917 | struct dentry *d, *parent; | 917 | struct dentry *d, *parent; |
918 | char name[80]; | 918 | char name[80]; |
919 | int i; | 919 | int i; |
920 | 920 | ||
921 | if (!edac_debugfs) | 921 | if (!edac_debugfs) |
922 | return -ENODEV; | 922 | return -ENODEV; |
923 | 923 | ||
924 | d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs); | 924 | d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs); |
925 | if (!d) | 925 | if (!d) |
926 | return -ENOMEM; | 926 | return -ENOMEM; |
927 | parent = d; | 927 | parent = d; |
928 | 928 | ||
929 | for (i = 0; i < mci->n_layers; i++) { | 929 | for (i = 0; i < mci->n_layers; i++) { |
930 | sprintf(name, "fake_inject_%s", | 930 | sprintf(name, "fake_inject_%s", |
931 | edac_layer_name[mci->layers[i].type]); | 931 | edac_layer_name[mci->layers[i].type]); |
932 | d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent, | 932 | d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent, |
933 | &mci->fake_inject_layer[i]); | 933 | &mci->fake_inject_layer[i]); |
934 | if (!d) | 934 | if (!d) |
935 | goto nomem; | 935 | goto nomem; |
936 | } | 936 | } |
937 | 937 | ||
938 | d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent, | 938 | d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent, |
939 | &mci->fake_inject_ue); | 939 | &mci->fake_inject_ue); |
940 | if (!d) | 940 | if (!d) |
941 | goto nomem; | 941 | goto nomem; |
942 | 942 | ||
943 | d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent, | 943 | d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent, |
944 | &mci->fake_inject_count); | 944 | &mci->fake_inject_count); |
945 | if (!d) | 945 | if (!d) |
946 | goto nomem; | 946 | goto nomem; |
947 | 947 | ||
948 | d = debugfs_create_file("fake_inject", S_IWUSR, parent, | 948 | d = debugfs_create_file("fake_inject", S_IWUSR, parent, |
949 | &mci->dev, | 949 | &mci->dev, |
950 | &debug_fake_inject_fops); | 950 | &debug_fake_inject_fops); |
951 | if (!d) | 951 | if (!d) |
952 | goto nomem; | 952 | goto nomem; |
953 | 953 | ||
954 | mci->debugfs = parent; | 954 | mci->debugfs = parent; |
955 | return 0; | 955 | return 0; |
956 | nomem: | 956 | nomem: |
957 | debugfs_remove(mci->debugfs); | 957 | debugfs_remove(mci->debugfs); |
958 | return -ENOMEM; | 958 | return -ENOMEM; |
959 | } | 959 | } |
960 | #endif | 960 | #endif |
961 | 961 | ||
962 | /* | 962 | /* |
963 | * Create a new Memory Controller kobject instance, | 963 | * Create a new Memory Controller kobject instance, |
964 | * mc<id> under the 'mc' directory | 964 | * mc<id> under the 'mc' directory |
965 | * | 965 | * |
966 | * Return: | 966 | * Return: |
967 | * 0 Success | 967 | * 0 Success |
968 | * !0 Failure | 968 | * !0 Failure |
969 | */ | 969 | */ |
970 | int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | 970 | int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) |
971 | { | 971 | { |
972 | int i, err; | 972 | int i, err; |
973 | 973 | ||
974 | /* | 974 | /* |
975 | * The memory controller needs its own bus, in order to avoid | 975 | * The memory controller needs its own bus, in order to avoid |
976 | * namespace conflicts at /sys/bus/edac. | 976 | * namespace conflicts at /sys/bus/edac. |
977 | */ | 977 | */ |
978 | mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); | 978 | mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); |
979 | if (!mci->bus.name) | 979 | if (!mci->bus->name) |
980 | return -ENOMEM; | 980 | return -ENOMEM; |
981 | edac_dbg(0, "creating bus %s\n", mci->bus.name); | 981 | |
982 | err = bus_register(&mci->bus); | 982 | edac_dbg(0, "creating bus %s\n", mci->bus->name); |
| | 983 | |
| | 984 | err = bus_register(mci->bus); |
983 | if (err < 0) | 985 | if (err < 0) |
984 | return err; | 986 | return err; |
985 | 987 | ||
986 | /* get the /sys/devices/system/edac subsys reference */ | 988 | /* get the /sys/devices/system/edac subsys reference */ |
987 | mci->dev.type = &mci_attr_type; | 989 | mci->dev.type = &mci_attr_type; |
988 | device_initialize(&mci->dev); | 990 | device_initialize(&mci->dev); |
989 | 991 | ||
990 | mci->dev.parent = mci_pdev; | 992 | mci->dev.parent = mci_pdev; |
991 | mci->dev.bus = &mci->bus; | 993 | mci->dev.bus = mci->bus; |
992 | dev_set_name(&mci->dev, "mc%d", mci->mc_idx); | 994 | dev_set_name(&mci->dev, "mc%d", mci->mc_idx); |
993 | dev_set_drvdata(&mci->dev, mci); | 995 | dev_set_drvdata(&mci->dev, mci); |
994 | pm_runtime_forbid(&mci->dev); | 996 | pm_runtime_forbid(&mci->dev); |
995 | 997 | ||
996 | edac_dbg(0, "creating device %s\n", dev_name(&mci->dev)); | 998 | edac_dbg(0, "creating device %s\n", dev_name(&mci->dev)); |
997 | err = device_add(&mci->dev); | 999 | err = device_add(&mci->dev); |
998 | if (err < 0) { | 1000 | if (err < 0) { |
999 | edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); | 1001 | edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); |
1000 | bus_unregister(&mci->bus); | 1002 | bus_unregister(mci->bus); |
1001 | kfree(mci->bus.name); | 1003 | kfree(mci->bus->name); |
1002 | return err; | 1004 | return err; |
1003 | } | 1005 | } |
1004 | 1006 | ||
1005 | if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) { | 1007 | if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) { |
1006 | if (mci->get_sdram_scrub_rate) { | 1008 | if (mci->get_sdram_scrub_rate) { |
1007 | dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO; | 1009 | dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO; |
1008 | dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show; | 1010 | dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show; |
1009 | } | 1011 | } |
1010 | if (mci->set_sdram_scrub_rate) { | 1012 | if (mci->set_sdram_scrub_rate) { |
1011 | dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR; | 1013 | dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR; |
1012 | dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store; | 1014 | dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store; |
1013 | } | 1015 | } |
1014 | err = device_create_file(&mci->dev, | 1016 | err = device_create_file(&mci->dev, |
1015 | &dev_attr_sdram_scrub_rate); | 1017 | &dev_attr_sdram_scrub_rate); |
1016 | if (err) { | 1018 | if (err) { |
1017 | edac_dbg(1, "failure: create sdram_scrub_rate\n"); | 1019 | edac_dbg(1, "failure: create sdram_scrub_rate\n"); |
1018 | goto fail2; | 1020 | goto fail2; |
1019 | } | 1021 | } |
1020 | } | 1022 | } |
1021 | /* | 1023 | /* |
1022 | * Create the dimm/rank devices | 1024 | * Create the dimm/rank devices |
1023 | */ | 1025 | */ |
1024 | for (i = 0; i < mci->tot_dimms; i++) { | 1026 | for (i = 0; i < mci->tot_dimms; i++) { |
1025 | struct dimm_info *dimm = mci->dimms[i]; | 1027 | struct dimm_info *dimm = mci->dimms[i]; |
1026 | /* Only expose populated DIMMs */ | 1028 | /* Only expose populated DIMMs */ |
1027 | if (dimm->nr_pages == 0) | 1029 | if (dimm->nr_pages == 0) |
1028 | continue; | 1030 | continue; |
1029 | #ifdef CONFIG_EDAC_DEBUG | 1031 | #ifdef CONFIG_EDAC_DEBUG |
1030 | edac_dbg(1, "creating dimm%d, located at ", i); | 1032 | edac_dbg(1, "creating dimm%d, located at ", i); |
1031 | if (edac_debug_level >= 1) { | 1033 | if (edac_debug_level >= 1) { |
1032 | int lay; | 1034 | int lay; |
1033 | for (lay = 0; lay < mci->n_layers; lay++) | 1035 | for (lay = 0; lay < mci->n_layers; lay++) |
1034 | printk(KERN_CONT "%s %d ", | 1036 | printk(KERN_CONT "%s %d ", |
1035 | edac_layer_name[mci->layers[lay].type], | 1037 | edac_layer_name[mci->layers[lay].type], |
1036 | dimm->location[lay]); | 1038 | dimm->location[lay]); |
1037 | printk(KERN_CONT "\n"); | 1039 | printk(KERN_CONT "\n"); |
1038 | } | 1040 | } |
1039 | #endif | 1041 | #endif |
1040 | err = edac_create_dimm_object(mci, dimm, i); | 1042 | err = edac_create_dimm_object(mci, dimm, i); |
1041 | if (err) { | 1043 | if (err) { |
1042 | edac_dbg(1, "failure: create dimm %d obj\n", i); | 1044 | edac_dbg(1, "failure: create dimm %d obj\n", i); |
1043 | goto fail; | 1045 | goto fail; |
1044 | } | 1046 | } |
1045 | } | 1047 | } |
1046 | 1048 | ||
1047 | #ifdef CONFIG_EDAC_LEGACY_SYSFS | 1049 | #ifdef CONFIG_EDAC_LEGACY_SYSFS |
1048 | err = edac_create_csrow_objects(mci); | 1050 | err = edac_create_csrow_objects(mci); |
1049 | if (err < 0) | 1051 | if (err < 0) |
1050 | goto fail; | 1052 | goto fail; |
1051 | #endif | 1053 | #endif |
1052 | 1054 | ||
1053 | #ifdef CONFIG_EDAC_DEBUG | 1055 | #ifdef CONFIG_EDAC_DEBUG |
1054 | edac_create_debug_nodes(mci); | 1056 | edac_create_debug_nodes(mci); |
1055 | #endif | 1057 | #endif |
1056 | return 0; | 1058 | return 0; |
1057 | 1059 | ||
1058 | fail: | 1060 | fail: |
1059 | for (i--; i >= 0; i--) { | 1061 | for (i--; i >= 0; i--) { |
1060 | struct dimm_info *dimm = mci->dimms[i]; | 1062 | struct dimm_info *dimm = mci->dimms[i]; |
1061 | if (dimm->nr_pages == 0) | 1063 | if (dimm->nr_pages == 0) |
1062 | continue; | 1064 | continue; |
1063 | device_unregister(&dimm->dev); | 1065 | device_unregister(&dimm->dev); |
1064 | } | 1066 | } |
1065 | fail2: | 1067 | fail2: |
1066 | device_unregister(&mci->dev); | 1068 | device_unregister(&mci->dev); |
1067 | bus_unregister(&mci->bus); | 1069 | bus_unregister(mci->bus); |
1068 | kfree(mci->bus.name); | 1070 | kfree(mci->bus->name); |
1069 | return err; | 1071 | return err; |
1070 | } | 1072 | } |
1071 | 1073 | ||
1072 | /* | 1074 | /* |
1073 | * remove a Memory Controller instance | 1075 | * remove a Memory Controller instance |
1074 | */ | 1076 | */ |
1075 | void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | 1077 | void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) |
1076 | { | 1078 | { |
1077 | int i; | 1079 | int i; |
1078 | 1080 | ||
1079 | edac_dbg(0, "\n"); | 1081 | edac_dbg(0, "\n"); |
1080 | 1082 | ||
1081 | #ifdef CONFIG_EDAC_DEBUG | 1083 | #ifdef CONFIG_EDAC_DEBUG |
1082 | debugfs_remove(mci->debugfs); | 1084 | debugfs_remove(mci->debugfs); |
1083 | #endif | 1085 | #endif |
1084 | #ifdef CONFIG_EDAC_LEGACY_SYSFS | 1086 | #ifdef CONFIG_EDAC_LEGACY_SYSFS |
1085 | edac_delete_csrow_objects(mci); | 1087 | edac_delete_csrow_objects(mci); |
1086 | #endif | 1088 | #endif |
1087 | 1089 | ||
1088 | for (i = 0; i < mci->tot_dimms; i++) { | 1090 | for (i = 0; i < mci->tot_dimms; i++) { |
1089 | struct dimm_info *dimm = mci->dimms[i]; | 1091 | struct dimm_info *dimm = mci->dimms[i]; |
1090 | if (dimm->nr_pages == 0) | 1092 | if (dimm->nr_pages == 0) |
1091 | continue; | 1093 | continue; |
1092 | edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev)); | 1094 | edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev)); |
1093 | device_unregister(&dimm->dev); | 1095 | device_unregister(&dimm->dev); |
1094 | } | 1096 | } |
1095 | } | 1097 | } |
1096 | 1098 | ||
1097 | void edac_unregister_sysfs(struct mem_ctl_info *mci) | 1099 | void edac_unregister_sysfs(struct mem_ctl_info *mci) |
1098 | { | 1100 | { |
1099 | edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); | 1101 | edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); |
1100 | device_unregister(&mci->dev); | 1102 | device_unregister(&mci->dev); |
1101 | bus_unregister(&mci->bus); | 1103 | bus_unregister(mci->bus); |
1102 | kfree(mci->bus.name); | 1104 | kfree(mci->bus->name); |
1103 | } | 1105 | } |
1104 | 1106 | ||
1105 | static void mc_attr_release(struct device *dev) | 1107 | static void mc_attr_release(struct device *dev) |
1106 | { | 1108 | { |
1107 | /* | 1109 | /* |
1108 | * There's no container structure here, as this is just the mci | 1110 | * There's no container structure here, as this is just the mci |
1109 | * parent device, used to create the /sys/devices/mc sysfs node. | 1111 | * parent device, used to create the /sys/devices/mc sysfs node. |
1110 | * So, there are no attributes on it. | 1112 | * So, there are no attributes on it. |
1111 | */ | 1113 | */ |
1112 | edac_dbg(1, "Releasing device %s\n", dev_name(dev)); | 1114 | edac_dbg(1, "Releasing device %s\n", dev_name(dev)); |
1113 | kfree(dev); | 1115 | kfree(dev); |
1114 | } | 1116 | } |
1115 | 1117 | ||
1116 | static struct device_type mc_attr_type = { | 1118 | static struct device_type mc_attr_type = { |
1117 | .release = mc_attr_release, | 1119 | .release = mc_attr_release, |
1118 | }; | 1120 | }; |
1119 | /* | 1121 | /* |
1120 | * Init/exit code for the module. Basically, creates/removes /sys/devices/system/edac/mc | 1122 | * Init/exit code for the module. Basically, creates/removes /sys/devices/system/edac/mc |
1121 | */ | 1123 | */ |
1122 | int __init edac_mc_sysfs_init(void) | 1124 | int __init edac_mc_sysfs_init(void) |
1123 | { | 1125 | { |
1124 | struct bus_type *edac_subsys; | 1126 | struct bus_type *edac_subsys; |
1125 | int err; | 1127 | int err; |
1126 | 1128 | ||
1127 | /* get the /sys/devices/system/edac subsys reference */ | 1129 | /* get the /sys/devices/system/edac subsys reference */ |
1128 | edac_subsys = edac_get_sysfs_subsys(); | 1130 | edac_subsys = edac_get_sysfs_subsys(); |
1129 | if (edac_subsys == NULL) { | 1131 | if (edac_subsys == NULL) { |
1130 | edac_dbg(1, "no edac_subsys\n"); | 1132 | edac_dbg(1, "no edac_subsys\n"); |
1131 | err = -EINVAL; | 1133 | err = -EINVAL; |
1132 | goto out; | 1134 | goto out; |
1133 | } | 1135 | } |
1134 | 1136 | ||
1135 | mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL); | 1137 | mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL); |
1136 | if (!mci_pdev) { | 1138 | if (!mci_pdev) { |
1137 | err = -ENOMEM; | 1139 | err = -ENOMEM; |
1138 | goto out_put_sysfs; | 1140 | goto out_put_sysfs; |
1139 | } | 1141 | } |
1140 | 1142 | ||
1141 | mci_pdev->bus = edac_subsys; | 1143 | mci_pdev->bus = edac_subsys; |
1142 | mci_pdev->type = &mc_attr_type; | 1144 | mci_pdev->type = &mc_attr_type; |
1143 | device_initialize(mci_pdev); | 1145 | device_initialize(mci_pdev); |
1144 | dev_set_name(mci_pdev, "mc"); | 1146 | dev_set_name(mci_pdev, "mc"); |
1145 | 1147 | ||
1146 | err = device_add(mci_pdev); | 1148 | err = device_add(mci_pdev); |
1147 | if (err < 0) | 1149 | if (err < 0) |
1148 | goto out_dev_free; | 1150 | goto out_dev_free; |
1149 | 1151 | ||
1150 | edac_dbg(0, "device %s created\n", dev_name(mci_pdev)); | 1152 | edac_dbg(0, "device %s created\n", dev_name(mci_pdev)); |
1151 | 1153 | ||
1152 | return 0; | 1154 | return 0; |
1153 | 1155 | ||
1154 | out_dev_free: | 1156 | out_dev_free: |
1155 | kfree(mci_pdev); | 1157 | kfree(mci_pdev); |
1156 | out_put_sysfs: | 1158 | out_put_sysfs: |
1157 | edac_put_sysfs_subsys(); | 1159 | edac_put_sysfs_subsys(); |
1158 | out: | 1160 | out: |
1159 | return err; | 1161 | return err; |
1160 | } | 1162 | } |
1161 | 1163 | ||
1162 | void __exit edac_mc_sysfs_exit(void) | 1164 | void __exit edac_mc_sysfs_exit(void) |
1163 | { | 1165 | { |
1164 | device_unregister(mci_pdev); | 1166 | device_unregister(mci_pdev); |
1165 | edac_put_sysfs_subsys(); | 1167 | edac_put_sysfs_subsys(); |
1166 | } | 1168 | } |
1167 | 1169 |
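Every edac_mc.c hunk above follows the same pattern: struct mem_ctl_info now carries a pointer to its bus, so ".bus = &mci->bus" becomes ".bus = mci->bus", bus_register()/bus_unregister() take mci->bus directly, and "mci->bus.name" becomes "mci->bus->name". The pointer matters because the lock class key that bus_register() hands to lockdep is embedded in struct bus_type itself, and lockdep's static_obj() check only accepts keys living in static storage; a bus embedded in the kmalloc()'d mci sits on the heap and trips that check at registration time. A minimal sketch of the working shape, as a hypothetical standalone module (demo_mc_bus and the "demo_mc" name are illustrative, not part of this commit):

    #include <linux/device.h>
    #include <linux/module.h>

    /* Statically allocated, so the lock class key inside it lives in
     * .data and lockdep's static-storage check passes. */
    static struct bus_type demo_mc_bus = {
            .name = "demo_mc",
    };

    static int __init demo_init(void)
    {
            /* Registering a kmalloc()'d bus_type here instead would
             * trip that same lockdep check at module load. */
            return bus_register(&demo_mc_bus);
    }

    static void __exit demo_exit(void)
    {
            bus_unregister(&demo_mc_bus);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");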
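Separately, the block comment above mci_sdram_scrub_rate_store() pins down the contract for a driver's ->set_sdram_scrub_rate() hook: return the bandwidth actually programmed, 0 when scrubbing ends up disabled, or a negative value on error (which the store method converts to -EINVAL). A hedged driver-side sketch of that contract; demo_clamp_bw() is a made-up placeholder for whatever rounding a real controller needs, not an EDAC interface:

    #include "edac_core.h"  /* struct mem_ctl_info, as in the in-tree drivers */

    /* Hypothetical: round the requested rate down to the nearest rate
     * the hardware supports, or to 0 if it is below the minimum. */
    static u32 demo_clamp_bw(u32 requested)
    {
            return requested >= 1000 ? 1000 : 0;
    }

    static int demo_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
    {
            u32 actual = demo_clamp_bw(new_bw);

            if (!actual)
                    return 0;       /* scrubbing disabled, not an error */

            /* ... program the controller's scrub registers here ... */

            return actual;          /* accepted bandwidth, echoed back via sysfs */
    }

mci_sdram_scrub_rate_show() then reports whatever ->get_sdram_scrub_rate() returns under the same sign convention.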
drivers/edac/i5100_edac.c
1 | /* | 1 | /* |
2 | * Intel 5100 Memory Controllers kernel module | 2 | * Intel 5100 Memory Controllers kernel module |
3 | * | 3 | * |
4 | * This file may be distributed under the terms of the | 4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | 5 | * GNU General Public License. |
6 | * | 6 | * |
7 | * This module is based on the following document: | 7 | * This module is based on the following document: |
8 | * | 8 | * |
9 | * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet | 9 | * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet |
10 | * http://download.intel.com/design/chipsets/datashts/318378.pdf | 10 | * http://download.intel.com/design/chipsets/datashts/318378.pdf |
11 | * | 11 | * |
12 | * The Intel 5100 has two independent channels. EDAC core currently | 12 | * The Intel 5100 has two independent channels. EDAC core currently |
13 | * cannot reflect this configuration, so instead the chip-select | 13 | * cannot reflect this configuration, so instead the chip-select |
14 | * rows for each respective channel are laid out one after another, | 14 | * rows for each respective channel are laid out one after another, |
15 | * the first half belonging to channel 0, the second half belonging | 15 | * the first half belonging to channel 0, the second half belonging |
16 | * to channel 1. | 16 | * to channel 1. |
17 | * | 17 | * |
18 | * This driver is for DDR2 DIMMs, and it uses chip select to select among the | 18 | * This driver is for DDR2 DIMMs, and it uses chip select to select among the |
19 | * several ranks. However, instead of showing memory as ranks, it outputs | 19 | * several ranks. However, instead of showing memory as ranks, it outputs |
20 | * them as DIMMs. An internal table creates the association between ranks | 20 | * them as DIMMs. An internal table creates the association between ranks |
21 | * and DIMMs. | 21 | * and DIMMs. |
22 | */ | 22 | */ |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <linux/pci_ids.h> | 26 | #include <linux/pci_ids.h> |
27 | #include <linux/edac.h> | 27 | #include <linux/edac.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/mmzone.h> | 29 | #include <linux/mmzone.h> |
30 | #include <linux/debugfs.h> | 30 | #include <linux/debugfs.h> |
31 | 31 | ||
32 | #include "edac_core.h" | 32 | #include "edac_core.h" |
33 | 33 | ||
34 | /* register addresses */ | 34 | /* register addresses */ |
35 | 35 | ||
36 | /* device 16, func 1 */ | 36 | /* device 16, func 1 */ |
37 | #define I5100_MC 0x40 /* Memory Control Register */ | 37 | #define I5100_MC 0x40 /* Memory Control Register */ |
38 | #define I5100_MC_SCRBEN_MASK (1 << 7) | 38 | #define I5100_MC_SCRBEN_MASK (1 << 7) |
39 | #define I5100_MC_SCRBDONE_MASK (1 << 4) | 39 | #define I5100_MC_SCRBDONE_MASK (1 << 4) |
40 | #define I5100_MS 0x44 /* Memory Status Register */ | 40 | #define I5100_MS 0x44 /* Memory Status Register */ |
41 | #define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */ | 41 | #define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */ |
42 | #define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */ | 42 | #define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */ |
43 | #define I5100_TOLM 0x6c /* Top of Low Memory */ | 43 | #define I5100_TOLM 0x6c /* Top of Low Memory */ |
44 | #define I5100_MIR0 0x80 /* Memory Interleave Range 0 */ | 44 | #define I5100_MIR0 0x80 /* Memory Interleave Range 0 */ |
45 | #define I5100_MIR1 0x84 /* Memory Interleave Range 1 */ | 45 | #define I5100_MIR1 0x84 /* Memory Interleave Range 1 */ |
46 | #define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */ | 46 | #define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */ |
47 | #define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */ | 47 | #define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */ |
48 | #define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */ | 48 | #define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */ |
49 | #define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16) | 49 | #define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16) |
50 | #define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15) | 50 | #define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15) |
51 | #define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14) | 51 | #define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14) |
52 | #define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12) | 52 | #define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12) |
53 | #define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11) | 53 | #define I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11) |
54 | #define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10) | 54 | #define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10) |
55 | #define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6) | 55 | #define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6) |
56 | #define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5) | 56 | #define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5) |
57 | #define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4) | 57 | #define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4) |
58 | #define I5100_FERR_NF_MEM_M1ERR_MASK (1 << 1) | 58 | #define I5100_FERR_NF_MEM_M1ERR_MASK (1 << 1) |
59 | #define I5100_FERR_NF_MEM_ANY_MASK \ | 59 | #define I5100_FERR_NF_MEM_ANY_MASK \ |
60 | (I5100_FERR_NF_MEM_M16ERR_MASK | \ | 60 | (I5100_FERR_NF_MEM_M16ERR_MASK | \ |
61 | I5100_FERR_NF_MEM_M15ERR_MASK | \ | 61 | I5100_FERR_NF_MEM_M15ERR_MASK | \ |
62 | I5100_FERR_NF_MEM_M14ERR_MASK | \ | 62 | I5100_FERR_NF_MEM_M14ERR_MASK | \ |
63 | I5100_FERR_NF_MEM_M12ERR_MASK | \ | 63 | I5100_FERR_NF_MEM_M12ERR_MASK | \ |
64 | I5100_FERR_NF_MEM_M11ERR_MASK | \ | 64 | I5100_FERR_NF_MEM_M11ERR_MASK | \ |
65 | I5100_FERR_NF_MEM_M10ERR_MASK | \ | 65 | I5100_FERR_NF_MEM_M10ERR_MASK | \ |
66 | I5100_FERR_NF_MEM_M6ERR_MASK | \ | 66 | I5100_FERR_NF_MEM_M6ERR_MASK | \ |
67 | I5100_FERR_NF_MEM_M5ERR_MASK | \ | 67 | I5100_FERR_NF_MEM_M5ERR_MASK | \ |
68 | I5100_FERR_NF_MEM_M4ERR_MASK | \ | 68 | I5100_FERR_NF_MEM_M4ERR_MASK | \ |
69 | I5100_FERR_NF_MEM_M1ERR_MASK) | 69 | I5100_FERR_NF_MEM_M1ERR_MASK) |
70 | #define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */ | 70 | #define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */ |
71 | #define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */ | 71 | #define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */ |
72 | #define I5100_MEM0EINJMSK0 0x200 /* Injection Mask0 Register Channel 0 */ | 72 | #define I5100_MEM0EINJMSK0 0x200 /* Injection Mask0 Register Channel 0 */ |
73 | #define I5100_MEM1EINJMSK0 0x208 /* Injection Mask0 Register Channel 1 */ | 73 | #define I5100_MEM1EINJMSK0 0x208 /* Injection Mask0 Register Channel 1 */ |
74 | #define I5100_MEMXEINJMSK0_EINJEN (1 << 27) | 74 | #define I5100_MEMXEINJMSK0_EINJEN (1 << 27) |
75 | #define I5100_MEM0EINJMSK1 0x204 /* Injection Mask1 Register Channel 0 */ | 75 | #define I5100_MEM0EINJMSK1 0x204 /* Injection Mask1 Register Channel 0 */ |
76 | #define I5100_MEM1EINJMSK1 0x206 /* Injection Mask1 Register Channel 1 */ | 76 | #define I5100_MEM1EINJMSK1 0x206 /* Injection Mask1 Register Channel 1 */ |
77 | 77 | ||
78 | /* Device 19, Function 0 */ | 78 | /* Device 19, Function 0 */ |
79 | #define I5100_DINJ0 0x9a | 79 | #define I5100_DINJ0 0x9a |
80 | 80 | ||
81 | /* device 21 and 22, func 0 */ | 81 | /* device 21 and 22, func 0 */ |
82 | #define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */ | 82 | #define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */ |
83 | #define I5100_DMIR 0x15c /* DIMM Interleave Range */ | 83 | #define I5100_DMIR 0x15c /* DIMM Interleave Range */ |
84 | #define I5100_VALIDLOG 0x18c /* Valid Log Markers */ | 84 | #define I5100_VALIDLOG 0x18c /* Valid Log Markers */ |
85 | #define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */ | 85 | #define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */ |
86 | #define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */ | 86 | #define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */ |
87 | #define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */ | 87 | #define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */ |
88 | #define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */ | 88 | #define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */ |
89 | #define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */ | 89 | #define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */ |
90 | #define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */ | 90 | #define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */ |
91 | #define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */ | 91 | #define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */ |
92 | 92 | ||
93 | /* bit field accessors */ | 93 | /* bit field accessors */ |
94 | 94 | ||
95 | static inline u32 i5100_mc_scrben(u32 mc) | 95 | static inline u32 i5100_mc_scrben(u32 mc) |
96 | { | 96 | { |
97 | return mc >> 7 & 1; | 97 | return mc >> 7 & 1; |
98 | } | 98 | } |
99 | 99 | ||
100 | static inline u32 i5100_mc_errdeten(u32 mc) | 100 | static inline u32 i5100_mc_errdeten(u32 mc) |
101 | { | 101 | { |
102 | return mc >> 5 & 1; | 102 | return mc >> 5 & 1; |
103 | } | 103 | } |
104 | 104 | ||
105 | static inline u32 i5100_mc_scrbdone(u32 mc) | 105 | static inline u32 i5100_mc_scrbdone(u32 mc) |
106 | { | 106 | { |
107 | return mc >> 4 & 1; | 107 | return mc >> 4 & 1; |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline u16 i5100_spddata_rdo(u16 a) | 110 | static inline u16 i5100_spddata_rdo(u16 a) |
111 | { | 111 | { |
112 | return a >> 15 & 1; | 112 | return a >> 15 & 1; |
113 | } | 113 | } |
114 | 114 | ||
115 | static inline u16 i5100_spddata_sbe(u16 a) | 115 | static inline u16 i5100_spddata_sbe(u16 a) |
116 | { | 116 | { |
117 | return a >> 13 & 1; | 117 | return a >> 13 & 1; |
118 | } | 118 | } |
119 | 119 | ||
120 | static inline u16 i5100_spddata_busy(u16 a) | 120 | static inline u16 i5100_spddata_busy(u16 a) |
121 | { | 121 | { |
122 | return a >> 12 & 1; | 122 | return a >> 12 & 1; |
123 | } | 123 | } |
124 | 124 | ||
125 | static inline u16 i5100_spddata_data(u16 a) | 125 | static inline u16 i5100_spddata_data(u16 a) |
126 | { | 126 | { |
127 | return a & ((1 << 8) - 1); | 127 | return a & ((1 << 8) - 1); |
128 | } | 128 | } |
129 | 129 | ||
130 | static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba, | 130 | static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba, |
131 | u32 data, u32 cmd) | 131 | u32 data, u32 cmd) |
132 | { | 132 | { |
133 | return ((dti & ((1 << 4) - 1)) << 28) | | 133 | return ((dti & ((1 << 4) - 1)) << 28) | |
134 | ((ckovrd & 1) << 27) | | 134 | ((ckovrd & 1) << 27) | |
135 | ((sa & ((1 << 3) - 1)) << 24) | | 135 | ((sa & ((1 << 3) - 1)) << 24) | |
136 | ((ba & ((1 << 8) - 1)) << 16) | | 136 | ((ba & ((1 << 8) - 1)) << 16) | |
137 | ((data & ((1 << 8) - 1)) << 8) | | 137 | ((data & ((1 << 8) - 1)) << 8) | |
138 | (cmd & 1); | 138 | (cmd & 1); |
139 | } | 139 | } |
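A concrete value makes the packing above easy to check. The sketch below just evaluates the shifts by hand, using the argument convention that i5100_read_spd_byte() uses further down (dti 0xa for an SPD EEPROM, sa = ch * 4 + slot); the chosen channel/slot/byte are hypothetical:

	/* channel 1, slot 2, SPD byte 5, read: sa = 1 * 4 + 2 = 6 */
	u32 cmd = i5100_spdcmd_create(0xa, 1, 6, 5, 0, 0);
	/* = 0xa<<28 | 1<<27 | 6<<24 | 5<<16 = 0xae050000 */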
140 | 140 | ||
141 | static inline u16 i5100_tolm_tolm(u16 a) | 141 | static inline u16 i5100_tolm_tolm(u16 a) |
142 | { | 142 | { |
143 | return a >> 12 & ((1 << 4) - 1); | 143 | return a >> 12 & ((1 << 4) - 1); |
144 | } | 144 | } |
145 | 145 | ||
146 | static inline u16 i5100_mir_limit(u16 a) | 146 | static inline u16 i5100_mir_limit(u16 a) |
147 | { | 147 | { |
148 | return a >> 4 & ((1 << 12) - 1); | 148 | return a >> 4 & ((1 << 12) - 1); |
149 | } | 149 | } |
150 | 150 | ||
151 | static inline u16 i5100_mir_way1(u16 a) | 151 | static inline u16 i5100_mir_way1(u16 a) |
152 | { | 152 | { |
153 | return a >> 1 & 1; | 153 | return a >> 1 & 1; |
154 | } | 154 | } |
155 | 155 | ||
156 | static inline u16 i5100_mir_way0(u16 a) | 156 | static inline u16 i5100_mir_way0(u16 a) |
157 | { | 157 | { |
158 | return a & 1; | 158 | return a & 1; |
159 | } | 159 | } |
160 | 160 | ||
161 | static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a) | 161 | static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a) |
162 | { | 162 | { |
163 | return a >> 28 & 1; | 163 | return a >> 28 & 1; |
164 | } | 164 | } |
165 | 165 | ||
166 | static inline u32 i5100_ferr_nf_mem_any(u32 a) | 166 | static inline u32 i5100_ferr_nf_mem_any(u32 a) |
167 | { | 167 | { |
168 | return a & I5100_FERR_NF_MEM_ANY_MASK; | 168 | return a & I5100_FERR_NF_MEM_ANY_MASK; |
169 | } | 169 | } |
170 | 170 | ||
171 | static inline u32 i5100_nerr_nf_mem_any(u32 a) | 171 | static inline u32 i5100_nerr_nf_mem_any(u32 a) |
172 | { | 172 | { |
173 | return i5100_ferr_nf_mem_any(a); | 173 | return i5100_ferr_nf_mem_any(a); |
174 | } | 174 | } |
175 | 175 | ||
176 | static inline u32 i5100_dmir_limit(u32 a) | 176 | static inline u32 i5100_dmir_limit(u32 a) |
177 | { | 177 | { |
178 | return a >> 16 & ((1 << 11) - 1); | 178 | return a >> 16 & ((1 << 11) - 1); |
179 | } | 179 | } |
180 | 180 | ||
181 | static inline u32 i5100_dmir_rank(u32 a, u32 i) | 181 | static inline u32 i5100_dmir_rank(u32 a, u32 i) |
182 | { | 182 | { |
183 | return a >> (4 * i) & ((1 << 2) - 1); | 183 | return a >> (4 * i) & ((1 << 2) - 1); |
184 | } | 184 | } |
185 | 185 | ||
186 | static inline u16 i5100_mtr_present(u16 a) | 186 | static inline u16 i5100_mtr_present(u16 a) |
187 | { | 187 | { |
188 | return a >> 10 & 1; | 188 | return a >> 10 & 1; |
189 | } | 189 | } |
190 | 190 | ||
191 | static inline u16 i5100_mtr_ethrottle(u16 a) | 191 | static inline u16 i5100_mtr_ethrottle(u16 a) |
192 | { | 192 | { |
193 | return a >> 9 & 1; | 193 | return a >> 9 & 1; |
194 | } | 194 | } |
195 | 195 | ||
196 | static inline u16 i5100_mtr_width(u16 a) | 196 | static inline u16 i5100_mtr_width(u16 a) |
197 | { | 197 | { |
198 | return a >> 8 & 1; | 198 | return a >> 8 & 1; |
199 | } | 199 | } |
200 | 200 | ||
201 | static inline u16 i5100_mtr_numbank(u16 a) | 201 | static inline u16 i5100_mtr_numbank(u16 a) |
202 | { | 202 | { |
203 | return a >> 6 & 1; | 203 | return a >> 6 & 1; |
204 | } | 204 | } |
205 | 205 | ||
206 | static inline u16 i5100_mtr_numrow(u16 a) | 206 | static inline u16 i5100_mtr_numrow(u16 a) |
207 | { | 207 | { |
208 | return a >> 2 & ((1 << 2) - 1); | 208 | return a >> 2 & ((1 << 2) - 1); |
209 | } | 209 | } |
210 | 210 | ||
211 | static inline u16 i5100_mtr_numcol(u16 a) | 211 | static inline u16 i5100_mtr_numcol(u16 a) |
212 | { | 212 | { |
213 | return a & ((1 << 2) - 1); | 213 | return a & ((1 << 2) - 1); |
214 | } | 214 | } |
215 | 215 | ||
216 | 216 | ||
217 | static inline u32 i5100_validlog_redmemvalid(u32 a) | 217 | static inline u32 i5100_validlog_redmemvalid(u32 a) |
218 | { | 218 | { |
219 | return a >> 2 & 1; | 219 | return a >> 2 & 1; |
220 | } | 220 | } |
221 | 221 | ||
222 | static inline u32 i5100_validlog_recmemvalid(u32 a) | 222 | static inline u32 i5100_validlog_recmemvalid(u32 a) |
223 | { | 223 | { |
224 | return a >> 1 & 1; | 224 | return a >> 1 & 1; |
225 | } | 225 | } |
226 | 226 | ||
227 | static inline u32 i5100_validlog_nrecmemvalid(u32 a) | 227 | static inline u32 i5100_validlog_nrecmemvalid(u32 a) |
228 | { | 228 | { |
229 | return a & 1; | 229 | return a & 1; |
230 | } | 230 | } |
231 | 231 | ||
232 | static inline u32 i5100_nrecmema_merr(u32 a) | 232 | static inline u32 i5100_nrecmema_merr(u32 a) |
233 | { | 233 | { |
234 | return a >> 15 & ((1 << 5) - 1); | 234 | return a >> 15 & ((1 << 5) - 1); |
235 | } | 235 | } |
236 | 236 | ||
237 | static inline u32 i5100_nrecmema_bank(u32 a) | 237 | static inline u32 i5100_nrecmema_bank(u32 a) |
238 | { | 238 | { |
239 | return a >> 12 & ((1 << 3) - 1); | 239 | return a >> 12 & ((1 << 3) - 1); |
240 | } | 240 | } |
241 | 241 | ||
242 | static inline u32 i5100_nrecmema_rank(u32 a) | 242 | static inline u32 i5100_nrecmema_rank(u32 a) |
243 | { | 243 | { |
244 | return a >> 8 & ((1 << 3) - 1); | 244 | return a >> 8 & ((1 << 3) - 1); |
245 | } | 245 | } |
246 | 246 | ||
247 | static inline u32 i5100_nrecmema_dm_buf_id(u32 a) | 247 | static inline u32 i5100_nrecmema_dm_buf_id(u32 a) |
248 | { | 248 | { |
249 | return a & ((1 << 8) - 1); | 249 | return a & ((1 << 8) - 1); |
250 | } | 250 | } |
251 | 251 | ||
252 | static inline u32 i5100_nrecmemb_cas(u32 a) | 252 | static inline u32 i5100_nrecmemb_cas(u32 a) |
253 | { | 253 | { |
254 | return a >> 16 & ((1 << 13) - 1); | 254 | return a >> 16 & ((1 << 13) - 1); |
255 | } | 255 | } |
256 | 256 | ||
257 | static inline u32 i5100_nrecmemb_ras(u32 a) | 257 | static inline u32 i5100_nrecmemb_ras(u32 a) |
258 | { | 258 | { |
259 | return a & ((1 << 16) - 1); | 259 | return a & ((1 << 16) - 1); |
260 | } | 260 | } |
261 | 261 | ||
262 | static inline u32 i5100_redmemb_ecc_locator(u32 a) | 262 | static inline u32 i5100_redmemb_ecc_locator(u32 a) |
263 | { | 263 | { |
264 | return a & ((1 << 18) - 1); | 264 | return a & ((1 << 18) - 1); |
265 | } | 265 | } |
266 | 266 | ||
267 | static inline u32 i5100_recmema_merr(u32 a) | 267 | static inline u32 i5100_recmema_merr(u32 a) |
268 | { | 268 | { |
269 | return i5100_nrecmema_merr(a); | 269 | return i5100_nrecmema_merr(a); |
270 | } | 270 | } |
271 | 271 | ||
272 | static inline u32 i5100_recmema_bank(u32 a) | 272 | static inline u32 i5100_recmema_bank(u32 a) |
273 | { | 273 | { |
274 | return i5100_nrecmema_bank(a); | 274 | return i5100_nrecmema_bank(a); |
275 | } | 275 | } |
276 | 276 | ||
277 | static inline u32 i5100_recmema_rank(u32 a) | 277 | static inline u32 i5100_recmema_rank(u32 a) |
278 | { | 278 | { |
279 | return i5100_nrecmema_rank(a); | 279 | return i5100_nrecmema_rank(a); |
280 | } | 280 | } |
281 | 281 | ||
282 | static inline u32 i5100_recmema_dm_buf_id(u32 a) | 282 | static inline u32 i5100_recmema_dm_buf_id(u32 a) |
283 | { | 283 | { |
284 | return i5100_nrecmema_dm_buf_id(a); | 284 | return i5100_nrecmema_dm_buf_id(a); |
285 | } | 285 | } |
286 | 286 | ||
287 | static inline u32 i5100_recmemb_cas(u32 a) | 287 | static inline u32 i5100_recmemb_cas(u32 a) |
288 | { | 288 | { |
289 | return i5100_nrecmemb_cas(a); | 289 | return i5100_nrecmemb_cas(a); |
290 | } | 290 | } |
291 | 291 | ||
292 | static inline u32 i5100_recmemb_ras(u32 a) | 292 | static inline u32 i5100_recmemb_ras(u32 a) |
293 | { | 293 | { |
294 | return i5100_nrecmemb_ras(a); | 294 | return i5100_nrecmemb_ras(a); |
295 | } | 295 | } |
296 | 296 | ||
297 | /* some generic limits */ | 297 | /* some generic limits */ |
298 | #define I5100_MAX_RANKS_PER_CHAN 6 | 298 | #define I5100_MAX_RANKS_PER_CHAN 6 |
299 | #define I5100_CHANNELS 2 | 299 | #define I5100_CHANNELS 2 |
300 | #define I5100_MAX_RANKS_PER_DIMM 4 | 300 | #define I5100_MAX_RANKS_PER_DIMM 4 |
301 | #define I5100_DIMM_ADDR_LINES (6 - 3) /* log2(64 bits / 8 bits per byte) = 3 */ | 301 | #define I5100_DIMM_ADDR_LINES (6 - 3) /* log2(64 bits / 8 bits per byte) = 3 */ |
302 | #define I5100_MAX_DIMM_SLOTS_PER_CHAN 4 | 302 | #define I5100_MAX_DIMM_SLOTS_PER_CHAN 4 |
303 | #define I5100_MAX_RANK_INTERLEAVE 4 | 303 | #define I5100_MAX_RANK_INTERLEAVE 4 |
304 | #define I5100_MAX_DMIRS 5 | 304 | #define I5100_MAX_DMIRS 5 |
305 | #define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ) | 305 | #define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ) |
306 | 306 | ||
307 | struct i5100_priv { | 307 | struct i5100_priv { |
308 | /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */ | 308 | /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */ |
309 | int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN]; | 309 | int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN]; |
310 | 310 | ||
311 | /* | 311 | /* |
312 | * mainboard chip select map -- maps i5100 chip selects to | 312 | * mainboard chip select map -- maps i5100 chip selects to |
313 | * DIMM slot chip selects. In the case of only 4 ranks per | 313 | * DIMM slot chip selects. In the case of only 4 ranks per |
314 | * channel, the mapping is fairly obvious but not unique. | 314 | * channel, the mapping is fairly obvious but not unique. |
315 | * we map -1 -> NC and assume both channels use the same | 315 | * we map -1 -> NC and assume both channels use the same |
316 | * map... | 316 | * map... |
317 | * | 317 | * |
318 | */ | 318 | */ |
319 | int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM]; | 319 | int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM]; |
320 | 320 | ||
321 | /* memory interleave range */ | 321 | /* memory interleave range */ |
322 | struct { | 322 | struct { |
323 | u64 limit; | 323 | u64 limit; |
324 | unsigned way[2]; | 324 | unsigned way[2]; |
325 | } mir[I5100_CHANNELS]; | 325 | } mir[I5100_CHANNELS]; |
326 | 326 | ||
327 | /* adjusted memory interleave range register */ | 327 | /* adjusted memory interleave range register */ |
328 | unsigned amir[I5100_CHANNELS]; | 328 | unsigned amir[I5100_CHANNELS]; |
329 | 329 | ||
330 | /* dimm interleave range */ | 330 | /* dimm interleave range */ |
331 | struct { | 331 | struct { |
332 | unsigned rank[I5100_MAX_RANK_INTERLEAVE]; | 332 | unsigned rank[I5100_MAX_RANK_INTERLEAVE]; |
333 | u64 limit; | 333 | u64 limit; |
334 | } dmir[I5100_CHANNELS][I5100_MAX_DMIRS]; | 334 | } dmir[I5100_CHANNELS][I5100_MAX_DMIRS]; |
335 | 335 | ||
336 | /* memory technology registers... */ | 336 | /* memory technology registers... */ |
337 | struct { | 337 | struct { |
338 | unsigned present; /* 0 or 1 */ | 338 | unsigned present; /* 0 or 1 */ |
339 | unsigned ethrottle; /* 0 or 1 */ | 339 | unsigned ethrottle; /* 0 or 1 */ |
340 | unsigned width; /* 4 or 8 bits */ | 340 | unsigned width; /* 4 or 8 bits */ |
341 | unsigned numbank; /* 2 or 3 lines */ | 341 | unsigned numbank; /* 2 or 3 lines */ |
342 | unsigned numrow; /* 13 .. 16 lines */ | 342 | unsigned numrow; /* 13 .. 16 lines */ |
343 | unsigned numcol; /* 11 .. 12 lines */ | 343 | unsigned numcol; /* 11 .. 12 lines */ |
344 | } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN]; | 344 | } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN]; |
345 | 345 | ||
346 | u64 tolm; /* top of low memory in bytes */ | 346 | u64 tolm; /* top of low memory in bytes */ |
347 | unsigned ranksperchan; /* number of ranks per channel */ | 347 | unsigned ranksperchan; /* number of ranks per channel */ |
348 | 348 | ||
349 | struct pci_dev *mc; /* device 16 func 1 */ | 349 | struct pci_dev *mc; /* device 16 func 1 */ |
350 | struct pci_dev *einj; /* device 19 func 0 */ | 350 | struct pci_dev *einj; /* device 19 func 0 */ |
351 | struct pci_dev *ch0mm; /* device 21 func 0 */ | 351 | struct pci_dev *ch0mm; /* device 21 func 0 */ |
352 | struct pci_dev *ch1mm; /* device 22 func 0 */ | 352 | struct pci_dev *ch1mm; /* device 22 func 0 */ |
353 | 353 | ||
354 | struct delayed_work i5100_scrubbing; | 354 | struct delayed_work i5100_scrubbing; |
355 | int scrub_enable; | 355 | int scrub_enable; |
356 | 356 | ||
357 | /* Error injection */ | 357 | /* Error injection */ |
358 | u8 inject_channel; | 358 | u8 inject_channel; |
359 | u8 inject_hlinesel; | 359 | u8 inject_hlinesel; |
360 | u8 inject_deviceptr1; | 360 | u8 inject_deviceptr1; |
361 | u8 inject_deviceptr2; | 361 | u8 inject_deviceptr2; |
362 | u16 inject_eccmask1; | 362 | u16 inject_eccmask1; |
363 | u16 inject_eccmask2; | 363 | u16 inject_eccmask2; |
364 | 364 | ||
365 | struct dentry *debugfs; | 365 | struct dentry *debugfs; |
366 | }; | 366 | }; |
367 | 367 | ||
368 | static struct dentry *i5100_debugfs; | 368 | static struct dentry *i5100_debugfs; |
369 | 369 | ||
370 | /* map a rank/chan to a slot number on the mainboard */ | 370 | /* map a rank/chan to a slot number on the mainboard */ |
371 | static int i5100_rank_to_slot(const struct mem_ctl_info *mci, | 371 | static int i5100_rank_to_slot(const struct mem_ctl_info *mci, |
372 | int chan, int rank) | 372 | int chan, int rank) |
373 | { | 373 | { |
374 | const struct i5100_priv *priv = mci->pvt_info; | 374 | const struct i5100_priv *priv = mci->pvt_info; |
375 | int i; | 375 | int i; |
376 | 376 | ||
377 | for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) { | 377 | for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) { |
378 | int j; | 378 | int j; |
379 | const int numrank = priv->dimm_numrank[chan][i]; | 379 | const int numrank = priv->dimm_numrank[chan][i]; |
380 | 380 | ||
381 | for (j = 0; j < numrank; j++) | 381 | for (j = 0; j < numrank; j++) |
382 | if (priv->dimm_csmap[i][j] == rank) | 382 | if (priv->dimm_csmap[i][j] == rank) |
383 | return i * 2 + chan; | 383 | return i * 2 + chan; |
384 | } | 384 | } |
385 | 385 | ||
386 | return -1; | 386 | return -1; |
387 | } | 387 | } |
388 | 388 | ||
389 | static const char *i5100_err_msg(unsigned err) | 389 | static const char *i5100_err_msg(unsigned err) |
390 | { | 390 | { |
391 | static const char *merrs[] = { | 391 | static const char *merrs[] = { |
392 | "unknown", /* 0 */ | 392 | "unknown", /* 0 */ |
393 | "uncorrectable data ECC on replay", /* 1 */ | 393 | "uncorrectable data ECC on replay", /* 1 */ |
394 | "unknown", /* 2 */ | 394 | "unknown", /* 2 */ |
395 | "unknown", /* 3 */ | 395 | "unknown", /* 3 */ |
396 | "aliased uncorrectable demand data ECC", /* 4 */ | 396 | "aliased uncorrectable demand data ECC", /* 4 */ |
397 | "aliased uncorrectable spare-copy data ECC", /* 5 */ | 397 | "aliased uncorrectable spare-copy data ECC", /* 5 */ |
398 | "aliased uncorrectable patrol data ECC", /* 6 */ | 398 | "aliased uncorrectable patrol data ECC", /* 6 */ |
399 | "unknown", /* 7 */ | 399 | "unknown", /* 7 */ |
400 | "unknown", /* 8 */ | 400 | "unknown", /* 8 */ |
401 | "unknown", /* 9 */ | 401 | "unknown", /* 9 */ |
402 | "non-aliased uncorrectable demand data ECC", /* 10 */ | 402 | "non-aliased uncorrectable demand data ECC", /* 10 */ |
403 | "non-aliased uncorrectable spare-copy data ECC", /* 11 */ | 403 | "non-aliased uncorrectable spare-copy data ECC", /* 11 */ |
404 | "non-aliased uncorrectable patrol data ECC", /* 12 */ | 404 | "non-aliased uncorrectable patrol data ECC", /* 12 */ |
405 | "unknown", /* 13 */ | 405 | "unknown", /* 13 */ |
406 | "correctable demand data ECC", /* 14 */ | 406 | "correctable demand data ECC", /* 14 */ |
407 | "correctable spare-copy data ECC", /* 15 */ | 407 | "correctable spare-copy data ECC", /* 15 */ |
408 | "correctable patrol data ECC", /* 16 */ | 408 | "correctable patrol data ECC", /* 16 */ |
409 | "unknown", /* 17 */ | 409 | "unknown", /* 17 */ |
410 | "SPD protocol error", /* 18 */ | 410 | "SPD protocol error", /* 18 */ |
411 | "unknown", /* 19 */ | 411 | "unknown", /* 19 */ |
412 | "spare copy initiated", /* 20 */ | 412 | "spare copy initiated", /* 20 */ |
413 | "spare copy completed", /* 21 */ | 413 | "spare copy completed", /* 21 */ |
414 | }; | 414 | }; |
415 | unsigned i; | 415 | unsigned i; |
416 | 416 | ||
417 | for (i = 0; i < ARRAY_SIZE(merrs); i++) | 417 | for (i = 0; i < ARRAY_SIZE(merrs); i++) |
418 | if (1 << i & err) | 418 | if (1 << i & err) |
419 | return merrs[i]; | 419 | return merrs[i]; |
420 | 420 | ||
421 | return "none"; | 421 | return "none"; |
422 | } | 422 | } |
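Note the scan order: when several error bits are set, the message for the lowest-numbered bit wins. For example:

	/* bits 1 and 14 both set -> bit 1 is reported */
	const char *msg = i5100_err_msg((1 << 1) | (1 << 14));
	/* msg == "uncorrectable data ECC on replay" */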
423 | 423 | ||
424 | /* convert csrow index into a rank (per channel -- 0..5) */ | 424 | /* convert csrow index into a rank (per channel -- 0..5) */ |
425 | static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow) | 425 | static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow) |
426 | { | 426 | { |
427 | const struct i5100_priv *priv = mci->pvt_info; | 427 | const struct i5100_priv *priv = mci->pvt_info; |
428 | 428 | ||
429 | return csrow % priv->ranksperchan; | 429 | return csrow % priv->ranksperchan; |
430 | } | 430 | } |
431 | 431 | ||
432 | /* convert csrow index into a channel (0..1) */ | 432 | /* convert csrow index into a channel (0..1) */ |
433 | static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow) | 433 | static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow) |
434 | { | 434 | { |
435 | const struct i5100_priv *priv = mci->pvt_info; | 435 | const struct i5100_priv *priv = mci->pvt_info; |
436 | 436 | ||
437 | return csrow / priv->ranksperchan; | 437 | return csrow / priv->ranksperchan; |
438 | } | 438 | } |
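Together the two helpers split the flat csrow index into a (channel, rank) pair. Assuming priv->ranksperchan == 6, a hypothetical csrow 8 decodes as:

	int chan = i5100_csrow_to_chan(mci, 8);	/* 8 / 6 == 1 */
	int rank = i5100_csrow_to_rank(mci, 8);	/* 8 % 6 == 2 */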
439 | 439 | ||
440 | static void i5100_handle_ce(struct mem_ctl_info *mci, | 440 | static void i5100_handle_ce(struct mem_ctl_info *mci, |
441 | int chan, | 441 | int chan, |
442 | unsigned bank, | 442 | unsigned bank, |
443 | unsigned rank, | 443 | unsigned rank, |
444 | unsigned long syndrome, | 444 | unsigned long syndrome, |
445 | unsigned cas, | 445 | unsigned cas, |
446 | unsigned ras, | 446 | unsigned ras, |
447 | const char *msg) | 447 | const char *msg) |
448 | { | 448 | { |
449 | char detail[80]; | 449 | char detail[80]; |
450 | 450 | ||
451 | /* Format the error message */ | 451 | /* Format the error message */ |
452 | snprintf(detail, sizeof(detail), | 452 | snprintf(detail, sizeof(detail), |
453 | "bank %u, cas %u, ras %u\n", | 453 | "bank %u, cas %u, ras %u\n", |
454 | bank, cas, ras); | 454 | bank, cas, ras); |
455 | 455 | ||
456 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, | 456 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, |
457 | 0, 0, syndrome, | 457 | 0, 0, syndrome, |
458 | chan, rank, -1, | 458 | chan, rank, -1, |
459 | msg, detail); | 459 | msg, detail); |
460 | } | 460 | } |
461 | 461 | ||
462 | static void i5100_handle_ue(struct mem_ctl_info *mci, | 462 | static void i5100_handle_ue(struct mem_ctl_info *mci, |
463 | int chan, | 463 | int chan, |
464 | unsigned bank, | 464 | unsigned bank, |
465 | unsigned rank, | 465 | unsigned rank, |
466 | unsigned long syndrome, | 466 | unsigned long syndrome, |
467 | unsigned cas, | 467 | unsigned cas, |
468 | unsigned ras, | 468 | unsigned ras, |
469 | const char *msg) | 469 | const char *msg) |
470 | { | 470 | { |
471 | char detail[80]; | 471 | char detail[80]; |
472 | 472 | ||
473 | /* Format the error message */ | 473 | /* Format the error message */ |
474 | snprintf(detail, sizeof(detail), | 474 | snprintf(detail, sizeof(detail), |
475 | "bank %u, cas %u, ras %u\n", | 475 | "bank %u, cas %u, ras %u\n", |
476 | bank, cas, ras); | 476 | bank, cas, ras); |
477 | 477 | ||
478 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, | 478 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, |
479 | 0, 0, syndrome, | 479 | 0, 0, syndrome, |
480 | chan, rank, -1, | 480 | chan, rank, -1, |
481 | msg, detail); | 481 | msg, detail); |
482 | } | 482 | } |
483 | 483 | ||
484 | static void i5100_read_log(struct mem_ctl_info *mci, int chan, | 484 | static void i5100_read_log(struct mem_ctl_info *mci, int chan, |
485 | u32 ferr, u32 nerr) | 485 | u32 ferr, u32 nerr) |
486 | { | 486 | { |
487 | struct i5100_priv *priv = mci->pvt_info; | 487 | struct i5100_priv *priv = mci->pvt_info; |
488 | struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm; | 488 | struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm; |
489 | u32 dw; | 489 | u32 dw; |
490 | u32 dw2; | 490 | u32 dw2; |
491 | unsigned syndrome = 0; | 491 | unsigned syndrome = 0; |
492 | unsigned ecc_loc = 0; | 492 | unsigned ecc_loc = 0; |
493 | unsigned merr; | 493 | unsigned merr; |
494 | unsigned bank; | 494 | unsigned bank; |
495 | unsigned rank; | 495 | unsigned rank; |
496 | unsigned cas; | 496 | unsigned cas; |
497 | unsigned ras; | 497 | unsigned ras; |
498 | 498 | ||
499 | pci_read_config_dword(pdev, I5100_VALIDLOG, &dw); | 499 | pci_read_config_dword(pdev, I5100_VALIDLOG, &dw); |
500 | 500 | ||
501 | if (i5100_validlog_redmemvalid(dw)) { | 501 | if (i5100_validlog_redmemvalid(dw)) { |
502 | pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); | 502 | pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); |
503 | syndrome = dw2; | 503 | syndrome = dw2; |
504 | pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); | 504 | pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); |
505 | ecc_loc = i5100_redmemb_ecc_locator(dw2); | 505 | ecc_loc = i5100_redmemb_ecc_locator(dw2); |
506 | } | 506 | } |
507 | 507 | ||
508 | if (i5100_validlog_recmemvalid(dw)) { | 508 | if (i5100_validlog_recmemvalid(dw)) { |
509 | const char *msg; | 509 | const char *msg; |
510 | 510 | ||
511 | pci_read_config_dword(pdev, I5100_RECMEMA, &dw2); | 511 | pci_read_config_dword(pdev, I5100_RECMEMA, &dw2); |
512 | merr = i5100_recmema_merr(dw2); | 512 | merr = i5100_recmema_merr(dw2); |
513 | bank = i5100_recmema_bank(dw2); | 513 | bank = i5100_recmema_bank(dw2); |
514 | rank = i5100_recmema_rank(dw2); | 514 | rank = i5100_recmema_rank(dw2); |
515 | 515 | ||
516 | pci_read_config_dword(pdev, I5100_RECMEMB, &dw2); | 516 | pci_read_config_dword(pdev, I5100_RECMEMB, &dw2); |
517 | cas = i5100_recmemb_cas(dw2); | 517 | cas = i5100_recmemb_cas(dw2); |
518 | ras = i5100_recmemb_ras(dw2); | 518 | ras = i5100_recmemb_ras(dw2); |
519 | 519 | ||
520 | /* FIXME: not really sure if this is what merr is... | 520 | /* FIXME: not really sure if this is what merr is... |
521 | */ | 521 | */ |
522 | if (!merr) | 522 | if (!merr) |
523 | msg = i5100_err_msg(ferr); | 523 | msg = i5100_err_msg(ferr); |
524 | else | 524 | else |
525 | msg = i5100_err_msg(nerr); | 525 | msg = i5100_err_msg(nerr); |
526 | 526 | ||
527 | i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg); | 527 | i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg); |
528 | } | 528 | } |
529 | 529 | ||
530 | if (i5100_validlog_nrecmemvalid(dw)) { | 530 | if (i5100_validlog_nrecmemvalid(dw)) { |
531 | const char *msg; | 531 | const char *msg; |
532 | 532 | ||
533 | pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2); | 533 | pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2); |
534 | merr = i5100_nrecmema_merr(dw2); | 534 | merr = i5100_nrecmema_merr(dw2); |
535 | bank = i5100_nrecmema_bank(dw2); | 535 | bank = i5100_nrecmema_bank(dw2); |
536 | rank = i5100_nrecmema_rank(dw2); | 536 | rank = i5100_nrecmema_rank(dw2); |
537 | 537 | ||
538 | pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2); | 538 | pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2); |
539 | cas = i5100_nrecmemb_cas(dw2); | 539 | cas = i5100_nrecmemb_cas(dw2); |
540 | ras = i5100_nrecmemb_ras(dw2); | 540 | ras = i5100_nrecmemb_ras(dw2); |
541 | 541 | ||
542 | /* FIXME: not really sure if this is what merr is... | 542 | /* FIXME: not really sure if this is what merr is... |
543 | */ | 543 | */ |
544 | if (!merr) | 544 | if (!merr) |
545 | msg = i5100_err_msg(ferr); | 545 | msg = i5100_err_msg(ferr); |
546 | else | 546 | else |
547 | msg = i5100_err_msg(nerr); | 547 | msg = i5100_err_msg(nerr); |
548 | 548 | ||
549 | i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg); | 549 | i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg); |
550 | } | 550 | } |
551 | 551 | ||
552 | pci_write_config_dword(pdev, I5100_VALIDLOG, dw); | 552 | pci_write_config_dword(pdev, I5100_VALIDLOG, dw); |
553 | } | 553 | } |
554 | 554 | ||
555 | static void i5100_check_error(struct mem_ctl_info *mci) | 555 | static void i5100_check_error(struct mem_ctl_info *mci) |
556 | { | 556 | { |
557 | struct i5100_priv *priv = mci->pvt_info; | 557 | struct i5100_priv *priv = mci->pvt_info; |
558 | u32 dw, dw2; | 558 | u32 dw, dw2; |
559 | 559 | ||
560 | pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw); | 560 | pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw); |
561 | if (i5100_ferr_nf_mem_any(dw)) { | 561 | if (i5100_ferr_nf_mem_any(dw)) { |
562 | 562 | ||
563 | pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2); | 563 | pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2); |
564 | 564 | ||
565 | i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw), | 565 | i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw), |
566 | i5100_ferr_nf_mem_any(dw), | 566 | i5100_ferr_nf_mem_any(dw), |
567 | i5100_nerr_nf_mem_any(dw2)); | 567 | i5100_nerr_nf_mem_any(dw2)); |
568 | 568 | ||
569 | pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2); | 569 | pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2); |
570 | } | 570 | } |
571 | pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw); | 571 | pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw); |
572 | } | 572 | } |
573 | 573 | ||
574 | /* The i5100 chipset will scrub the entire memory once, then | 574 | /* The i5100 chipset will scrub the entire memory once, then |
575 | * set a done bit. Continuous scrubbing is achieved by enqueuing | 575 | * set a done bit. Continuous scrubbing is achieved by enqueuing |
576 | * delayed work to a workqueue, checking every few minutes if | 576 | * delayed work to a workqueue, checking every few minutes if |
577 | * the scrubbing has completed and if so reinitiating it. | 577 | * the scrubbing has completed and if so reinitiating it. |
578 | */ | 578 | */ |
579 | 579 | ||
580 | static void i5100_refresh_scrubbing(struct work_struct *work) | 580 | static void i5100_refresh_scrubbing(struct work_struct *work) |
581 | { | 581 | { |
582 | struct delayed_work *i5100_scrubbing = container_of(work, | 582 | struct delayed_work *i5100_scrubbing = container_of(work, |
583 | struct delayed_work, | 583 | struct delayed_work, |
584 | work); | 584 | work); |
585 | struct i5100_priv *priv = container_of(i5100_scrubbing, | 585 | struct i5100_priv *priv = container_of(i5100_scrubbing, |
586 | struct i5100_priv, | 586 | struct i5100_priv, |
587 | i5100_scrubbing); | 587 | i5100_scrubbing); |
588 | u32 dw; | 588 | u32 dw; |
589 | 589 | ||
590 | pci_read_config_dword(priv->mc, I5100_MC, &dw); | 590 | pci_read_config_dword(priv->mc, I5100_MC, &dw); |
591 | 591 | ||
592 | if (priv->scrub_enable) { | 592 | if (priv->scrub_enable) { |
593 | 593 | ||
594 | pci_read_config_dword(priv->mc, I5100_MC, &dw); | 594 | pci_read_config_dword(priv->mc, I5100_MC, &dw); |
595 | 595 | ||
596 | if (i5100_mc_scrbdone(dw)) { | 596 | if (i5100_mc_scrbdone(dw)) { |
597 | dw |= I5100_MC_SCRBEN_MASK; | 597 | dw |= I5100_MC_SCRBEN_MASK; |
598 | pci_write_config_dword(priv->mc, I5100_MC, dw); | 598 | pci_write_config_dword(priv->mc, I5100_MC, dw); |
599 | pci_read_config_dword(priv->mc, I5100_MC, &dw); | 599 | pci_read_config_dword(priv->mc, I5100_MC, &dw); |
600 | } | 600 | } |
601 | 601 | ||
602 | schedule_delayed_work(&(priv->i5100_scrubbing), | 602 | schedule_delayed_work(&(priv->i5100_scrubbing), |
603 | I5100_SCRUB_REFRESH_RATE); | 603 | I5100_SCRUB_REFRESH_RATE); |
604 | } | 604 | } |
605 | } | 605 | } |
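The double container_of() hop above (work_struct to delayed_work to i5100_priv) can also be spelled with the to_delayed_work() helper, assuming that helper is available in this tree:

	struct i5100_priv *priv = container_of(to_delayed_work(work),
					       struct i5100_priv,
					       i5100_scrubbing);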
606 | /* | 606 | /* |
607 | * The bandwidth is based on experimentation, feel free to refine it. | 607 | * The bandwidth is based on experimentation, feel free to refine it. |
608 | */ | 608 | */ |
609 | static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth) | 609 | static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth) |
610 | { | 610 | { |
611 | struct i5100_priv *priv = mci->pvt_info; | 611 | struct i5100_priv *priv = mci->pvt_info; |
612 | u32 dw; | 612 | u32 dw; |
613 | 613 | ||
614 | pci_read_config_dword(priv->mc, I5100_MC, &dw); | 614 | pci_read_config_dword(priv->mc, I5100_MC, &dw); |
615 | if (bandwidth) { | 615 | if (bandwidth) { |
616 | priv->scrub_enable = 1; | 616 | priv->scrub_enable = 1; |
617 | dw |= I5100_MC_SCRBEN_MASK; | 617 | dw |= I5100_MC_SCRBEN_MASK; |
618 | schedule_delayed_work(&(priv->i5100_scrubbing), | 618 | schedule_delayed_work(&(priv->i5100_scrubbing), |
619 | I5100_SCRUB_REFRESH_RATE); | 619 | I5100_SCRUB_REFRESH_RATE); |
620 | } else { | 620 | } else { |
621 | priv->scrub_enable = 0; | 621 | priv->scrub_enable = 0; |
622 | dw &= ~I5100_MC_SCRBEN_MASK; | 622 | dw &= ~I5100_MC_SCRBEN_MASK; |
623 | cancel_delayed_work(&(priv->i5100_scrubbing)); | 623 | cancel_delayed_work(&(priv->i5100_scrubbing)); |
624 | } | 624 | } |
625 | pci_write_config_dword(priv->mc, I5100_MC, dw); | 625 | pci_write_config_dword(priv->mc, I5100_MC, dw); |
626 | 626 | ||
627 | pci_read_config_dword(priv->mc, I5100_MC, &dw); | 627 | pci_read_config_dword(priv->mc, I5100_MC, &dw); |
628 | 628 | ||
629 | bandwidth = 5900000 * i5100_mc_scrben(dw); | 629 | bandwidth = 5900000 * i5100_mc_scrben(dw); |
630 | 630 | ||
631 | return bandwidth; | 631 | return bandwidth; |
632 | } | 632 | } |
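The hardware gives no real rate control, only on/off: any nonzero request enables scrubbing, and the value handed back is the fixed, empirically derived 5900000 that i5100_get_scrub_rate() also reports. A sketch of the resulting semantics:

	i5100_set_scrub_rate(mci, 12345);	/* enables scrubbing, returns 5900000 */
	i5100_set_scrub_rate(mci, 0);		/* disables it, returns 0 */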
633 | 633 | ||
634 | static int i5100_get_scrub_rate(struct mem_ctl_info *mci) | 634 | static int i5100_get_scrub_rate(struct mem_ctl_info *mci) |
635 | { | 635 | { |
636 | struct i5100_priv *priv = mci->pvt_info; | 636 | struct i5100_priv *priv = mci->pvt_info; |
637 | u32 dw; | 637 | u32 dw; |
638 | 638 | ||
639 | pci_read_config_dword(priv->mc, I5100_MC, &dw); | 639 | pci_read_config_dword(priv->mc, I5100_MC, &dw); |
640 | 640 | ||
641 | return 5900000 * i5100_mc_scrben(dw); | 641 | return 5900000 * i5100_mc_scrben(dw); |
642 | } | 642 | } |
643 | 643 | ||
644 | static struct pci_dev *pci_get_device_func(unsigned vendor, | 644 | static struct pci_dev *pci_get_device_func(unsigned vendor, |
645 | unsigned device, | 645 | unsigned device, |
646 | unsigned func) | 646 | unsigned func) |
647 | { | 647 | { |
648 | struct pci_dev *ret = NULL; | 648 | struct pci_dev *ret = NULL; |
649 | 649 | ||
650 | while (1) { | 650 | while (1) { |
651 | ret = pci_get_device(vendor, device, ret); | 651 | ret = pci_get_device(vendor, device, ret); |
652 | 652 | ||
653 | if (!ret) | 653 | if (!ret) |
654 | break; | 654 | break; |
655 | 655 | ||
656 | if (PCI_FUNC(ret->devfn) == func) | 656 | if (PCI_FUNC(ret->devfn) == func) |
657 | break; | 657 | break; |
658 | } | 658 | } |
659 | 659 | ||
660 | return ret; | 660 | return ret; |
661 | } | 661 | } |
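pci_get_device() drops the reference on the device passed in and takes one on the device it returns, so this loop holds at most one reference at a time and the caller ends up owning the reference on the match (or NULL). Typical use, mirroring the probe path below:

	struct pci_dev *einj = pci_get_device_func(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_5100_19, 0);
	if (einj)
		pci_dev_put(einj);	/* release it when done */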
662 | 662 | ||
663 | static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow) | 663 | static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow) |
664 | { | 664 | { |
665 | struct i5100_priv *priv = mci->pvt_info; | 665 | struct i5100_priv *priv = mci->pvt_info; |
666 | const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow); | 666 | const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow); |
667 | const unsigned chan = i5100_csrow_to_chan(mci, csrow); | 667 | const unsigned chan = i5100_csrow_to_chan(mci, csrow); |
668 | unsigned addr_lines; | 668 | unsigned addr_lines; |
669 | 669 | ||
670 | /* dimm present? */ | 670 | /* dimm present? */ |
671 | if (!priv->mtr[chan][chan_rank].present) | 671 | if (!priv->mtr[chan][chan_rank].present) |
672 | return 0ULL; | 672 | return 0ULL; |
673 | 673 | ||
674 | addr_lines = | 674 | addr_lines = |
675 | I5100_DIMM_ADDR_LINES + | 675 | I5100_DIMM_ADDR_LINES + |
676 | priv->mtr[chan][chan_rank].numcol + | 676 | priv->mtr[chan][chan_rank].numcol + |
677 | priv->mtr[chan][chan_rank].numrow + | 677 | priv->mtr[chan][chan_rank].numrow + |
678 | priv->mtr[chan][chan_rank].numbank; | 678 | priv->mtr[chan][chan_rank].numbank; |
679 | 679 | ||
680 | return (unsigned long) | 680 | return (unsigned long) |
681 | ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE); | 681 | ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE); |
682 | } | 682 | } |
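A worked example of the size math, for a hypothetical but in-range rank (the decode ranges come from i5100_init_mtr() below): numbank = 2, numrow = 14 and numcol = 11 give addr_lines = 3 + 11 + 14 + 2 = 30, i.e. a 1 GiB rank:

	/* (1ULL << 30) / PAGE_SIZE == 262144 pages with 4 KiB pages */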
683 | 683 | ||
684 | static void i5100_init_mtr(struct mem_ctl_info *mci) | 684 | static void i5100_init_mtr(struct mem_ctl_info *mci) |
685 | { | 685 | { |
686 | struct i5100_priv *priv = mci->pvt_info; | 686 | struct i5100_priv *priv = mci->pvt_info; |
687 | struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; | 687 | struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; |
688 | int i; | 688 | int i; |
689 | 689 | ||
690 | for (i = 0; i < I5100_CHANNELS; i++) { | 690 | for (i = 0; i < I5100_CHANNELS; i++) { |
691 | int j; | 691 | int j; |
692 | struct pci_dev *pdev = mms[i]; | 692 | struct pci_dev *pdev = mms[i]; |
693 | 693 | ||
694 | for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) { | 694 | for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) { |
695 | const unsigned addr = | 695 | const unsigned addr = |
696 | (j < 4) ? I5100_MTR_0 + j * 2 : | 696 | (j < 4) ? I5100_MTR_0 + j * 2 : |
697 | I5100_MTR_4 + (j - 4) * 2; | 697 | I5100_MTR_4 + (j - 4) * 2; |
698 | u16 w; | 698 | u16 w; |
699 | 699 | ||
700 | pci_read_config_word(pdev, addr, &w); | 700 | pci_read_config_word(pdev, addr, &w); |
701 | 701 | ||
702 | priv->mtr[i][j].present = i5100_mtr_present(w); | 702 | priv->mtr[i][j].present = i5100_mtr_present(w); |
703 | priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w); | 703 | priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w); |
704 | priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w); | 704 | priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w); |
705 | priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w); | 705 | priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w); |
706 | priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w); | 706 | priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w); |
707 | priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w); | 707 | priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w); |
708 | } | 708 | } |
709 | } | 709 | } |
710 | } | 710 | } |
711 | 711 | ||
712 | /* | 712 | /* |
713 | * FIXME: make this into a real i2c adapter (so that dimm-decode | 713 | * FIXME: make this into a real i2c adapter (so that dimm-decode |
714 | * will work)? | 714 | * will work)? |
715 | */ | 715 | */ |
716 | static int i5100_read_spd_byte(const struct mem_ctl_info *mci, | 716 | static int i5100_read_spd_byte(const struct mem_ctl_info *mci, |
717 | u8 ch, u8 slot, u8 addr, u8 *byte) | 717 | u8 ch, u8 slot, u8 addr, u8 *byte) |
718 | { | 718 | { |
719 | struct i5100_priv *priv = mci->pvt_info; | 719 | struct i5100_priv *priv = mci->pvt_info; |
720 | u16 w; | 720 | u16 w; |
721 | unsigned long et; | 721 | unsigned long et; |
722 | 722 | ||
723 | pci_read_config_word(priv->mc, I5100_SPDDATA, &w); | 723 | pci_read_config_word(priv->mc, I5100_SPDDATA, &w); |
724 | if (i5100_spddata_busy(w)) | 724 | if (i5100_spddata_busy(w)) |
725 | return -1; | 725 | return -1; |
726 | 726 | ||
727 | pci_write_config_dword(priv->mc, I5100_SPDCMD, | 727 | pci_write_config_dword(priv->mc, I5100_SPDCMD, |
728 | i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr, | 728 | i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr, |
729 | 0, 0)); | 729 | 0, 0)); |
730 | 730 | ||
731 | /* wait up to 100ms */ | 731 | /* wait up to 100ms */ |
732 | et = jiffies + HZ / 10; | 732 | et = jiffies + HZ / 10; |
733 | udelay(100); | 733 | udelay(100); |
734 | do { | 734 | do { |
735 | pci_read_config_word(priv->mc, I5100_SPDDATA, &w); | 735 | pci_read_config_word(priv->mc, I5100_SPDDATA, &w); |
736 | if (!i5100_spddata_busy(w)) | 736 | if (!i5100_spddata_busy(w)) |
737 | break; | 737 | break; |
738 | udelay(100); | 738 | udelay(100); |
739 | } while (time_before(jiffies, et)); | 739 | } while (time_before(jiffies, et)); |
740 | 740 | ||
741 | if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w)) | 741 | if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w)) |
742 | return -1; | 742 | return -1; |
743 | 743 | ||
744 | *byte = i5100_spddata_data(w); | 744 | *byte = i5100_spddata_data(w); |
745 | 745 | ||
746 | return 0; | 746 | return 0; |
747 | } | 747 | } |
748 | 748 | ||
749 | /* | 749 | /* |
750 | * fill dimm chip select map | 750 | * fill dimm chip select map |
751 | * | 751 | * |
752 | * FIXME: | 752 | * FIXME: |
753 | * o not the only way to map chip selects to dimm slots | 753 | * o not the only way to map chip selects to dimm slots |
754 | * o investigate if there is some way to obtain this map from the bios | 754 | * o investigate if there is some way to obtain this map from the bios |
755 | */ | 755 | */ |
756 | static void i5100_init_dimm_csmap(struct mem_ctl_info *mci) | 756 | static void i5100_init_dimm_csmap(struct mem_ctl_info *mci) |
757 | { | 757 | { |
758 | struct i5100_priv *priv = mci->pvt_info; | 758 | struct i5100_priv *priv = mci->pvt_info; |
759 | int i; | 759 | int i; |
760 | 760 | ||
761 | for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) { | 761 | for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) { |
762 | int j; | 762 | int j; |
763 | 763 | ||
764 | for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++) | 764 | for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++) |
765 | priv->dimm_csmap[i][j] = -1; /* default NC */ | 765 | priv->dimm_csmap[i][j] = -1; /* default NC */ |
766 | } | 766 | } |
767 | 767 | ||
768 | /* only 2 chip selects per slot... */ | 768 | /* only 2 chip selects per slot... */ |
769 | if (priv->ranksperchan == 4) { | 769 | if (priv->ranksperchan == 4) { |
770 | priv->dimm_csmap[0][0] = 0; | 770 | priv->dimm_csmap[0][0] = 0; |
771 | priv->dimm_csmap[0][1] = 3; | 771 | priv->dimm_csmap[0][1] = 3; |
772 | priv->dimm_csmap[1][0] = 1; | 772 | priv->dimm_csmap[1][0] = 1; |
773 | priv->dimm_csmap[1][1] = 2; | 773 | priv->dimm_csmap[1][1] = 2; |
774 | priv->dimm_csmap[2][0] = 2; | 774 | priv->dimm_csmap[2][0] = 2; |
775 | priv->dimm_csmap[3][0] = 3; | 775 | priv->dimm_csmap[3][0] = 3; |
776 | } else { | 776 | } else { |
777 | priv->dimm_csmap[0][0] = 0; | 777 | priv->dimm_csmap[0][0] = 0; |
778 | priv->dimm_csmap[0][1] = 1; | 778 | priv->dimm_csmap[0][1] = 1; |
779 | priv->dimm_csmap[1][0] = 2; | 779 | priv->dimm_csmap[1][0] = 2; |
780 | priv->dimm_csmap[1][1] = 3; | 780 | priv->dimm_csmap[1][1] = 3; |
781 | priv->dimm_csmap[2][0] = 4; | 781 | priv->dimm_csmap[2][0] = 4; |
782 | priv->dimm_csmap[2][1] = 5; | 782 | priv->dimm_csmap[2][1] = 5; |
783 | } | 783 | } |
784 | } | 784 | } |
785 | 785 | ||
786 | static void i5100_init_dimm_layout(struct pci_dev *pdev, | 786 | static void i5100_init_dimm_layout(struct pci_dev *pdev, |
787 | struct mem_ctl_info *mci) | 787 | struct mem_ctl_info *mci) |
788 | { | 788 | { |
789 | struct i5100_priv *priv = mci->pvt_info; | 789 | struct i5100_priv *priv = mci->pvt_info; |
790 | int i; | 790 | int i; |
791 | 791 | ||
792 | for (i = 0; i < I5100_CHANNELS; i++) { | 792 | for (i = 0; i < I5100_CHANNELS; i++) { |
793 | int j; | 793 | int j; |
794 | 794 | ||
795 | for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) { | 795 | for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) { |
796 | u8 rank; | 796 | u8 rank; |
797 | 797 | ||
798 | if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0) | 798 | if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0) |
799 | priv->dimm_numrank[i][j] = 0; | 799 | priv->dimm_numrank[i][j] = 0; |
800 | else | 800 | else |
801 | priv->dimm_numrank[i][j] = (rank & 3) + 1; | 801 | priv->dimm_numrank[i][j] = (rank & 3) + 1; |
802 | } | 802 | } |
803 | } | 803 | } |
804 | 804 | ||
805 | i5100_init_dimm_csmap(mci); | 805 | i5100_init_dimm_csmap(mci); |
806 | } | 806 | } |
807 | 807 | ||
808 | static void i5100_init_interleaving(struct pci_dev *pdev, | 808 | static void i5100_init_interleaving(struct pci_dev *pdev, |
809 | struct mem_ctl_info *mci) | 809 | struct mem_ctl_info *mci) |
810 | { | 810 | { |
811 | u16 w; | 811 | u16 w; |
812 | u32 dw; | 812 | u32 dw; |
813 | struct i5100_priv *priv = mci->pvt_info; | 813 | struct i5100_priv *priv = mci->pvt_info; |
814 | struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; | 814 | struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; |
815 | int i; | 815 | int i; |
816 | 816 | ||
817 | pci_read_config_word(pdev, I5100_TOLM, &w); | 817 | pci_read_config_word(pdev, I5100_TOLM, &w); |
818 | priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024; | 818 | priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024; |
819 | 819 | ||
820 | pci_read_config_word(pdev, I5100_MIR0, &w); | 820 | pci_read_config_word(pdev, I5100_MIR0, &w); |
821 | priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28; | 821 | priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28; |
822 | priv->mir[0].way[1] = i5100_mir_way1(w); | 822 | priv->mir[0].way[1] = i5100_mir_way1(w); |
823 | priv->mir[0].way[0] = i5100_mir_way0(w); | 823 | priv->mir[0].way[0] = i5100_mir_way0(w); |
824 | 824 | ||
825 | pci_read_config_word(pdev, I5100_MIR1, &w); | 825 | pci_read_config_word(pdev, I5100_MIR1, &w); |
826 | priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28; | 826 | priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28; |
827 | priv->mir[1].way[1] = i5100_mir_way1(w); | 827 | priv->mir[1].way[1] = i5100_mir_way1(w); |
828 | priv->mir[1].way[0] = i5100_mir_way0(w); | 828 | priv->mir[1].way[0] = i5100_mir_way0(w); |
829 | 829 | ||
830 | pci_read_config_word(pdev, I5100_AMIR_0, &w); | 830 | pci_read_config_word(pdev, I5100_AMIR_0, &w); |
831 | priv->amir[0] = w; | 831 | priv->amir[0] = w; |
832 | pci_read_config_word(pdev, I5100_AMIR_1, &w); | 832 | pci_read_config_word(pdev, I5100_AMIR_1, &w); |
833 | priv->amir[1] = w; | 833 | priv->amir[1] = w; |
834 | 834 | ||
835 | for (i = 0; i < I5100_CHANNELS; i++) { | 835 | for (i = 0; i < I5100_CHANNELS; i++) { |
836 | int j; | 836 | int j; |
837 | 837 | ||
838 | for (j = 0; j < 5; j++) { | 838 | for (j = 0; j < 5; j++) { |
839 | int k; | 839 | int k; |
840 | 840 | ||
841 | pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw); | 841 | pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw); |
842 | 842 | ||
843 | priv->dmir[i][j].limit = | 843 | priv->dmir[i][j].limit = |
844 | (u64) i5100_dmir_limit(dw) << 28; | 844 | (u64) i5100_dmir_limit(dw) << 28; |
845 | for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++) | 845 | for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++) |
846 | priv->dmir[i][j].rank[k] = | 846 | priv->dmir[i][j].rank[k] = |
847 | i5100_dmir_rank(dw, k); | 847 | i5100_dmir_rank(dw, k); |
848 | } | 848 | } |
849 | } | 849 | } |
850 | 850 | ||
851 | i5100_init_mtr(mci); | 851 | i5100_init_mtr(mci); |
852 | } | 852 | } |
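All of these limits land on 256 MiB granularity: TOLM is a 4-bit field scaled by 256 MiB (so at most 4 GiB of low memory), and the MIR/DMIR limits are fields shifted left by 28, i.e. counted in the same units. A hypothetical decode:

	/* a TOLM field of 0xc means 12 * 256 MiB = 3 GiB */
	u64 tolm = (u64) 0xc * 256 * 1024 * 1024;	/* 0xc0000000 */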
853 | 853 | ||
854 | static void i5100_init_csrows(struct mem_ctl_info *mci) | 854 | static void i5100_init_csrows(struct mem_ctl_info *mci) |
855 | { | 855 | { |
856 | int i; | 856 | int i; |
857 | struct i5100_priv *priv = mci->pvt_info; | 857 | struct i5100_priv *priv = mci->pvt_info; |
858 | 858 | ||
859 | for (i = 0; i < mci->tot_dimms; i++) { | 859 | for (i = 0; i < mci->tot_dimms; i++) { |
860 | struct dimm_info *dimm; | 860 | struct dimm_info *dimm; |
861 | const unsigned long npages = i5100_npages(mci, i); | 861 | const unsigned long npages = i5100_npages(mci, i); |
862 | const unsigned chan = i5100_csrow_to_chan(mci, i); | 862 | const unsigned chan = i5100_csrow_to_chan(mci, i); |
863 | const unsigned rank = i5100_csrow_to_rank(mci, i); | 863 | const unsigned rank = i5100_csrow_to_rank(mci, i); |
864 | 864 | ||
865 | if (!npages) | 865 | if (!npages) |
866 | continue; | 866 | continue; |
867 | 867 | ||
868 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, | 868 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, |
869 | chan, rank, 0); | 869 | chan, rank, 0); |
870 | 870 | ||
871 | dimm->nr_pages = npages; | 871 | dimm->nr_pages = npages; |
872 | if (npages) { | 872 | if (npages) { |
873 | dimm->grain = 32; | 873 | dimm->grain = 32; |
874 | dimm->dtype = (priv->mtr[chan][rank].width == 4) ? | 874 | dimm->dtype = (priv->mtr[chan][rank].width == 4) ? |
875 | DEV_X4 : DEV_X8; | 875 | DEV_X4 : DEV_X8; |
876 | dimm->mtype = MEM_RDDR2; | 876 | dimm->mtype = MEM_RDDR2; |
877 | dimm->edac_mode = EDAC_SECDED; | 877 | dimm->edac_mode = EDAC_SECDED; |
878 | snprintf(dimm->label, sizeof(dimm->label), | 878 | snprintf(dimm->label, sizeof(dimm->label), |
879 | "DIMM%u", | 879 | "DIMM%u", |
880 | i5100_rank_to_slot(mci, chan, rank)); | 880 | i5100_rank_to_slot(mci, chan, rank)); |
881 | } | 881 | } |
882 | 882 | ||
883 | edac_dbg(2, "dimm channel %d, rank %d, size %ld\n", | 883 | edac_dbg(2, "dimm channel %d, rank %d, size %ld\n", |
884 | chan, rank, (long)PAGES_TO_MiB(npages)); | 884 | chan, rank, (long)PAGES_TO_MiB(npages)); |
885 | } | 885 | } |
886 | } | 886 | } |
887 | 887 | ||
888 | /**************************************************************************** | 888 | /**************************************************************************** |
889 | * Error injection routines | 889 | * Error injection routines |
890 | ****************************************************************************/ | 890 | ****************************************************************************/ |
891 | 891 | ||
892 | static void i5100_do_inject(struct mem_ctl_info *mci) | 892 | static void i5100_do_inject(struct mem_ctl_info *mci) |
893 | { | 893 | { |
894 | struct i5100_priv *priv = mci->pvt_info; | 894 | struct i5100_priv *priv = mci->pvt_info; |
895 | u32 mask0; | 895 | u32 mask0; |
896 | u16 mask1; | 896 | u16 mask1; |
897 | 897 | ||
898 | /* MEM[1:0]EINJMSK0 | 898 | /* MEM[1:0]EINJMSK0 |
899 | * 31 - ADDRMATCHEN | 899 | * 31 - ADDRMATCHEN |
900 | * 29:28 - HLINESEL | 900 | * 29:28 - HLINESEL |
901 | * 00 Reserved | 901 | * 00 Reserved |
902 | * 01 Lower half of cache line | 902 | * 01 Lower half of cache line |
903 | * 10 Upper half of cache line | 903 | * 10 Upper half of cache line |
904 | * 11 Both upper and lower parts of cache line | 904 | * 11 Both upper and lower parts of cache line |
905 | * 27 - EINJEN | 905 | * 27 - EINJEN |
906 | * 25:19 - XORMASK1 for deviceptr1 | 906 | * 25:19 - XORMASK1 for deviceptr1 |
907 | * 9:5 - SEC2RAM for deviceptr2 | 907 | * 9:5 - SEC2RAM for deviceptr2 |
908 | * 4:0 - FIR2RAM for deviceptr1 | 908 | * 4:0 - FIR2RAM for deviceptr1 |
909 | */ | 909 | */ |
910 | mask0 = ((priv->inject_hlinesel & 0x3) << 28) | | 910 | mask0 = ((priv->inject_hlinesel & 0x3) << 28) | |
911 | I5100_MEMXEINJMSK0_EINJEN | | 911 | I5100_MEMXEINJMSK0_EINJEN | |
912 | ((priv->inject_eccmask1 & 0xffff) << 10) | | 912 | ((priv->inject_eccmask1 & 0xffff) << 10) | |
913 | ((priv->inject_deviceptr2 & 0x1f) << 5) | | 913 | ((priv->inject_deviceptr2 & 0x1f) << 5) | |
914 | (priv->inject_deviceptr1 & 0x1f); | 914 | (priv->inject_deviceptr1 & 0x1f); |
915 | 915 | ||
916 | /* MEM[1:0]EINJMSK1 | 916 | /* MEM[1:0]EINJMSK1 |
917 | * 15:0 - XORMASK2 for deviceptr2 | 917 | * 15:0 - XORMASK2 for deviceptr2 |
918 | */ | 918 | */ |
919 | mask1 = priv->inject_eccmask2; | 919 | mask1 = priv->inject_eccmask2; |
920 | 920 | ||
921 | if (priv->inject_channel == 0) { | 921 | if (priv->inject_channel == 0) { |
922 | pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0); | 922 | pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0); |
923 | pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1); | 923 | pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1); |
924 | } else { | 924 | } else { |
925 | pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0); | 925 | pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0); |
926 | pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1); | 926 | pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1); |
927 | } | 927 | } |
928 | 928 | ||
929 | /* Error Injection Response Function | 929 | /* Error Injection Response Function |
930 | * Intel 5100 Memory Controller Hub Chipset (318378) datasheet | 930 | * Intel 5100 Memory Controller Hub Chipset (318378) datasheet |
931 | * hints at this register but carries no data about it. All | 931 | * hints at this register but carries no data about it. All |
932 | * data regarding device 19 is based on experimentation and the | 932 | * data regarding device 19 is based on experimentation and the |
933 | * Intel 7300 Chipset Memory Controller Hub (318082) datasheet | 933 | * Intel 7300 Chipset Memory Controller Hub (318082) datasheet |
934 | * which appears to be accurate for the i5100 in this area. | 934 | * which appears to be accurate for the i5100 in this area. |
935 | * | 935 | * |
936 | * The injection code doesn't work without setting this register. | 936 | * The injection code doesn't work without setting this register. |
937 | * The register needs to be flipped off then on, else the hardware | 937 | * The register needs to be flipped off then on, else the hardware |
938 | * will only perform the first injection. | 938 | * will only perform the first injection. |
939 | * | 939 | * |
940 | * Stop condition bits 7:4 | 940 | * Stop condition bits 7:4 |
941 | * 1010 - Stop after one injection | 941 | * 1010 - Stop after one injection |
942 | * 1011 - Never stop injecting faults | 942 | * 1011 - Never stop injecting faults |
943 | * | 943 | * |
944 | * Start condition bits 3:0 | 944 | * Start condition bits 3:0 |
945 | * 1010 - Never start | 945 | * 1010 - Never start |
946 | * 1011 - Start immediately | 946 | * 1011 - Start immediately |
947 | */ | 947 | */ |
948 | pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa); | 948 | pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa); |
949 | pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab); | 949 | pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab); |
950 | } | 950 | } |
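Decoding the two magic writes under the nibble layout given in the comment (a reading inferred from that comment, not from a datasheet):

	/* 0xaa = stop 1010, start 1010: stop after one injection, never start */
	/* 0xab = stop 1010, start 1011: stop after one injection, start now   */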
951 | 951 | ||
952 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) | 952 | #define to_mci(k) container_of(k, struct mem_ctl_info, dev) |
953 | static ssize_t inject_enable_write(struct file *file, const char __user *data, | 953 | static ssize_t inject_enable_write(struct file *file, const char __user *data, |
954 | size_t count, loff_t *ppos) | 954 | size_t count, loff_t *ppos) |
955 | { | 955 | { |
956 | struct device *dev = file->private_data; | 956 | struct device *dev = file->private_data; |
957 | struct mem_ctl_info *mci = to_mci(dev); | 957 | struct mem_ctl_info *mci = to_mci(dev); |
958 | 958 | ||
959 | i5100_do_inject(mci); | 959 | i5100_do_inject(mci); |
960 | 960 | ||
961 | return count; | 961 | return count; |
962 | } | 962 | } |
963 | 963 | ||
964 | static const struct file_operations i5100_inject_enable_fops = { | 964 | static const struct file_operations i5100_inject_enable_fops = { |
965 | .open = simple_open, | 965 | .open = simple_open, |
966 | .write = inject_enable_write, | 966 | .write = inject_enable_write, |
967 | .llseek = generic_file_llseek, | 967 | .llseek = generic_file_llseek, |
968 | }; | 968 | }; |
969 | 969 | ||
970 | static int i5100_setup_debugfs(struct mem_ctl_info *mci) | 970 | static int i5100_setup_debugfs(struct mem_ctl_info *mci) |
971 | { | 971 | { |
972 | struct i5100_priv *priv = mci->pvt_info; | 972 | struct i5100_priv *priv = mci->pvt_info; |
973 | 973 | ||
974 | if (!i5100_debugfs) | 974 | if (!i5100_debugfs) |
975 | return -ENODEV; | 975 | return -ENODEV; |
976 | 976 | ||
977 | priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs); | 977 | priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs); |
978 | 978 | ||
979 | if (!priv->debugfs) | 979 | if (!priv->debugfs) |
980 | return -ENOMEM; | 980 | return -ENOMEM; |
981 | 981 | ||
982 | debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs, | 982 | debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs, |
983 | &priv->inject_channel); | 983 | &priv->inject_channel); |
984 | debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs, | 984 | debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs, |
985 | &priv->inject_hlinesel); | 985 | &priv->inject_hlinesel); |
986 | debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs, | 986 | debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs, |
987 | &priv->inject_deviceptr1); | 987 | &priv->inject_deviceptr1); |
988 | debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs, | 988 | debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs, |
989 | &priv->inject_deviceptr2); | 989 | &priv->inject_deviceptr2); |
990 | debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs, | 990 | debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs, |
991 | &priv->inject_eccmask1); | 991 | &priv->inject_eccmask1); |
992 | debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs, | 992 | debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs, |
993 | &priv->inject_eccmask2); | 993 | &priv->inject_eccmask2); |
994 | debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs, | 994 | debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs, |
995 | &mci->dev, &i5100_inject_enable_fops); | 995 | &mci->dev, &i5100_inject_enable_fops); |
996 | 996 | ||
997 | return 0; | 997 | return 0; |
998 | 998 | ||
999 | } | 999 | } |
1000 | 1000 | ||
1001 | static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 1001 | static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
1002 | { | 1002 | { |
1003 | int rc; | 1003 | int rc; |
1004 | struct mem_ctl_info *mci; | 1004 | struct mem_ctl_info *mci; |
1005 | struct edac_mc_layer layers[2]; | 1005 | struct edac_mc_layer layers[2]; |
1006 | struct i5100_priv *priv; | 1006 | struct i5100_priv *priv; |
1007 | struct pci_dev *ch0mm, *ch1mm, *einj; | 1007 | struct pci_dev *ch0mm, *ch1mm, *einj; |
1008 | int ret = 0; | 1008 | int ret = 0; |
1009 | u32 dw; | 1009 | u32 dw; |
1010 | int ranksperch; | 1010 | int ranksperch; |
1011 | 1011 | ||
1012 | if (PCI_FUNC(pdev->devfn) != 1) | 1012 | if (PCI_FUNC(pdev->devfn) != 1) |
1013 | return -ENODEV; | 1013 | return -ENODEV; |
1014 | 1014 | ||
1015 | rc = pci_enable_device(pdev); | 1015 | rc = pci_enable_device(pdev); |
1016 | if (rc < 0) { | 1016 | if (rc < 0) { |
1017 | ret = rc; | 1017 | ret = rc; |
1018 | goto bail; | 1018 | goto bail; |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | /* ECC enabled? */ | 1021 | /* ECC enabled? */ |
1022 | pci_read_config_dword(pdev, I5100_MC, &dw); | 1022 | pci_read_config_dword(pdev, I5100_MC, &dw); |
1023 | if (!i5100_mc_errdeten(dw)) { | 1023 | if (!i5100_mc_errdeten(dw)) { |
1024 | printk(KERN_INFO "i5100_edac: ECC not enabled.\n"); | 1024 | printk(KERN_INFO "i5100_edac: ECC not enabled.\n"); |
1025 | ret = -ENODEV; | 1025 | ret = -ENODEV; |
1026 | goto bail_pdev; | 1026 | goto bail_pdev; |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | /* figure out how many ranks, from strapped state of 48GB_Mode input */ | 1029 | /* figure out how many ranks, from strapped state of 48GB_Mode input */ |
1030 | pci_read_config_dword(pdev, I5100_MS, &dw); | 1030 | pci_read_config_dword(pdev, I5100_MS, &dw); |
1031 | ranksperch = !!(dw & (1 << 8)) * 2 + 4; | 1031 | ranksperch = !!(dw & (1 << 8)) * 2 + 4; |
1032 | 1032 | ||
1033 | /* enable error reporting... */ | 1033 | /* enable error reporting... */ |
1034 | pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw); | 1034 | pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw); |
1035 | dw &= ~I5100_FERR_NF_MEM_ANY_MASK; | 1035 | dw &= ~I5100_FERR_NF_MEM_ANY_MASK; |
1036 | pci_write_config_dword(pdev, I5100_EMASK_MEM, dw); | 1036 | pci_write_config_dword(pdev, I5100_EMASK_MEM, dw); |
1037 | 1037 | ||
1038 | /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */ | 1038 | /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */ |
1039 | ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, | 1039 | ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, |
1040 | PCI_DEVICE_ID_INTEL_5100_21, 0); | 1040 | PCI_DEVICE_ID_INTEL_5100_21, 0); |
1041 | if (!ch0mm) { | 1041 | if (!ch0mm) { |
1042 | ret = -ENODEV; | 1042 | ret = -ENODEV; |
1043 | goto bail_pdev; | 1043 | goto bail_pdev; |
1044 | } | 1044 | } |
1045 | 1045 | ||
1046 | rc = pci_enable_device(ch0mm); | 1046 | rc = pci_enable_device(ch0mm); |
1047 | if (rc < 0) { | 1047 | if (rc < 0) { |
1048 | ret = rc; | 1048 | ret = rc; |
1049 | goto bail_ch0; | 1049 | goto bail_ch0; |
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */ | 1052 | /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */ |
1053 | ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, | 1053 | ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, |
1054 | PCI_DEVICE_ID_INTEL_5100_22, 0); | 1054 | PCI_DEVICE_ID_INTEL_5100_22, 0); |
1055 | if (!ch1mm) { | 1055 | if (!ch1mm) { |
1056 | ret = -ENODEV; | 1056 | ret = -ENODEV; |
1057 | goto bail_disable_ch0; | 1057 | goto bail_disable_ch0; |
1058 | } | 1058 | } |
1059 | 1059 | ||
1060 | rc = pci_enable_device(ch1mm); | 1060 | rc = pci_enable_device(ch1mm); |
1061 | if (rc < 0) { | 1061 | if (rc < 0) { |
1062 | ret = rc; | 1062 | ret = rc; |
1063 | goto bail_ch1; | 1063 | goto bail_ch1; |
1064 | } | 1064 | } |
1065 | 1065 | ||
1066 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | 1066 | layers[0].type = EDAC_MC_LAYER_CHANNEL; |
1067 | layers[0].size = 2; | 1067 | layers[0].size = 2; |
1068 | layers[0].is_virt_csrow = false; | 1068 | layers[0].is_virt_csrow = false; |
1069 | layers[1].type = EDAC_MC_LAYER_SLOT; | 1069 | layers[1].type = EDAC_MC_LAYER_SLOT; |
1070 | layers[1].size = ranksperch; | 1070 | layers[1].size = ranksperch; |
1071 | layers[1].is_virt_csrow = true; | 1071 | layers[1].is_virt_csrow = true; |
1072 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, | 1072 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, |
1073 | sizeof(*priv)); | 1073 | sizeof(*priv)); |
1074 | if (!mci) { | 1074 | if (!mci) { |
1075 | ret = -ENOMEM; | 1075 | ret = -ENOMEM; |
1076 | goto bail_disable_ch1; | 1076 | goto bail_disable_ch1; |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | 1079 | ||
1080 | /* device 19, func 0, Error injection */ | 1080 | /* device 19, func 0, Error injection */ |
1081 | einj = pci_get_device_func(PCI_VENDOR_ID_INTEL, | 1081 | einj = pci_get_device_func(PCI_VENDOR_ID_INTEL, |
1082 | PCI_DEVICE_ID_INTEL_5100_19, 0); | 1082 | PCI_DEVICE_ID_INTEL_5100_19, 0); |
1083 | if (!einj) { | 1083 | if (!einj) { |
1084 | ret = -ENODEV; | 1084 | ret = -ENODEV; |
1085 | goto bail_einj; | 1085 | goto bail_einj; |
1086 | } | 1086 | } |
1087 | 1087 | ||
1088 | rc = pci_enable_device(einj); | 1088 | rc = pci_enable_device(einj); |
1089 | if (rc < 0) { | 1089 | if (rc < 0) { |
1090 | ret = rc; | 1090 | ret = rc; |
1091 | goto bail_disable_einj; | 1091 | goto bail_disable_einj; |
1092 | } | 1092 | } |
1093 | 1093 | ||
1094 | 1094 | ||
1095 | mci->pdev = &pdev->dev; | 1095 | mci->pdev = &pdev->dev; |
1096 | 1096 | ||
1097 | priv = mci->pvt_info; | 1097 | priv = mci->pvt_info; |
1098 | priv->ranksperchan = ranksperch; | 1098 | priv->ranksperchan = ranksperch; |
1099 | priv->mc = pdev; | 1099 | priv->mc = pdev; |
1100 | priv->ch0mm = ch0mm; | 1100 | priv->ch0mm = ch0mm; |
1101 | priv->ch1mm = ch1mm; | 1101 | priv->ch1mm = ch1mm; |
1102 | priv->einj = einj; | 1102 | priv->einj = einj; |
1103 | 1103 | ||
1104 | INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing); | 1104 | INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing); |
1105 | 1105 | ||
1106 | /* If scrubbing was already enabled by the bios, start maintaining it */ | 1106 | /* If scrubbing was already enabled by the bios, start maintaining it */ |
1107 | pci_read_config_dword(pdev, I5100_MC, &dw); | 1107 | pci_read_config_dword(pdev, I5100_MC, &dw); |
1108 | if (i5100_mc_scrben(dw)) { | 1108 | if (i5100_mc_scrben(dw)) { |
1109 | priv->scrub_enable = 1; | 1109 | priv->scrub_enable = 1; |
1110 | schedule_delayed_work(&(priv->i5100_scrubbing), | 1110 | schedule_delayed_work(&(priv->i5100_scrubbing), |
1111 | I5100_SCRUB_REFRESH_RATE); | 1111 | I5100_SCRUB_REFRESH_RATE); |
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | i5100_init_dimm_layout(pdev, mci); | 1114 | i5100_init_dimm_layout(pdev, mci); |
1115 | i5100_init_interleaving(pdev, mci); | 1115 | i5100_init_interleaving(pdev, mci); |
1116 | 1116 | ||
1117 | mci->mtype_cap = MEM_FLAG_FB_DDR2; | 1117 | mci->mtype_cap = MEM_FLAG_FB_DDR2; |
1118 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; | 1118 | mci->edac_ctl_cap = EDAC_FLAG_SECDED; |
1119 | mci->edac_cap = EDAC_FLAG_SECDED; | 1119 | mci->edac_cap = EDAC_FLAG_SECDED; |
1120 | mci->mod_name = "i5100_edac.c"; | 1120 | mci->mod_name = "i5100_edac.c"; |
1121 | mci->mod_ver = "not versioned"; | 1121 | mci->mod_ver = "not versioned"; |
1122 | mci->ctl_name = "i5100"; | 1122 | mci->ctl_name = "i5100"; |
1123 | mci->dev_name = pci_name(pdev); | 1123 | mci->dev_name = pci_name(pdev); |
1124 | mci->ctl_page_to_phys = NULL; | 1124 | mci->ctl_page_to_phys = NULL; |
1125 | 1125 | ||
1126 | mci->edac_check = i5100_check_error; | 1126 | mci->edac_check = i5100_check_error; |
1127 | mci->set_sdram_scrub_rate = i5100_set_scrub_rate; | 1127 | mci->set_sdram_scrub_rate = i5100_set_scrub_rate; |
1128 | mci->get_sdram_scrub_rate = i5100_get_scrub_rate; | 1128 | mci->get_sdram_scrub_rate = i5100_get_scrub_rate; |
1129 | 1129 | ||
1130 | priv->inject_channel = 0; | 1130 | priv->inject_channel = 0; |
1131 | priv->inject_hlinesel = 0; | 1131 | priv->inject_hlinesel = 0; |
1132 | priv->inject_deviceptr1 = 0; | 1132 | priv->inject_deviceptr1 = 0; |
1133 | priv->inject_deviceptr2 = 0; | 1133 | priv->inject_deviceptr2 = 0; |
1134 | priv->inject_eccmask1 = 0; | 1134 | priv->inject_eccmask1 = 0; |
1135 | priv->inject_eccmask2 = 0; | 1135 | priv->inject_eccmask2 = 0; |
1136 | 1136 | ||
1137 | i5100_init_csrows(mci); | 1137 | i5100_init_csrows(mci); |
1138 | 1138 | ||
1139 | /* clamp edac_op_state: only POLL and NMI are valid for this driver */ | 1139 | /* clamp edac_op_state: only POLL and NMI are valid for this driver */ |
1140 | switch (edac_op_state) { | 1140 | switch (edac_op_state) { |
1141 | case EDAC_OPSTATE_POLL: | 1141 | case EDAC_OPSTATE_POLL: |
1142 | case EDAC_OPSTATE_NMI: | 1142 | case EDAC_OPSTATE_NMI: |
1143 | break; | 1143 | break; |
1144 | default: | 1144 | default: |
1145 | edac_op_state = EDAC_OPSTATE_POLL; | 1145 | edac_op_state = EDAC_OPSTATE_POLL; |
1146 | break; | 1146 | break; |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | if (edac_mc_add_mc(mci)) { | 1149 | if (edac_mc_add_mc(mci)) { |
1150 | ret = -ENODEV; | 1150 | ret = -ENODEV; |
1151 | goto bail_scrub; | 1151 | goto bail_scrub; |
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | i5100_setup_debugfs(mci); | 1154 | i5100_setup_debugfs(mci); |
1155 | 1155 | ||
1156 | return ret; | 1156 | return ret; |
1157 | 1157 | ||
1158 | bail_scrub: | 1158 | bail_scrub: |
1159 | priv->scrub_enable = 0; | 1159 | priv->scrub_enable = 0; |
1160 | cancel_delayed_work_sync(&(priv->i5100_scrubbing)); | 1160 | cancel_delayed_work_sync(&(priv->i5100_scrubbing)); |
1161 | edac_mc_free(mci); | 1161 | edac_mc_free(mci); |
1162 | 1162 | ||
1163 | bail_disable_einj: | 1163 | bail_disable_einj: |
1164 | pci_disable_device(einj); | 1164 | pci_disable_device(einj); |
1165 | 1165 | ||
1166 | bail_einj: | 1166 | bail_einj: |
1167 | pci_dev_put(einj); | 1167 | pci_dev_put(einj); |
1168 | 1168 | ||
1169 | bail_disable_ch1: | 1169 | bail_disable_ch1: |
1170 | pci_disable_device(ch1mm); | 1170 | pci_disable_device(ch1mm); |
1171 | 1171 | ||
1172 | bail_ch1: | 1172 | bail_ch1: |
1173 | pci_dev_put(ch1mm); | 1173 | pci_dev_put(ch1mm); |
1174 | 1174 | ||
1175 | bail_disable_ch0: | 1175 | bail_disable_ch0: |
1176 | pci_disable_device(ch0mm); | 1176 | pci_disable_device(ch0mm); |
1177 | 1177 | ||
1178 | bail_ch0: | 1178 | bail_ch0: |
1179 | pci_dev_put(ch0mm); | 1179 | pci_dev_put(ch0mm); |
1180 | 1180 | ||
1181 | bail_pdev: | 1181 | bail_pdev: |
1182 | pci_disable_device(pdev); | 1182 | pci_disable_device(pdev); |
1183 | 1183 | ||
1184 | bail: | 1184 | bail: |
1185 | return ret; | 1185 | return ret; |
1186 | } | 1186 | } |
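
The probe path above is a textbook instance of the kernel's goto-based unwind idiom: resources are acquired in a fixed order (pdev, ch0mm, ch1mm, mci, einj) and the bail_* labels release them in exactly the reverse order, so each failure site jumps to the label that undoes only what has been acquired so far. A minimal standalone sketch of the idiom, with hypothetical acquire()/release() stubs standing in for pci_enable_device()/pci_disable_device() and friends:

    #include <stdio.h>

    /* Hypothetical stubs standing in for pci_enable_device()/pci_disable_device(). */
    static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
    static void release(const char *what) { printf("release %s\n", what); }

    static int probe(void)
    {
        int ret;

        ret = acquire("pdev");
        if (ret)
            goto bail;
        ret = acquire("ch0mm");
        if (ret)
            goto bail_pdev;
        ret = acquire("ch1mm");
        if (ret)
            goto bail_ch0;

        return 0; /* success: everything stays acquired */

        /* Unwind strictly in reverse order of acquisition. */
    bail_ch0:
        release("ch0mm");
    bail_pdev:
        release("pdev");
    bail:
        return ret;
    }

    int main(void)
    {
        return probe();
    }
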
1187 | 1187 | ||
1188 | static void i5100_remove_one(struct pci_dev *pdev) | 1188 | static void i5100_remove_one(struct pci_dev *pdev) |
1189 | { | 1189 | { |
1190 | struct mem_ctl_info *mci; | 1190 | struct mem_ctl_info *mci; |
1191 | struct i5100_priv *priv; | 1191 | struct i5100_priv *priv; |
1192 | 1192 | ||
1193 | mci = edac_mc_del_mc(&pdev->dev); | 1193 | mci = edac_mc_del_mc(&pdev->dev); |
1194 | 1194 | ||
1195 | if (!mci) | 1195 | if (!mci) |
1196 | return; | 1196 | return; |
1197 | 1197 | ||
1198 | priv = mci->pvt_info; | 1198 | priv = mci->pvt_info; |
1199 | 1199 | ||
1200 | debugfs_remove_recursive(priv->debugfs); | 1200 | debugfs_remove_recursive(priv->debugfs); |
1201 | 1201 | ||
1202 | priv->scrub_enable = 0; | 1202 | priv->scrub_enable = 0; |
1203 | cancel_delayed_work_sync(&(priv->i5100_scrubbing)); | 1203 | cancel_delayed_work_sync(&(priv->i5100_scrubbing)); |
1204 | 1204 | ||
1205 | pci_disable_device(pdev); | 1205 | pci_disable_device(pdev); |
1206 | pci_disable_device(priv->ch0mm); | 1206 | pci_disable_device(priv->ch0mm); |
1207 | pci_disable_device(priv->ch1mm); | 1207 | pci_disable_device(priv->ch1mm); |
1208 | pci_disable_device(priv->einj); | 1208 | pci_disable_device(priv->einj); |
1209 | pci_dev_put(priv->ch0mm); | 1209 | pci_dev_put(priv->ch0mm); |
1210 | pci_dev_put(priv->ch1mm); | 1210 | pci_dev_put(priv->ch1mm); |
1211 | pci_dev_put(priv->einj); | 1211 | pci_dev_put(priv->einj); |
1212 | 1212 | ||
1213 | edac_mc_free(mci); | 1213 | edac_mc_free(mci); |
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = { | 1216 | static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = { |
1217 | /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */ | 1217 | /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */ |
1218 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, | 1218 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, |
1219 | { 0, } | 1219 | { 0, } |
1220 | }; | 1220 | }; |
1221 | MODULE_DEVICE_TABLE(pci, i5100_pci_tbl); | 1221 | MODULE_DEVICE_TABLE(pci, i5100_pci_tbl); |
1222 | 1222 | ||
1223 | static struct pci_driver i5100_driver = { | 1223 | static struct pci_driver i5100_driver = { |
1224 | .name = KBUILD_BASENAME, | 1224 | .name = KBUILD_BASENAME, |
1225 | .probe = i5100_init_one, | 1225 | .probe = i5100_init_one, |
1226 | .remove = i5100_remove_one, | 1226 | .remove = i5100_remove_one, |
1227 | .id_table = i5100_pci_tbl, | 1227 | .id_table = i5100_pci_tbl, |
1228 | }; | 1228 | }; |
1229 | 1229 | ||
1230 | static int __init i5100_init(void) | 1230 | static int __init i5100_init(void) |
1231 | { | 1231 | { |
1232 | int pci_rc; | 1232 | int pci_rc; |
1233 | 1233 | ||
1234 | i5100_debugfs = debugfs_create_dir("i5100_edac", NULL); | 1234 | i5100_debugfs = debugfs_create_dir("i5100_edac", NULL); |
1235 | 1235 | ||
1236 | pci_rc = pci_register_driver(&i5100_driver); | 1236 | pci_rc = pci_register_driver(&i5100_driver); |
1237 | return (pci_rc < 0) ? pci_rc : 0; | 1237 | return (pci_rc < 0) ? pci_rc : 0; |
1238 | } | 1238 | } |
1239 | 1239 | ||
1240 | static void __exit i5100_exit(void) | 1240 | static void __exit i5100_exit(void) |
1241 | { | 1241 | { |
1242 | debugfs_remove(i5100_debugfs); | 1242 | debugfs_remove(i5100_debugfs); |
1243 | 1243 | ||
1244 | pci_unregister_driver(&i5100_driver); | 1244 | pci_unregister_driver(&i5100_driver); |
1245 | } | 1245 | } |
1246 | 1246 | ||
1247 | module_init(i5100_init); | 1247 | module_init(i5100_init); |
1248 | module_exit(i5100_exit); | 1248 | module_exit(i5100_exit); |
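
The open-coded init/exit pair survives here only because the driver owns a shared debugfs directory that must be created before and removed after driver registration. Without that, the boilerplate would reduce to the module_pci_driver() helper; a kernel-only sketch, not directly applicable above precisely because of i5100_debugfs:

    /* Kernel-only sketch: module_pci_driver() expands to the init/exit
     * boilerplate above, minus the shared i5100_debugfs directory, which
     * is why this driver cannot actually use it. */
    module_pci_driver(i5100_driver);
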
1249 | 1249 | ||
1250 | MODULE_LICENSE("GPL"); | 1250 | MODULE_LICENSE("GPL"); |
1251 | MODULE_AUTHOR | 1251 | MODULE_AUTHOR |
1252 | ("Arthur Jones <ajones@riverbed.com>"); | 1252 | ("Arthur Jones <ajones@riverbed.com>"); |
1253 | MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers"); | 1253 | MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers"); |
1254 | 1254 |
include/linux/edac.h
1 | /* | 1 | /* |
2 | * Generic EDAC defs | 2 | * Generic EDAC defs |
3 | * | 3 | * |
4 | * Author: Dave Jiang <djiang@mvista.com> | 4 | * Author: Dave Jiang <djiang@mvista.com> |
5 | * | 5 | * |
6 | * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under | 6 | * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under |
7 | * the terms of the GNU General Public License version 2. This program | 7 | * the terms of the GNU General Public License version 2. This program |
8 | * is licensed "as is" without any warranty of any kind, whether express | 8 | * is licensed "as is" without any warranty of any kind, whether express |
9 | * or implied. | 9 | * or implied. |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | #ifndef _LINUX_EDAC_H_ | 12 | #ifndef _LINUX_EDAC_H_ |
13 | #define _LINUX_EDAC_H_ | 13 | #define _LINUX_EDAC_H_ |
14 | 14 | ||
15 | #include <linux/atomic.h> | 15 | #include <linux/atomic.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/completion.h> | 17 | #include <linux/completion.h> |
18 | #include <linux/workqueue.h> | 18 | #include <linux/workqueue.h> |
19 | #include <linux/debugfs.h> | 19 | #include <linux/debugfs.h> |
20 | 20 | ||
21 | struct device; | 21 | struct device; |
22 | 22 | ||
23 | #define EDAC_OPSTATE_INVAL -1 | 23 | #define EDAC_OPSTATE_INVAL -1 |
24 | #define EDAC_OPSTATE_POLL 0 | 24 | #define EDAC_OPSTATE_POLL 0 |
25 | #define EDAC_OPSTATE_NMI 1 | 25 | #define EDAC_OPSTATE_NMI 1 |
26 | #define EDAC_OPSTATE_INT 2 | 26 | #define EDAC_OPSTATE_INT 2 |
27 | 27 | ||
28 | extern int edac_op_state; | 28 | extern int edac_op_state; |
29 | extern int edac_err_assert; | 29 | extern int edac_err_assert; |
30 | extern atomic_t edac_handlers; | 30 | extern atomic_t edac_handlers; |
31 | extern struct bus_type edac_subsys; | 31 | extern struct bus_type edac_subsys; |
32 | 32 | ||
33 | extern int edac_handler_set(void); | 33 | extern int edac_handler_set(void); |
34 | extern void edac_atomic_assert_error(void); | 34 | extern void edac_atomic_assert_error(void); |
35 | extern struct bus_type *edac_get_sysfs_subsys(void); | 35 | extern struct bus_type *edac_get_sysfs_subsys(void); |
36 | extern void edac_put_sysfs_subsys(void); | 36 | extern void edac_put_sysfs_subsys(void); |
37 | 37 | ||
38 | static inline void opstate_init(void) | 38 | static inline void opstate_init(void) |
39 | { | 39 | { |
40 | switch (edac_op_state) { | 40 | switch (edac_op_state) { |
41 | case EDAC_OPSTATE_POLL: | 41 | case EDAC_OPSTATE_POLL: |
42 | case EDAC_OPSTATE_NMI: | 42 | case EDAC_OPSTATE_NMI: |
43 | break; | 43 | break; |
44 | default: | 44 | default: |
45 | edac_op_state = EDAC_OPSTATE_POLL; | 45 | edac_op_state = EDAC_OPSTATE_POLL; |
46 | } | 46 | } |
47 | return; | 47 | return; |
48 | } | 48 | } |
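
opstate_init() clamps edac_op_state, typically set via a module parameter, back to polling whenever a mode the driver cannot honor was requested; the switch in i5100_init_one() above open-codes the same clamp. A standalone model of the behavior:

    #include <assert.h>

    #define EDAC_OPSTATE_INVAL -1
    #define EDAC_OPSTATE_POLL   0
    #define EDAC_OPSTATE_NMI    1
    #define EDAC_OPSTATE_INT    2

    static int edac_op_state = EDAC_OPSTATE_INVAL;

    /* Same clamp as opstate_init(): anything but POLL/NMI falls back to POLL. */
    static void opstate_init_model(void)
    {
        switch (edac_op_state) {
        case EDAC_OPSTATE_POLL:
        case EDAC_OPSTATE_NMI:
            break;
        default:
            edac_op_state = EDAC_OPSTATE_POLL;
        }
    }

    int main(void)
    {
        edac_op_state = EDAC_OPSTATE_INT; /* a mode this driver does not support */
        opstate_init_model();
        assert(edac_op_state == EDAC_OPSTATE_POLL);
        return 0;
    }
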
49 | 49 | ||
50 | /* Max length of a DIMM label*/ | 50 | /* Max length of a DIMM label*/ |
51 | #define EDAC_MC_LABEL_LEN 31 | 51 | #define EDAC_MC_LABEL_LEN 31 |
52 | 52 | ||
53 | /* Maximum size of the location string */ | 53 | /* Maximum size of the location string */ |
54 | #define LOCATION_SIZE 80 | 54 | #define LOCATION_SIZE 80 |
55 | 55 | ||
56 | /* Defines the maximum number of labels that can be reported */ | 56 | /* Defines the maximum number of labels that can be reported */ |
57 | #define EDAC_MAX_LABELS 8 | 57 | #define EDAC_MAX_LABELS 8 |
58 | 58 | ||
59 | /* String used to join two or more labels */ | 59 | /* String used to join two or more labels */ |
60 | #define OTHER_LABEL " or " | 60 | #define OTHER_LABEL " or " |
61 | 61 | ||
62 | /** | 62 | /** |
63 | * enum dev_type - describes the type of DRAM chips used on the memory stick | 63 | * enum dev_type - describes the type of DRAM chips used on the memory stick |
64 | * @DEV_UNKNOWN: Can't be determined, or the MC doesn't support detecting it | 64 | * @DEV_UNKNOWN: Can't be determined, or the MC doesn't support detecting it |
65 | * @DEV_X1: 1 bit for data | 65 | * @DEV_X1: 1 bit for data |
66 | * @DEV_X2: 2 bits for data | 66 | * @DEV_X2: 2 bits for data |
67 | * @DEV_X4: 4 bits for data | 67 | * @DEV_X4: 4 bits for data |
68 | * @DEV_X8: 8 bits for data | 68 | * @DEV_X8: 8 bits for data |
69 | * @DEV_X16: 16 bits for data | 69 | * @DEV_X16: 16 bits for data |
70 | * @DEV_X32: 32 bits for data | 70 | * @DEV_X32: 32 bits for data |
71 | * @DEV_X64: 64 bits for data | 71 | * @DEV_X64: 64 bits for data |
72 | * | 72 | * |
73 | * Typical values are x4 and x8. | 73 | * Typical values are x4 and x8. |
74 | */ | 74 | */ |
75 | enum dev_type { | 75 | enum dev_type { |
76 | DEV_UNKNOWN = 0, | 76 | DEV_UNKNOWN = 0, |
77 | DEV_X1, | 77 | DEV_X1, |
78 | DEV_X2, | 78 | DEV_X2, |
79 | DEV_X4, | 79 | DEV_X4, |
80 | DEV_X8, | 80 | DEV_X8, |
81 | DEV_X16, | 81 | DEV_X16, |
82 | DEV_X32, /* Do these parts exist? */ | 82 | DEV_X32, /* Do these parts exist? */ |
83 | DEV_X64 /* Do these parts exist? */ | 83 | DEV_X64 /* Do these parts exist? */ |
84 | }; | 84 | }; |
85 | 85 | ||
86 | #define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN) | 86 | #define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN) |
87 | #define DEV_FLAG_X1 BIT(DEV_X1) | 87 | #define DEV_FLAG_X1 BIT(DEV_X1) |
88 | #define DEV_FLAG_X2 BIT(DEV_X2) | 88 | #define DEV_FLAG_X2 BIT(DEV_X2) |
89 | #define DEV_FLAG_X4 BIT(DEV_X4) | 89 | #define DEV_FLAG_X4 BIT(DEV_X4) |
90 | #define DEV_FLAG_X8 BIT(DEV_X8) | 90 | #define DEV_FLAG_X8 BIT(DEV_X8) |
91 | #define DEV_FLAG_X16 BIT(DEV_X16) | 91 | #define DEV_FLAG_X16 BIT(DEV_X16) |
92 | #define DEV_FLAG_X32 BIT(DEV_X32) | 92 | #define DEV_FLAG_X32 BIT(DEV_X32) |
93 | #define DEV_FLAG_X64 BIT(DEV_X64) | 93 | #define DEV_FLAG_X64 BIT(DEV_X64) |
94 | 94 | ||
95 | /** | 95 | /** |
96 | * enum hw_event_mc_err_type - type of the detected error | 96 | * enum hw_event_mc_err_type - type of the detected error |
97 | * | 97 | * |
98 | * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC | 98 | * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC |
99 | * corrected error was detected | 99 | * corrected error was detected |
100 | * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that | 100 | * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that |
101 | * can't be corrected by ECC, but it is not | 101 | * can't be corrected by ECC, but it is not |
102 | * fatal (maybe it is on an unused memory area, | 102 | * fatal (maybe it is on an unused memory area, |
103 | * or the memory controller could recover from | 103 | * or the memory controller could recover from |
104 | * it, for example by re-trying the operation). | 104 | * it, for example by re-trying the operation). |
105 | * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not | 105 | * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not |
106 | * be recovered. | 106 | * be recovered. |
107 | */ | 107 | */ |
108 | enum hw_event_mc_err_type { | 108 | enum hw_event_mc_err_type { |
109 | HW_EVENT_ERR_CORRECTED, | 109 | HW_EVENT_ERR_CORRECTED, |
110 | HW_EVENT_ERR_UNCORRECTED, | 110 | HW_EVENT_ERR_UNCORRECTED, |
111 | HW_EVENT_ERR_FATAL, | 111 | HW_EVENT_ERR_FATAL, |
112 | HW_EVENT_ERR_INFO, | 112 | HW_EVENT_ERR_INFO, |
113 | }; | 113 | }; |
114 | 114 | ||
115 | static inline char *mc_event_error_type(const unsigned int err_type) | 115 | static inline char *mc_event_error_type(const unsigned int err_type) |
116 | { | 116 | { |
117 | switch (err_type) { | 117 | switch (err_type) { |
118 | case HW_EVENT_ERR_CORRECTED: | 118 | case HW_EVENT_ERR_CORRECTED: |
119 | return "Corrected"; | 119 | return "Corrected"; |
120 | case HW_EVENT_ERR_UNCORRECTED: | 120 | case HW_EVENT_ERR_UNCORRECTED: |
121 | return "Uncorrected"; | 121 | return "Uncorrected"; |
122 | case HW_EVENT_ERR_FATAL: | 122 | case HW_EVENT_ERR_FATAL: |
123 | return "Fatal"; | 123 | return "Fatal"; |
124 | default: | 124 | default: |
125 | case HW_EVENT_ERR_INFO: | 125 | case HW_EVENT_ERR_INFO: |
126 | return "Info"; | 126 | return "Info"; |
127 | } | 127 | } |
128 | } | 128 | } |
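
Note how the fall-through default folds every unrecognized value, including HW_EVENT_ERR_INFO, into the "Info" string. A standalone copy of the mapping that demonstrates this:

    #include <stdio.h>

    enum hw_event_mc_err_type {
        HW_EVENT_ERR_CORRECTED,
        HW_EVENT_ERR_UNCORRECTED,
        HW_EVENT_ERR_FATAL,
        HW_EVENT_ERR_INFO,
    };

    /* Same mapping as mc_event_error_type() above. */
    static const char *err_name(unsigned int t)
    {
        switch (t) {
        case HW_EVENT_ERR_CORRECTED:   return "Corrected";
        case HW_EVENT_ERR_UNCORRECTED: return "Uncorrected";
        case HW_EVENT_ERR_FATAL:       return "Fatal";
        default:                       return "Info"; /* HW_EVENT_ERR_INFO and anything unknown */
        }
    }

    int main(void)
    {
        printf("%s / %s / %s\n", err_name(HW_EVENT_ERR_CORRECTED),
               err_name(HW_EVENT_ERR_INFO), err_name(42));
        return 0;
    }
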
129 | 129 | ||
130 | /** | 130 | /** |
131 | * enum mem_type - memory types. For a more detailed reference, please see | 131 | * enum mem_type - memory types. For a more detailed reference, please see |
132 | * http://en.wikipedia.org/wiki/DRAM | 132 | * http://en.wikipedia.org/wiki/DRAM |
133 | * | 133 | * |
134 | * @MEM_EMPTY: Empty csrow | 134 | * @MEM_EMPTY: Empty csrow |
135 | * @MEM_RESERVED: Reserved csrow type | 135 | * @MEM_RESERVED: Reserved csrow type |
136 | * @MEM_UNKNOWN: Unknown csrow type | 136 | * @MEM_UNKNOWN: Unknown csrow type |
137 | * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. | 137 | * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. |
138 | * @MEM_EDO: EDO - Extended data out, used on systems up to 1998. | 138 | * @MEM_EDO: EDO - Extended data out, used on systems up to 1998. |
139 | * @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant. | 139 | * @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant. |
140 | * @MEM_SDR: SDR - Single data rate SDRAM | 140 | * @MEM_SDR: SDR - Single data rate SDRAM |
141 | * http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory | 141 | * http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory |
142 | * They use 3 pins for chip select: Pins 0 and 2 are | 142 | * They use 3 pins for chip select: Pins 0 and 2 are |
143 | * for rank 0; pins 1 and 3 are for rank 1, if the memory | 143 | * for rank 0; pins 1 and 3 are for rank 1, if the memory |
144 | * is dual-rank. | 144 | * is dual-rank. |
145 | * @MEM_RDR: Registered SDR SDRAM | 145 | * @MEM_RDR: Registered SDR SDRAM |
146 | * @MEM_DDR: Double data rate SDRAM | 146 | * @MEM_DDR: Double data rate SDRAM |
147 | * http://en.wikipedia.org/wiki/DDR_SDRAM | 147 | * http://en.wikipedia.org/wiki/DDR_SDRAM |
148 | * @MEM_RDDR: Registered Double data rate SDRAM | 148 | * @MEM_RDDR: Registered Double data rate SDRAM |
149 | * This is a variant of the DDR memories. | 149 | * This is a variant of the DDR memories. |
150 | * A registered memory has a buffer inside it, hiding | 150 | * A registered memory has a buffer inside it, hiding |
151 | * part of the memory details from the memory controller. | 151 | * part of the memory details from the memory controller. |
152 | * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. | 152 | * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. |
153 | * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. | 153 | * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. |
154 | * Those memories are labeled as "PC2-" instead of "PC" to | 154 | * Those memories are labeled as "PC2-" instead of "PC" to |
155 | * differentiate them from DDR. | 155 | * differentiate them from DDR. |
156 | * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 | 156 | * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 |
157 | * and JESD206. | 157 | * and JESD206. |
158 | * Those memories are accessed per DIMM slot, and not by | 158 | * Those memories are accessed per DIMM slot, and not by |
159 | * a chip select signal. | 159 | * a chip select signal. |
160 | * @MEM_RDDR2: Registered DDR2 RAM | 160 | * @MEM_RDDR2: Registered DDR2 RAM |
161 | * This is a variant of the DDR2 memories. | 161 | * This is a variant of the DDR2 memories. |
162 | * @MEM_XDR: Rambus XDR | 162 | * @MEM_XDR: Rambus XDR |
163 | * It is an evolution of the original RAMBUS memories, | 163 | * It is an evolution of the original RAMBUS memories, |
164 | * created to compete with DDR2. It wasn't used on any | 164 | * created to compete with DDR2. It wasn't used on any |
165 | * x86 arch, but the cell_edac PPC memory controller uses it. | 165 | * x86 arch, but the cell_edac PPC memory controller uses it. |
166 | * @MEM_DDR3: DDR3 RAM | 166 | * @MEM_DDR3: DDR3 RAM |
167 | * @MEM_RDDR3: Registered DDR3 RAM | 167 | * @MEM_RDDR3: Registered DDR3 RAM |
168 | * This is a variant of the DDR3 memories. | 168 | * This is a variant of the DDR3 memories. |
169 | */ | 169 | */ |
170 | enum mem_type { | 170 | enum mem_type { |
171 | MEM_EMPTY = 0, | 171 | MEM_EMPTY = 0, |
172 | MEM_RESERVED, | 172 | MEM_RESERVED, |
173 | MEM_UNKNOWN, | 173 | MEM_UNKNOWN, |
174 | MEM_FPM, | 174 | MEM_FPM, |
175 | MEM_EDO, | 175 | MEM_EDO, |
176 | MEM_BEDO, | 176 | MEM_BEDO, |
177 | MEM_SDR, | 177 | MEM_SDR, |
178 | MEM_RDR, | 178 | MEM_RDR, |
179 | MEM_DDR, | 179 | MEM_DDR, |
180 | MEM_RDDR, | 180 | MEM_RDDR, |
181 | MEM_RMBS, | 181 | MEM_RMBS, |
182 | MEM_DDR2, | 182 | MEM_DDR2, |
183 | MEM_FB_DDR2, | 183 | MEM_FB_DDR2, |
184 | MEM_RDDR2, | 184 | MEM_RDDR2, |
185 | MEM_XDR, | 185 | MEM_XDR, |
186 | MEM_DDR3, | 186 | MEM_DDR3, |
187 | MEM_RDDR3, | 187 | MEM_RDDR3, |
188 | }; | 188 | }; |
189 | 189 | ||
190 | #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) | 190 | #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) |
191 | #define MEM_FLAG_RESERVED BIT(MEM_RESERVED) | 191 | #define MEM_FLAG_RESERVED BIT(MEM_RESERVED) |
192 | #define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN) | 192 | #define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN) |
193 | #define MEM_FLAG_FPM BIT(MEM_FPM) | 193 | #define MEM_FLAG_FPM BIT(MEM_FPM) |
194 | #define MEM_FLAG_EDO BIT(MEM_EDO) | 194 | #define MEM_FLAG_EDO BIT(MEM_EDO) |
195 | #define MEM_FLAG_BEDO BIT(MEM_BEDO) | 195 | #define MEM_FLAG_BEDO BIT(MEM_BEDO) |
196 | #define MEM_FLAG_SDR BIT(MEM_SDR) | 196 | #define MEM_FLAG_SDR BIT(MEM_SDR) |
197 | #define MEM_FLAG_RDR BIT(MEM_RDR) | 197 | #define MEM_FLAG_RDR BIT(MEM_RDR) |
198 | #define MEM_FLAG_DDR BIT(MEM_DDR) | 198 | #define MEM_FLAG_DDR BIT(MEM_DDR) |
199 | #define MEM_FLAG_RDDR BIT(MEM_RDDR) | 199 | #define MEM_FLAG_RDDR BIT(MEM_RDDR) |
200 | #define MEM_FLAG_RMBS BIT(MEM_RMBS) | 200 | #define MEM_FLAG_RMBS BIT(MEM_RMBS) |
201 | #define MEM_FLAG_DDR2 BIT(MEM_DDR2) | 201 | #define MEM_FLAG_DDR2 BIT(MEM_DDR2) |
202 | #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) | 202 | #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) |
203 | #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) | 203 | #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) |
204 | #define MEM_FLAG_XDR BIT(MEM_XDR) | 204 | #define MEM_FLAG_XDR BIT(MEM_XDR) |
205 | #define MEM_FLAG_DDR3 BIT(MEM_DDR3) | 205 | #define MEM_FLAG_DDR3 BIT(MEM_DDR3) |
206 | #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) | 206 | #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) |
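
Each MEM_FLAG_* is simply BIT() of the matching enum mem_type value, so fields such as mci->mtype_cap are ordinary bitmasks tested with &. A standalone check, reusing the enum positions defined above (MEM_DDR2 is 11, MEM_FB_DDR2 is 12) and mirroring the i5100 probe's "mtype_cap = MEM_FLAG_FB_DDR2":

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Positions taken from enum mem_type above. */
    enum mem_type { MEM_DDR2 = 11, MEM_FB_DDR2 = 12 };
    #define MEM_FLAG_DDR2    BIT(MEM_DDR2)
    #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)

    int main(void)
    {
        unsigned int mtype_cap = MEM_FLAG_FB_DDR2; /* as set by the i5100 probe */

        printf("FB-DDR2 supported: %s\n", mtype_cap & MEM_FLAG_FB_DDR2 ? "yes" : "no");
        printf("plain DDR2 supported: %s\n", mtype_cap & MEM_FLAG_DDR2 ? "yes" : "no");
        return 0;
    }
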
207 | 207 | ||
208 | /** | 208 | /** |
209 | * enum edac_type - Error Detection and Correction capabilities and mode | 209 | * enum edac_type - Error Detection and Correction capabilities and mode |
210 | * @EDAC_UNKNOWN: Unknown if ECC is available | 210 | * @EDAC_UNKNOWN: Unknown if ECC is available |
211 | * @EDAC_NONE: Doesn't support ECC | 211 | * @EDAC_NONE: Doesn't support ECC |
212 | * @EDAC_RESERVED: Reserved ECC type | 212 | * @EDAC_RESERVED: Reserved ECC type |
213 | * @EDAC_PARITY: Detects parity errors | 213 | * @EDAC_PARITY: Detects parity errors |
214 | * @EDAC_EC: Error Checking - no correction | 214 | * @EDAC_EC: Error Checking - no correction |
215 | * @EDAC_SECDED: Single bit error correction, Double detection | 215 | * @EDAC_SECDED: Single bit error correction, Double detection |
216 | * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist? | 216 | * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist? |
217 | * @EDAC_S4ECD4ED: Chipkill x4 devices | 217 | * @EDAC_S4ECD4ED: Chipkill x4 devices |
218 | * @EDAC_S8ECD8ED: Chipkill x8 devices | 218 | * @EDAC_S8ECD8ED: Chipkill x8 devices |
219 | * @EDAC_S16ECD16ED: Chipkill x16 devices | 219 | * @EDAC_S16ECD16ED: Chipkill x16 devices |
220 | */ | 220 | */ |
221 | enum edac_type { | 221 | enum edac_type { |
222 | EDAC_UNKNOWN = 0, | 222 | EDAC_UNKNOWN = 0, |
223 | EDAC_NONE, | 223 | EDAC_NONE, |
224 | EDAC_RESERVED, | 224 | EDAC_RESERVED, |
225 | EDAC_PARITY, | 225 | EDAC_PARITY, |
226 | EDAC_EC, | 226 | EDAC_EC, |
227 | EDAC_SECDED, | 227 | EDAC_SECDED, |
228 | EDAC_S2ECD2ED, | 228 | EDAC_S2ECD2ED, |
229 | EDAC_S4ECD4ED, | 229 | EDAC_S4ECD4ED, |
230 | EDAC_S8ECD8ED, | 230 | EDAC_S8ECD8ED, |
231 | EDAC_S16ECD16ED, | 231 | EDAC_S16ECD16ED, |
232 | }; | 232 | }; |
233 | 233 | ||
234 | #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) | 234 | #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) |
235 | #define EDAC_FLAG_NONE BIT(EDAC_NONE) | 235 | #define EDAC_FLAG_NONE BIT(EDAC_NONE) |
236 | #define EDAC_FLAG_PARITY BIT(EDAC_PARITY) | 236 | #define EDAC_FLAG_PARITY BIT(EDAC_PARITY) |
237 | #define EDAC_FLAG_EC BIT(EDAC_EC) | 237 | #define EDAC_FLAG_EC BIT(EDAC_EC) |
238 | #define EDAC_FLAG_SECDED BIT(EDAC_SECDED) | 238 | #define EDAC_FLAG_SECDED BIT(EDAC_SECDED) |
239 | #define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED) | 239 | #define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED) |
240 | #define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED) | 240 | #define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED) |
241 | #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) | 241 | #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) |
242 | #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) | 242 | #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) |
243 | 243 | ||
244 | /** | 244 | /** |
245 | * enum scrub_type - scrubbing capabilities | 245 | * enum scrub_type - scrubbing capabilities |
246 | * @SCRUB_UNKNOWN: Unknown if scrubber is available | 246 | * @SCRUB_UNKNOWN: Unknown if scrubber is available |
247 | * @SCRUB_NONE: No scrubber | 247 | * @SCRUB_NONE: No scrubber |
248 | * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing | 248 | * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing |
249 | * @SCRUB_SW_SRC: Software scrub only errors | 249 | * @SCRUB_SW_SRC: Software scrub only errors |
250 | * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error | 250 | * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error |
251 | * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable | 251 | * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable |
252 | * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing | 252 | * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing |
253 | * @SCRUB_HW_SRC: Hardware scrub only errors | 253 | * @SCRUB_HW_SRC: Hardware scrub only errors |
254 | * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error | 254 | * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error |
255 | * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable | 255 | * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable |
256 | */ | 256 | */ |
257 | enum scrub_type { | 257 | enum scrub_type { |
258 | SCRUB_UNKNOWN = 0, | 258 | SCRUB_UNKNOWN = 0, |
259 | SCRUB_NONE, | 259 | SCRUB_NONE, |
260 | SCRUB_SW_PROG, | 260 | SCRUB_SW_PROG, |
261 | SCRUB_SW_SRC, | 261 | SCRUB_SW_SRC, |
262 | SCRUB_SW_PROG_SRC, | 262 | SCRUB_SW_PROG_SRC, |
263 | SCRUB_SW_TUNABLE, | 263 | SCRUB_SW_TUNABLE, |
264 | SCRUB_HW_PROG, | 264 | SCRUB_HW_PROG, |
265 | SCRUB_HW_SRC, | 265 | SCRUB_HW_SRC, |
266 | SCRUB_HW_PROG_SRC, | 266 | SCRUB_HW_PROG_SRC, |
267 | SCRUB_HW_TUNABLE | 267 | SCRUB_HW_TUNABLE |
268 | }; | 268 | }; |
269 | 269 | ||
270 | #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) | 270 | #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) |
271 | #define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) | 271 | #define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) |
272 | #define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) | 272 | #define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) |
273 | #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE) | 273 | #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE) |
274 | #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) | 274 | #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) |
275 | #define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) | 275 | #define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) |
276 | #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) | 276 | #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) |
277 | #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) | 277 | #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) |
278 | 278 | ||
279 | /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ | 279 | /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ |
280 | 280 | ||
281 | /* EDAC internal operation states */ | 281 | /* EDAC internal operation states */ |
282 | #define OP_ALLOC 0x100 | 282 | #define OP_ALLOC 0x100 |
283 | #define OP_RUNNING_POLL 0x201 | 283 | #define OP_RUNNING_POLL 0x201 |
284 | #define OP_RUNNING_INTERRUPT 0x202 | 284 | #define OP_RUNNING_INTERRUPT 0x202 |
285 | #define OP_RUNNING_POLL_INTR 0x203 | 285 | #define OP_RUNNING_POLL_INTR 0x203 |
286 | #define OP_OFFLINE 0x300 | 286 | #define OP_OFFLINE 0x300 |
287 | 287 | ||
288 | /* | 288 | /* |
289 | * Concepts used at the EDAC subsystem | 289 | * Concepts used at the EDAC subsystem |
290 | * | 290 | * |
291 | * There are several things to be aware of that aren't at all obvious: | 291 | * There are several things to be aware of that aren't at all obvious: |
292 | * | 292 | * |
293 | * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc. | 293 | * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc. |
294 | * | 294 | * |
295 | * These are some of the many terms that are thrown about that don't always | 295 | * These are some of the many terms that are thrown about that don't always |
296 | * mean what people think they mean (Inconceivable!). In the interest of | 296 | * mean what people think they mean (Inconceivable!). In the interest of |
297 | * creating a common ground for discussion, terms and their definitions | 297 | * creating a common ground for discussion, terms and their definitions |
298 | * will be established. | 298 | * will be established. |
299 | * | 299 | * |
300 | * Memory devices: The individual DRAM chips on a memory stick. These | 300 | * Memory devices: The individual DRAM chips on a memory stick. These |
301 | * devices commonly output 4 and 8 bits each (x4, x8). | 301 | * devices commonly output 4 and 8 bits each (x4, x8). |
302 | * Grouping several of these in parallel provides the | 302 | * Grouping several of these in parallel provides the |
303 | * number of bits that the memory controller expects: | 303 | * number of bits that the memory controller expects: |
304 | * typically 72 bits, in order to provide 64 bits + | 304 | * typically 72 bits, in order to provide 64 bits + |
305 | * 8 bits of ECC data. | 305 | * 8 bits of ECC data. |
306 | * | 306 | * |
307 | * Memory Stick: A printed circuit board that aggregates multiple | 307 | * Memory Stick: A printed circuit board that aggregates multiple |
308 | * memory devices in parallel. In general, this is the | 308 | * memory devices in parallel. In general, this is the |
309 | * Field Replaceable Unit (FRU) which gets replaced, in | 309 | * Field Replaceable Unit (FRU) which gets replaced, in |
310 | * the case of excessive errors. Most often it is also | 310 | * the case of excessive errors. Most often it is also |
311 | * called DIMM (Dual Inline Memory Module). | 311 | * called DIMM (Dual Inline Memory Module). |
312 | * | 312 | * |
313 | * Memory Socket: A physical connector on the motherboard that accepts | 313 | * Memory Socket: A physical connector on the motherboard that accepts |
314 | * a single memory stick. Also called a "slot" in several | 314 | * a single memory stick. Also called a "slot" in several |
315 | * datasheets. | 315 | * datasheets. |
316 | * | 316 | * |
317 | * Channel: A memory controller channel, responsible for communicating | 317 | * Channel: A memory controller channel, responsible for communicating |
318 | * with a group of DIMMs. Each channel has its own | 318 | * with a group of DIMMs. Each channel has its own |
319 | * independent control (command) and data bus, and can | 319 | * independent control (command) and data bus, and can |
320 | * be used independently or grouped with other channels. | 320 | * be used independently or grouped with other channels. |
321 | * | 321 | * |
322 | * Branch: Typically the highest level of the hierarchy on a | 322 | * Branch: Typically the highest level of the hierarchy on a |
323 | * Fully-Buffered DIMM memory controller. | 323 | * Fully-Buffered DIMM memory controller. |
324 | * Typically, it contains two channels. | 324 | * Typically, it contains two channels. |
325 | * Two channels at the same branch can be used in single | 325 | * Two channels at the same branch can be used in single |
326 | * mode or in lockstep mode. | 326 | * mode or in lockstep mode. |
327 | * When lockstep is enabled, the cacheline is doubled, | 327 | * When lockstep is enabled, the cacheline is doubled, |
328 | * but it generally brings some performance penalty. | 328 | * but it generally brings some performance penalty. |
329 | * Also, it is generally not possible to point to just one | 329 | * Also, it is generally not possible to point to just one |
330 | * memory stick when an error occurs, as the error | 330 | * memory stick when an error occurs, as the error |
331 | * correction code is calculated using two DIMMs instead | 331 | * correction code is calculated using two DIMMs instead |
332 | * of one. Due to that, it is capable of correcting more | 332 | * of one. Due to that, it is capable of correcting more |
333 | * errors than on single mode. | 333 | * errors than on single mode. |
334 | * | 334 | * |
335 | * Single-channel: The data accessed by the memory controller is contained | 335 | * Single-channel: The data accessed by the memory controller is contained |
336 | * in one DIMM only. E.g., if the data is 64 bits wide, | 336 | * in one DIMM only. E.g., if the data is 64 bits wide, |
337 | * the data flows to the CPU using one 64-bit parallel | 337 | * the data flows to the CPU using one 64-bit parallel |
338 | * access. | 338 | * access. |
339 | * Typically used with SDR, DDR, DDR2 and DDR3 memories. | 339 | * Typically used with SDR, DDR, DDR2 and DDR3 memories. |
340 | * FB-DIMM and RAMBUS use a different concept for channel, | 340 | * FB-DIMM and RAMBUS use a different concept for channel, |
341 | * so this concept doesn't apply there. | 341 | * so this concept doesn't apply there. |
342 | * | 342 | * |
343 | * Double-channel: The data size accessed by the memory controller is | 343 | * Double-channel: The data size accessed by the memory controller is |
344 | * interleaved across two DIMMs, accessed at the same time. | 344 | * interleaved across two DIMMs, accessed at the same time. |
345 | * E.g., if each DIMM is 64 bits wide (72 bits with ECC), | 345 | * E.g., if each DIMM is 64 bits wide (72 bits with ECC), |
346 | * the data flows to the CPU using a 128-bit parallel | 346 | * the data flows to the CPU using a 128-bit parallel |
347 | * access. | 347 | * access. |
348 | * | 348 | * |
349 | * Chip-select row: This is the name of the DRAM signal used to select the | 349 | * Chip-select row: This is the name of the DRAM signal used to select the |
350 | * DRAM ranks to be accessed. Common chip-select rows for | 350 | * DRAM ranks to be accessed. Common chip-select rows for |
351 | * single channel are 64 bits, for dual channel 128 bits. | 351 | * single channel are 64 bits, for dual channel 128 bits. |
352 | * It may not be visible by the memory controller, as some | 352 | * It may not be visible by the memory controller, as some |
353 | * DIMM types have a memory buffer that can hide direct | 353 | * DIMM types have a memory buffer that can hide direct |
354 | * access to it from the Memory Controller. | 354 | * access to it from the Memory Controller. |
355 | * | 355 | * |
356 | * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. | 356 | * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. |
357 | * Motherboards commonly drive two chip-select pins to | 357 | * Motherboards commonly drive two chip-select pins to |
358 | * a memory stick. A single-ranked stick, will occupy | 358 | * a memory stick. A single-ranked stick, will occupy |
359 | * only one of those rows. The other will be unused. | 359 | * only one of those rows. The other will be unused. |
360 | * | 360 | * |
361 | * Double-Ranked stick: A double-ranked stick has two chip-select rows which | 361 | * Double-Ranked stick: A double-ranked stick has two chip-select rows which |
362 | * access different sets of memory devices. The two | 362 | * access different sets of memory devices. The two |
363 | * rows cannot be accessed concurrently. | 363 | * rows cannot be accessed concurrently. |
364 | * | 364 | * |
365 | * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. | 365 | * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. |
366 | * A double-sided stick has two chip-select rows which | 366 | * A double-sided stick has two chip-select rows which |
367 | * access different sets of memory devices. The two | 367 | * access different sets of memory devices. The two |
368 | * rows cannot be accessed concurrently. "Double-sided" | 368 | * rows cannot be accessed concurrently. "Double-sided" |
369 | * is irrespective of the memory devices being mounted | 369 | * is irrespective of the memory devices being mounted |
370 | * on both sides of the memory stick. | 370 | * on both sides of the memory stick. |
371 | * | 371 | * |
372 | * Socket set: All of the memory sticks that are required for | 372 | * Socket set: All of the memory sticks that are required for |
373 | * a single memory access or all of the memory sticks | 373 | * a single memory access or all of the memory sticks |
374 | * spanned by a chip-select row. A single socket set | 374 | * spanned by a chip-select row. A single socket set |
375 | * has two chip-select rows and if double-sided sticks | 375 | * has two chip-select rows and if double-sided sticks |
376 | * are used these will occupy those chip-select rows. | 376 | * are used these will occupy those chip-select rows. |
377 | * | 377 | * |
378 | * Bank: This term is avoided because it is unclear when | 378 | * Bank: This term is avoided because it is unclear when |
379 | * needing to distinguish between chip-select rows and | 379 | * needing to distinguish between chip-select rows and |
380 | * socket sets. | 380 | * socket sets. |
381 | * | 381 | * |
382 | * Controller pages: | 382 | * Controller pages: |
383 | * | 383 | * |
384 | * Physical pages: | 384 | * Physical pages: |
385 | * | 385 | * |
386 | * Virtual pages: | 386 | * Virtual pages: |
387 | * | 387 | * |
388 | * | 388 | * |
389 | * STRUCTURE ORGANIZATION AND CHOICES | 389 | * STRUCTURE ORGANIZATION AND CHOICES |
390 | * | 390 | * |
391 | * | 391 | * |
392 | * | 392 | * |
393 | * PS - I enjoyed writing all that about as much as you enjoyed reading it. | 393 | * PS - I enjoyed writing all that about as much as you enjoyed reading it. |
394 | */ | 394 | */ |
395 | 395 | ||
396 | /** | 396 | /** |
397 | * enum edac_mc_layer - memory controller hierarchy layer | 397 | * enum edac_mc_layer - memory controller hierarchy layer |
398 | * | 398 | * |
399 | * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch" | 399 | * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch" |
400 | * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel" | 400 | * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel" |
401 | * @EDAC_MC_LAYER_SLOT: memory layer is named "slot" | 401 | * @EDAC_MC_LAYER_SLOT: memory layer is named "slot" |
402 | * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select" | 402 | * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select" |
403 | * @EDAC_MC_LAYER_ALL_MEM: memory layout is unknown. All memory is mapped | 403 | * @EDAC_MC_LAYER_ALL_MEM: memory layout is unknown. All memory is mapped |
404 | * as a single memory area. This is used when | 404 | * as a single memory area. This is used when |
405 | * retrieving errors from a firmware-driven driver. | 405 | * retrieving errors from a firmware-driven driver. |
406 | * | 406 | * |
407 | * This enum is used by the drivers to tell edac_mc_sysfs what name should | 407 | * This enum is used by the drivers to tell edac_mc_sysfs what name should |
408 | * be used when describing a memory stick location. | 408 | * be used when describing a memory stick location. |
409 | */ | 409 | */ |
410 | enum edac_mc_layer_type { | 410 | enum edac_mc_layer_type { |
411 | EDAC_MC_LAYER_BRANCH, | 411 | EDAC_MC_LAYER_BRANCH, |
412 | EDAC_MC_LAYER_CHANNEL, | 412 | EDAC_MC_LAYER_CHANNEL, |
413 | EDAC_MC_LAYER_SLOT, | 413 | EDAC_MC_LAYER_SLOT, |
414 | EDAC_MC_LAYER_CHIP_SELECT, | 414 | EDAC_MC_LAYER_CHIP_SELECT, |
415 | EDAC_MC_LAYER_ALL_MEM, | 415 | EDAC_MC_LAYER_ALL_MEM, |
416 | }; | 416 | }; |
417 | 417 | ||
418 | /** | 418 | /** |
419 | * struct edac_mc_layer - describes the memory controller hierarchy | 419 | * struct edac_mc_layer - describes the memory controller hierarchy |
420 | * @type: layer type | 420 | * @type: layer type |
421 | * @size: number of components per layer. For example, | 421 | * @size: number of components per layer. For example, |
422 | * if the channel layer has two channels, size = 2 | 422 | * if the channel layer has two channels, size = 2 |
423 | * @is_virt_csrow: This layer is part of the "csrow" when old API | 423 | * @is_virt_csrow: This layer is part of the "csrow" when old API |
424 | * compatibility mode is enabled. Otherwise, it is | 424 | * compatibility mode is enabled. Otherwise, it is |
425 | * a channel | 425 | * a channel |
426 | */ | 426 | */ |
427 | struct edac_mc_layer { | 427 | struct edac_mc_layer { |
428 | enum edac_mc_layer_type type; | 428 | enum edac_mc_layer_type type; |
429 | unsigned size; | 429 | unsigned size; |
430 | bool is_virt_csrow; | 430 | bool is_virt_csrow; |
431 | }; | 431 | }; |
432 | 432 | ||
433 | /* | 433 | /* |
434 | * Maximum number of layers used by the memory controller to uniquely | 434 | * Maximum number of layers used by the memory controller to uniquely |
435 | * identify a single memory stick. | 435 | * identify a single memory stick. |
436 | * NOTE: Changing this constant requires not only changing the constant | 436 | * NOTE: Changing this constant requires not only changing the constant |
437 | * below, but also changing the existing code at the core, as some of | 437 | * below, but also changing the existing code at the core, as some of |
438 | * that code is optimized for 3 layers. | 438 | * that code is optimized for 3 layers. |
439 | */ | 439 | */ |
440 | #define EDAC_MAX_LAYERS 3 | 440 | #define EDAC_MAX_LAYERS 3 |
441 | 441 | ||
442 | /** | 442 | /** |
443 | * EDAC_DIMM_OFF - Macro responsible for getting a pointer offset inside a pointer array | 443 | * EDAC_DIMM_OFF - Macro responsible for getting a pointer offset inside a pointer array |
444 | * for the element given by [layer0,layer1,layer2] position | 444 | * for the element given by [layer0,layer1,layer2] position |
445 | * | 445 | * |
446 | * @layers: a struct edac_mc_layer array, describing how many elements | 446 | * @layers: a struct edac_mc_layer array, describing how many elements |
447 | * were allocated for each layer | 447 | * were allocated for each layer |
448 | * @nlayers: Number of layers in the @layers array | 448 | * @nlayers: Number of layers in the @layers array |
449 | * @layer0: layer0 position | 449 | * @layer0: layer0 position |
450 | * @layer1: layer1 position. Unused if nlayers < 2 | 450 | * @layer1: layer1 position. Unused if nlayers < 2 |
451 | * @layer2: layer2 position. Unused if nlayers < 3 | 451 | * @layer2: layer2 position. Unused if nlayers < 3 |
452 | * | 452 | * |
453 | * For 1 layer, this macro returns &var[layer0] - &var | 453 | * For 1 layer, this macro returns &var[layer0] - &var |
454 | * For 2 layers, this macro is similar to allocating a two-dimensional array | 454 | * For 2 layers, this macro is similar to allocating a two-dimensional array |
455 | * and returning "&var[layer0][layer1] - &var" | 455 | * and returning "&var[layer0][layer1] - &var" |
456 | * For 3 layers, this macro is similar to allocating a three-dimensional array | 456 | * For 3 layers, this macro is similar to allocating a three-dimensional array |
457 | * and returning "&var[layer0][layer1][layer2] - &var" | 457 | * and returning "&var[layer0][layer1][layer2] - &var" |
458 | * | 458 | * |
459 | * A loop could be used here to make it more generic, but, as we only have | 459 | * A loop could be used here to make it more generic, but, as we only have |
460 | * 3 layers, this is a little faster. | 460 | * 3 layers, this is a little faster. |
461 | * By design, the number of layers can never be 0 or more than 3. If that | 461 | * By design, the number of layers can never be 0 or more than 3. If that |
462 | * ever happens, a NULL is returned, causing an OOPS during the memory | 462 | * ever happens, a NULL is returned, causing an OOPS during the memory |
463 | * allocation routine, which points the developer at the mistake. | 463 | * allocation routine, which points the developer at the mistake. |
464 | */ | 464 | */ |
465 | #define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \ | 465 | #define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \ |
466 | int __i; \ | 466 | int __i; \ |
467 | if ((nlayers) == 1) \ | 467 | if ((nlayers) == 1) \ |
468 | __i = layer0; \ | 468 | __i = layer0; \ |
469 | else if ((nlayers) == 2) \ | 469 | else if ((nlayers) == 2) \ |
470 | __i = (layer1) + ((layers[1]).size * (layer0)); \ | 470 | __i = (layer1) + ((layers[1]).size * (layer0)); \ |
471 | else if ((nlayers) == 3) \ | 471 | else if ((nlayers) == 3) \ |
472 | __i = (layer2) + ((layers[2]).size * ((layer1) + \ | 472 | __i = (layer2) + ((layers[2]).size * ((layer1) + \ |
473 | ((layers[1]).size * (layer0)))); \ | 473 | ((layers[1]).size * (layer0)))); \ |
474 | else \ | 474 | else \ |
475 | __i = -EINVAL; \ | 475 | __i = -EINVAL; \ |
476 | __i; \ | 476 | __i; \ |
477 | }) | 477 | }) |
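
EDAC_DIMM_OFF linearizes up to three layer coordinates in row-major order; for two layers the offset is simply layer1 + layers[1].size * layer0. A standalone check of that arithmetic for the 2-channel by 4-slot layout described in the i5100 probe above:

    #include <assert.h>

    /* The 2-layer arm of EDAC_DIMM_OFF: offset = layer1 + size1 * layer0. */
    static int dimm_off_2(int size1, int layer0, int layer1)
    {
        return layer1 + size1 * layer0;
    }

    int main(void)
    {
        int nslots = 4; /* layers[1].size: 4 slots per channel */

        assert(dimm_off_2(nslots, 0, 0) == 0); /* channel 0, slot 0 */
        assert(dimm_off_2(nslots, 0, 3) == 3); /* channel 0, slot 3 */
        assert(dimm_off_2(nslots, 1, 0) == 4); /* channel 1, slot 0 */
        assert(dimm_off_2(nslots, 1, 2) == 6); /* channel 1, slot 2 */
        return 0;
    }
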
478 | 478 | ||
479 | /** | 479 | /** |
480 | * EDAC_DIMM_PTR - Macro responsible for getting a pointer inside a pointer array | 480 | * EDAC_DIMM_PTR - Macro responsible for getting a pointer inside a pointer array |
481 | * for the element given by [layer0,layer1,layer2] position | 481 | * for the element given by [layer0,layer1,layer2] position |
482 | * | 482 | * |
483 | * @layers: a struct edac_mc_layer array, describing how many elements | 483 | * @layers: a struct edac_mc_layer array, describing how many elements |
484 | * were allocated for each layer | 484 | * were allocated for each layer |
485 | * @var: name of the var where we want to get the pointer | 485 | * @var: name of the var where we want to get the pointer |
486 | * (like mci->dimms) | 486 | * (like mci->dimms) |
487 | * @nlayers: Number of layers in the @layers array | 487 | * @nlayers: Number of layers in the @layers array |
488 | * @layer0: layer0 position | 488 | * @layer0: layer0 position |
489 | * @layer1: layer1 position. Unused if nlayers < 2 | 489 | * @layer1: layer1 position. Unused if nlayers < 2 |
490 | * @layer2: layer2 position. Unused if nlayers < 3 | 490 | * @layer2: layer2 position. Unused if nlayers < 3 |
491 | * | 491 | * |
492 | * For 1 layer, this macro returns &var[layer0] | 492 | * For 1 layer, this macro returns &var[layer0] |
493 | * For 2 layers, this macro is similar to allocating a two-dimensional array | 493 | * For 2 layers, this macro is similar to allocating a two-dimensional array |
494 | * and returning "&var[layer0][layer1]" | 494 | * and returning "&var[layer0][layer1]" |
495 | * For 3 layers, this macro is similar to allocating a three-dimensional array | 495 | * For 3 layers, this macro is similar to allocating a three-dimensional array |
496 | * and returning "&var[layer0][layer1][layer2]" | 496 | * and returning "&var[layer0][layer1][layer2]" |
497 | */ | 497 | */ |
498 | #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ | 498 | #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ |
499 | typeof(*var) __p; \ | 499 | typeof(*var) __p; \ |
500 | int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \ | 500 | int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \ |
501 | if (___i < 0) \ | 501 | if (___i < 0) \ |
502 | __p = NULL; \ | 502 | __p = NULL; \ |
503 | else \ | 503 | else \ |
504 | __p = (var)[___i]; \ | 504 | __p = (var)[___i]; \ |
505 | __p; \ | 505 | __p; \ |
506 | }) | 506 | }) |
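
EDAC_DIMM_PTR wraps that offset computation and yields NULL when the offset is invalid, so callers can index a flat pointer array such as mci->dimms without open-coding the arithmetic. A userspace model of the 2-layer lookup (the DIMM label is a made-up example):

    #include <stdio.h>

    struct dimm_info { const char *label; };

    /* Userspace model of EDAC_DIMM_PTR for two layers: a negative offset
     * (what the real macro produces for an invalid layer count) yields NULL. */
    static struct dimm_info *dimm_ptr(struct dimm_info **var, int off)
    {
        return off < 0 ? NULL : var[off];
    }

    int main(void)
    {
        struct dimm_info d = { "CPU1_DIMM_F" }; /* made-up label */
        struct dimm_info *dimms[2 * 4] = { NULL }; /* 2 channels x 4 slots */
        struct dimm_info *p;

        dimms[1 * 4 + 1] = &d; /* channel 1, slot 1 -> offset 5 */

        p = dimm_ptr(dimms, 1 * 4 + 1);
        printf("%s\n", p ? p->label : "(none)");

        p = dimm_ptr(dimms, -22); /* -EINVAL from EDAC_DIMM_OFF */
        printf("%s\n", p ? p->label : "(none)");
        return 0;
    }
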
507 | 507 | ||
508 | struct dimm_info { | 508 | struct dimm_info { |
509 | struct device dev; | 509 | struct device dev; |
510 | 510 | ||
511 | char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ | 511 | char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ |
512 | 512 | ||
513 | /* Memory location data */ | 513 | /* Memory location data */ |
514 | unsigned location[EDAC_MAX_LAYERS]; | 514 | unsigned location[EDAC_MAX_LAYERS]; |
515 | 515 | ||
516 | struct mem_ctl_info *mci; /* the parent */ | 516 | struct mem_ctl_info *mci; /* the parent */ |
517 | 517 | ||
518 | u32 grain; /* granularity of reported error in bytes */ | 518 | u32 grain; /* granularity of reported error in bytes */ |
519 | enum dev_type dtype; /* memory device type */ | 519 | enum dev_type dtype; /* memory device type */ |
520 | enum mem_type mtype; /* memory dimm type */ | 520 | enum mem_type mtype; /* memory dimm type */ |
521 | enum edac_type edac_mode; /* EDAC mode for this dimm */ | 521 | enum edac_type edac_mode; /* EDAC mode for this dimm */ |
522 | 522 | ||
523 | u32 nr_pages; /* number of pages on this dimm */ | 523 | u32 nr_pages; /* number of pages on this dimm */ |
524 | 524 | ||
525 | unsigned csrow, cschannel; /* Points to the old API data */ | 525 | unsigned csrow, cschannel; /* Points to the old API data */ |
526 | }; | 526 | }; |
527 | 527 | ||
528 | /** | 528 | /** |
529 | * struct rank_info - contains the information for one DIMM rank | 529 | * struct rank_info - contains the information for one DIMM rank |
530 | * | 530 | * |
531 | * @chan_idx: channel number where the rank is (typically, 0 or 1) | 531 | * @chan_idx: channel number where the rank is (typically, 0 or 1) |
532 | * @ce_count: number of correctable errors for this rank | 532 | * @ce_count: number of correctable errors for this rank |
533 | * @csrow: A pointer to the chip select row structure (the parent | 533 | * @csrow: A pointer to the chip select row structure (the parent |
534 | * structure). The location of the rank is given by | 534 | * structure). The location of the rank is given by |
535 | * the (csrow->csrow_idx, chan_idx) vector. | 535 | * the (csrow->csrow_idx, chan_idx) vector. |
536 | * @dimm: A pointer to the DIMM structure, where the DIMM label | 536 | * @dimm: A pointer to the DIMM structure, where the DIMM label |
537 | * information is stored. | 537 | * information is stored. |
538 | * | 538 | * |
539 | * FIXME: Currently, the EDAC core model will assume one DIMM per rank. | 539 | * FIXME: Currently, the EDAC core model will assume one DIMM per rank. |
540 | * This is a bad assumption, but it makes this patch easier. Later | 540 | * This is a bad assumption, but it makes this patch easier. Later |
541 | * patches in this series will fix this issue. | 541 | * patches in this series will fix this issue. |
542 | */ | 542 | */ |
543 | struct rank_info { | 543 | struct rank_info { |
544 | int chan_idx; | 544 | int chan_idx; |
545 | struct csrow_info *csrow; | 545 | struct csrow_info *csrow; |
546 | struct dimm_info *dimm; | 546 | struct dimm_info *dimm; |
547 | 547 | ||
548 | u32 ce_count; /* Correctable Errors for this csrow */ | 548 | u32 ce_count; /* Correctable Errors for this csrow */ |
549 | }; | 549 | }; |
550 | 550 | ||
551 | struct csrow_info { | 551 | struct csrow_info { |
552 | struct device dev; | 552 | struct device dev; |
553 | 553 | ||
554 | /* Used only by edac_mc_find_csrow_by_page() */ | 554 | /* Used only by edac_mc_find_csrow_by_page() */ |
555 | unsigned long first_page; /* first page number in csrow */ | 555 | unsigned long first_page; /* first page number in csrow */ |
556 | unsigned long last_page; /* last page number in csrow */ | 556 | unsigned long last_page; /* last page number in csrow */ |
557 | unsigned long page_mask; /* used for interleaving - | 557 | unsigned long page_mask; /* used for interleaving - |
558 | * 0UL for non intlv */ | 558 | * 0UL for non intlv */ |
559 | 559 | ||
560 | int csrow_idx; /* the chip-select row */ | 560 | int csrow_idx; /* the chip-select row */ |
561 | 561 | ||
562 | u32 ue_count; /* Uncorrectable Errors for this csrow */ | 562 | u32 ue_count; /* Uncorrectable Errors for this csrow */ |
563 | u32 ce_count; /* Correctable Errors for this csrow */ | 563 | u32 ce_count; /* Correctable Errors for this csrow */ |
564 | 564 | ||
565 | struct mem_ctl_info *mci; /* the parent */ | 565 | struct mem_ctl_info *mci; /* the parent */ |
566 | 566 | ||
567 | /* channel information for this csrow */ | 567 | /* channel information for this csrow */ |
568 | u32 nr_channels; | 568 | u32 nr_channels; |
569 | struct rank_info **channels; | 569 | struct rank_info **channels; |
570 | }; | 570 | }; |
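
Taken together, csrow_info, rank_info and dimm_info form the legacy hierarchy: an mci owns nr_csrows rows, each row owns nr_channels ranks, and each rank points at its DIMM. A hypothetical walk over that view, using only the fields defined above (the function itself is illustrative):

        #include <linux/printk.h>

        /* Illustrative only: walk the legacy csrow-based hierarchy,
         * mci -> csrows[] -> channels[] -> dimm.
         */
        static void dump_csrow_view(struct mem_ctl_info *mci)
        {
                unsigned int row, chan;

                for (row = 0; row < mci->nr_csrows; row++) {
                        struct csrow_info *csrow = mci->csrows[row];

                        for (chan = 0; chan < csrow->nr_channels; chan++) {
                                struct rank_info *rank = csrow->channels[chan];

                                pr_debug("csrow %d chan %u: %u pages, %u CEs\n",
                                         csrow->csrow_idx, chan,
                                         rank->dimm->nr_pages, rank->ce_count);
                        }
                }
        }
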
571 | 571 | ||
572 | /* | 572 | /* |
573 | * struct errcount_attribute_data - used to store the various error counts | 573 | * struct errcount_attribute_data - used to store the various error counts |
574 | */ | 574 | */ |
575 | struct errcount_attribute_data { | 575 | struct errcount_attribute_data { |
576 | int n_layers; | 576 | int n_layers; |
577 | int pos[EDAC_MAX_LAYERS]; | 577 | int pos[EDAC_MAX_LAYERS]; |
578 | int layer0, layer1, layer2; | 578 | int layer0, layer1, layer2; |
579 | }; | 579 | }; |
580 | 580 | ||
581 | /** | 581 | /** |
582 | * edac_raw_error_desc - Raw error report structure | 582 | * edac_raw_error_desc - Raw error report structure |
583 | * @grain: minimum granularity for an error report, in bytes | 583 | * @grain: minimum granularity for an error report, in bytes |
584 | * @error_count: number of errors of the same type | 584 | * @error_count: number of errors of the same type |
585 | * @top_layer: top layer of the error (layer[0]) | 585 | * @top_layer: top layer of the error (layer[0]) |
586 | * @mid_layer: middle layer of the error (layer[1]) | 586 | * @mid_layer: middle layer of the error (layer[1]) |
587 | * @low_layer: low layer of the error (layer[2]) | 587 | * @low_layer: low layer of the error (layer[2]) |
588 | * @page_frame_number: page where the error happened | 588 | * @page_frame_number: page where the error happened |
589 | * @offset_in_page: page offset | 589 | * @offset_in_page: page offset |
590 | * @syndrome: syndrome of the error (or 0 if unknown or if | 590 | * @syndrome: syndrome of the error (or 0 if unknown or if |
591 | * the syndrome is not applicable) | 591 | * the syndrome is not applicable) |
592 | * @msg: error message | 592 | * @msg: error message |
593 | * @location: location of the error | 593 | * @location: location of the error |
594 | * @label: label of the affected DIMM(s) | 594 | * @label: label of the affected DIMM(s) |
595 | * @other_detail: other driver-specific detail about the error | 595 | * @other_detail: other driver-specific detail about the error |
596 | * @enable_per_layer_report: if false, the error affects all layers | 596 | * @enable_per_layer_report: if false, the error affects all layers |
597 | * (typically, a memory controller error) | 597 | * (typically, a memory controller error) |
598 | */ | 598 | */ |
599 | struct edac_raw_error_desc { | 599 | struct edac_raw_error_desc { |
600 | /* | 600 | /* |
601 | * NOTE: everything before grain won't be cleaned by | 601 | * NOTE: everything before grain won't be cleaned by |
602 | * edac_raw_error_desc_clean() | 602 | * edac_raw_error_desc_clean() |
603 | */ | 603 | */ |
604 | char location[LOCATION_SIZE]; | 604 | char location[LOCATION_SIZE]; |
605 | char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS]; | 605 | char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS]; |
606 | long grain; | 606 | long grain; |
607 | 607 | ||
608 | /* the vars below and grain will be cleaned on every new error report */ | 608 | /* the vars below and grain will be cleaned on every new error report */ |
609 | u16 error_count; | 609 | u16 error_count; |
610 | int top_layer; | 610 | int top_layer; |
611 | int mid_layer; | 611 | int mid_layer; |
612 | int low_layer; | 612 | int low_layer; |
613 | unsigned long page_frame_number; | 613 | unsigned long page_frame_number; |
614 | unsigned long offset_in_page; | 614 | unsigned long offset_in_page; |
615 | unsigned long syndrome; | 615 | unsigned long syndrome; |
616 | const char *msg; | 616 | const char *msg; |
617 | const char *other_detail; | 617 | const char *other_detail; |
618 | bool enable_per_layer_report; | 618 | bool enable_per_layer_report; |
619 | }; | 619 | }; |
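
The NOTE above encodes a split at grain: location and label persist across reports, while grain and everything after it are re-zeroed for each new error. A sketch of what that implies, assuming offsetof arithmetic; this is not the core's actual helper:

        #include <linux/stddef.h>       /* offsetof */
        #include <linux/string.h>       /* memset */

        /* Illustrative: zero 'grain' and every field after it, leaving
         * the persistent 'location' and 'label' buffers intact.
         */
        static void clean_error_desc(struct edac_raw_error_desc *e)
        {
                memset(&e->grain, 0,
                       sizeof(*e) - offsetof(struct edac_raw_error_desc, grain));
        }
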
620 | 620 | ||
621 | /* MEMORY controller information structure | 621 | /* MEMORY controller information structure |
622 | */ | 622 | */ |
623 | struct mem_ctl_info { | 623 | struct mem_ctl_info { |
624 | struct device dev; | 624 | struct device dev; |
625 | struct bus_type bus; | 625 | struct bus_type *bus; |
626 | 626 | ||
627 | struct list_head link; /* for global list of mem_ctl_info structs */ | 627 | struct list_head link; /* for global list of mem_ctl_info structs */ |
628 | 628 | ||
629 | struct module *owner; /* Module owner of this control struct */ | 629 | struct module *owner; /* Module owner of this control struct */ |
630 | 630 | ||
631 | unsigned long mtype_cap; /* memory types supported by mc */ | 631 | unsigned long mtype_cap; /* memory types supported by mc */ |
632 | unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ | 632 | unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ |
633 | unsigned long edac_cap; /* configuration capabilities - this is | 633 | unsigned long edac_cap; /* configuration capabilities - this is |
634 | * closely related to edac_ctl_cap. The | 634 | * closely related to edac_ctl_cap. The |
635 | * difference is that the controller may be | 635 | * difference is that the controller may be |
636 | * capable of s4ecd4ed which would be listed | 636 | * capable of s4ecd4ed which would be listed |
637 | * in edac_ctl_cap, but if channels aren't | 637 | * in edac_ctl_cap, but if channels aren't |
638 | * capable of s4ecd4ed then the edac_cap would | 638 | * capable of s4ecd4ed then the edac_cap would |
639 | * not have that capability. | 639 | * not have that capability. |
640 | */ | 640 | */ |
641 | unsigned long scrub_cap; /* chipset scrub capabilities */ | 641 | unsigned long scrub_cap; /* chipset scrub capabilities */ |
642 | enum scrub_type scrub_mode; /* current scrub mode */ | 642 | enum scrub_type scrub_mode; /* current scrub mode */ |
643 | 643 | ||
644 | /* Translates sdram memory scrub rate given in bytes/sec to the | 644 | /* Translates sdram memory scrub rate given in bytes/sec to the |
645 | internal representation and configures whatever else needs | 645 | internal representation and configures whatever else needs |
646 | to be configured. | 646 | to be configured. |
647 | */ | 647 | */ |
648 | int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw); | 648 | int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw); |
649 | 649 | ||
650 | /* Gets the current sdram memory scrub rate from the internal | 650 | /* Gets the current sdram memory scrub rate from the internal |
651 | representation and converts it to the closest matching | 651 | representation and converts it to the closest matching |
652 | bandwidth in bytes/sec. | 652 | bandwidth in bytes/sec. |
653 | */ | 653 | */ |
654 | int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); | 654 | int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); |
655 | 655 | ||
656 | 656 | ||
657 | /* pointer to edac checking routine */ | 657 | /* pointer to edac checking routine */ |
658 | void (*edac_check) (struct mem_ctl_info * mci); | 658 | void (*edac_check) (struct mem_ctl_info * mci); |
659 | 659 | ||
660 | /* | 660 | /* |
661 | * Remaps memory pages: controller pages to physical pages. | 661 | * Remaps memory pages: controller pages to physical pages. |
662 | * For most MCs, this will be NULL. | 662 | * For most MCs, this will be NULL. |
663 | */ | 663 | */ |
664 | /* FIXME - why not send the phys page to begin with? */ | 664 | /* FIXME - why not send the phys page to begin with? */ |
665 | unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, | 665 | unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, |
666 | unsigned long page); | 666 | unsigned long page); |
667 | int mc_idx; | 667 | int mc_idx; |
668 | struct csrow_info **csrows; | 668 | struct csrow_info **csrows; |
669 | unsigned nr_csrows, num_cschannel; | 669 | unsigned nr_csrows, num_cschannel; |
670 | 670 | ||
671 | /* | 671 | /* |
672 | * Memory Controller hierarchy | 672 | * Memory Controller hierarchy |
673 | * | 673 | * |
674 | * There are basically two types of memory controller: the ones that | 674 | * There are basically two types of memory controller: the ones that |
675 | * see memory sticks ("dimms"), and the ones that see memory ranks. | 675 | * see memory sticks ("dimms"), and the ones that see memory ranks. |
676 | * All old memory controllers enumerate memory per rank, but most | 676 | * All old memory controllers enumerate memory per rank, but most |
677 | * of the recent drivers enumerate memory per DIMM instead. | 677 | * of the recent drivers enumerate memory per DIMM instead. |
678 | * When the memory controller is per rank, csbased is true. | 678 | * When the memory controller is per rank, csbased is true. |
679 | */ | 679 | */ |
680 | unsigned n_layers; | 680 | unsigned n_layers; |
681 | struct edac_mc_layer *layers; | 681 | struct edac_mc_layer *layers; |
682 | bool csbased; | 682 | bool csbased; |
683 | 683 | ||
684 | /* | 684 | /* |
685 | * DIMM info. The entire csrow_info view will eventually be removed. | 685 | * DIMM info. The entire csrow_info view will eventually be removed. |
686 | */ | 686 | */ |
687 | unsigned tot_dimms; | 687 | unsigned tot_dimms; |
688 | struct dimm_info **dimms; | 688 | struct dimm_info **dimms; |
689 | 689 | ||
690 | /* | 690 | /* |
691 | * FIXME - what about controllers on other busses? - IDs must be | 691 | * FIXME - what about controllers on other busses? - IDs must be |
692 | * unique. dev pointer should be sufficiently unique, but | 692 | * unique. dev pointer should be sufficiently unique, but |
693 | * BUS:SLOT.FUNC numbers may not be unique. | 693 | * BUS:SLOT.FUNC numbers may not be unique. |
694 | */ | 694 | */ |
695 | struct device *pdev; | 695 | struct device *pdev; |
696 | const char *mod_name; | 696 | const char *mod_name; |
697 | const char *mod_ver; | 697 | const char *mod_ver; |
698 | const char *ctl_name; | 698 | const char *ctl_name; |
699 | const char *dev_name; | 699 | const char *dev_name; |
700 | void *pvt_info; | 700 | void *pvt_info; |
701 | unsigned long start_time; /* mci load start time (in jiffies) */ | 701 | unsigned long start_time; /* mci load start time (in jiffies) */ |
702 | 702 | ||
703 | /* | 703 | /* |
704 | * drivers shouldn't access those fields directly, as the core | 704 | * drivers shouldn't access those fields directly, as the core |
705 | * already handles that. | 705 | * already handles that. |
706 | */ | 706 | */ |
707 | u32 ce_noinfo_count, ue_noinfo_count; | 707 | u32 ce_noinfo_count, ue_noinfo_count; |
708 | u32 ue_mc, ce_mc; | 708 | u32 ue_mc, ce_mc; |
709 | u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; | 709 | u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; |
710 | 710 | ||
711 | struct completion complete; | 711 | struct completion complete; |
712 | 712 | ||
713 | /* Additional top controller level attributes, specified | 713 | /* Additional top controller level attributes, specified |
714 | * by the low level driver. | 714 | * by the low level driver. |
715 | * | 715 | * |
716 | * Set by the low level driver to provide attributes at the | 716 | * Set by the low level driver to provide attributes at the |
717 | * controller level. | 717 | * controller level. |
718 | * An array of structures, NULL terminated | 718 | * An array of structures, NULL terminated |
719 | * | 719 | * |
720 | * If attributes are desired, then set to array of attributes | 720 | * If attributes are desired, then set to array of attributes |
721 | * If no attributes are desired, leave NULL | 721 | * If no attributes are desired, leave NULL |
722 | */ | 722 | */ |
723 | const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; | 723 | const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; |
724 | 724 | ||
725 | /* work struct for this MC */ | 725 | /* work struct for this MC */ |
726 | struct delayed_work work; | 726 | struct delayed_work work; |
727 | 727 | ||
728 | /* | 728 | /* |
729 | * Used to report an error - being part of this global struct | 729 | * Used to report an error - being part of this global struct |
730 | * means the memory is allocated by the EDAC core | 730 | * means the memory is allocated by the EDAC core |
731 | */ | 731 | */ |
732 | struct edac_raw_error_desc error_desc; | 732 | struct edac_raw_error_desc error_desc; |
733 | 733 | ||
734 | /* the internal state of this controller instance */ | 734 | /* the internal state of this controller instance */ |
735 | int op_state; | 735 | int op_state; |
736 | 736 | ||
737 | #ifdef CONFIG_EDAC_DEBUG | 737 | #ifdef CONFIG_EDAC_DEBUG |
738 | struct dentry *debugfs; | 738 | struct dentry *debugfs; |
739 | u8 fake_inject_layer[EDAC_MAX_LAYERS]; | 739 | u8 fake_inject_layer[EDAC_MAX_LAYERS]; |
740 | u32 fake_inject_ue; | 740 | u32 fake_inject_ue; |
741 | u16 fake_inject_count; | 741 | u16 fake_inject_count; |
742 | #endif | 742 | #endif |
743 | }; | 743 | }; |
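
The two scrub-rate callbacks above speak bytes/sec in both directions, and by convention set_sdram_scrub_rate returns the rate actually programmed (which may be quantized). A hypothetical driver wiring, with an invented register layout and FAKE_* names, purely to show the contract:

        #include <linux/io.h>

        #define FAKE_SCRUB_UNIT 1024            /* invented: bytes/sec per register step */

        struct fake_pvt {
                void __iomem *scrub_reg;        /* hypothetical scrub-rate register */
        };

        static int fake_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
        {
                struct fake_pvt *pvt = mci->pvt_info;
                u32 step = bw / FAKE_SCRUB_UNIT;        /* quantize to hw units */

                writel(step, pvt->scrub_reg);
                return step * FAKE_SCRUB_UNIT;          /* rate actually programmed */
        }

        static int fake_get_sdram_scrub_rate(struct mem_ctl_info *mci)
        {
                struct fake_pvt *pvt = mci->pvt_info;

                return readl(pvt->scrub_reg) * FAKE_SCRUB_UNIT;
        }
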
744 | |||
745 | /* | ||
746 | * Maximum number of memory controllers in the coherent fabric. | ||
747 | */ | ||
748 | #define EDAC_MAX_MCS 16 | ||
744 | 749 | ||
745 | #endif | 750 | #endif |
746 | 751 |
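
The header change above (bus_type embedded, now a pointer) only works because the bus objects themselves move to static storage, which is where lockdep expects a lock class key to live; EDAC_MAX_MCS bounds the static array and mc_idx selects the per-controller slot. A simplified sketch of the registration side, following the commit description (bus naming and error unwinding elided):

        #include <linux/device.h>
        #include <linux/printk.h>

        /* One statically allocated bus per possible memory controller, so
         * bus_register() hands lockdep a key in .data rather than one
         * embedded in a dynamically allocated mem_ctl_info.
         */
        static struct bus_type mc_bus[EDAC_MAX_MCS];

        int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
        {
                if (mci->mc_idx >= EDAC_MAX_MCS) {
                        pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
                        return -ENODEV;
                }

                mci->bus = &mc_bus[mci->mc_idx];        /* was: embedded struct bus_type */

                /* ... name the bus, bus_register(mci->bus), device_add(), etc. ... */
                return 0;
        }
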