Blame view

arch/x86/kernel/cpu/mcheck/mce_amd.c 15.9 KB
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
1
  /*
952686643   Jacob Shin   [PATCH] x86_64: m...
2
   *  (c) 2005, 2006 Advanced Micro Devices, Inc.
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
3
4
5
6
7
8
9
10
   *  Your use of this code is subject to the terms and conditions of the
   *  GNU general public license version 2. See "COPYING" or
   *  http://www.gnu.org/licenses/gpl.html
   *
   *  Written by Jacob Shin - AMD, Inc.
   *
   *  Support : jacob.shin@amd.com
   *
952686643   Jacob Shin   [PATCH] x86_64: m...
11
12
   *  April 2006
   *     - added support for AMD Family 0x10 processors
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
13
   *
952686643   Jacob Shin   [PATCH] x86_64: m...
14
   *  All MC4_MISCi registers are shared between multi-cores
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
15
   */
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
16
  #include <linux/interrupt.h>
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
17
  #include <linux/notifier.h>
1cb2a8e17   Ingo Molnar   x86, mce: clean u...
18
  #include <linux/kobject.h>
34fa1967a   Hidetoshi Seto   x86, mce: trivial...
19
  #include <linux/percpu.h>
1cb2a8e17   Ingo Molnar   x86, mce: clean u...
20
21
  #include <linux/errno.h>
  #include <linux/sched.h>
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
22
  #include <linux/sysfs.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
23
  #include <linux/slab.h>
1cb2a8e17   Ingo Molnar   x86, mce: clean u...
24
25
26
  #include <linux/init.h>
  #include <linux/cpu.h>
  #include <linux/smp.h>
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
27
  #include <asm/apic.h>
1cb2a8e17   Ingo Molnar   x86, mce: clean u...
28
  #include <asm/idle.h>
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
29
30
  #include <asm/mce.h>
  #include <asm/msr.h>
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
31

/* Geometry of the AMD MCA threshold machinery handled by this driver. */
#define NR_BANKS          6		/* MCA banks scanned per CPU */
#define NR_BLOCKS         9		/* max threshold blocks per bank */
#define THRESHOLD_MAX     0xFFF		/* 12-bit error count field */
#define INT_TYPE_APIC     0x00020000	/* interrupt type: APIC */

/* Bit fields in the high half of a threshold block's MISC MSR. */
#define MASK_VALID_HI     0x80000000	/* block is implemented */
#define MASK_CNTP_HI      0x40000000	/* counter present */
#define MASK_LOCKED_HI    0x20000000	/* locked by BIOS */
#define MASK_LVTOFF_HI    0x00F00000	/* LVT offset (bits 23:20) */
#define MASK_COUNT_EN_HI  0x00080000	/* counter enable */
#define MASK_INT_TYPE_HI  0x00060000	/* interrupt type field */
#define MASK_OVERFLOW_HI  0x00010000	/* counter has overflowed */
#define MASK_ERR_COUNT_HI 0x00000FFF	/* current error count */

/* Low half: pointer (bits 31:24) to the bank's extended MISC blocks. */
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400	/* MSR base of extended blocks */
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
46

/*
 * struct threshold_block - state for one MCi_MISC threshold counter
 * @block:		block index within the bank
 * @bank:		MCA bank the block belongs to
 * @cpu:		CPU that owns the sysfs files for this block
 * @address:		MSR address of the block's MISC register
 * @interrupt_enable:	nonzero: raise an APIC interrupt on overflow
 * @threshold_limit:	error count at which the threshold event fires
 * @kobj:		embedded sysfs object (registered as "misc%i")
 * @miscj:		links all blocks belonging to the same bank
 */
struct threshold_block {
	unsigned int		block;
	unsigned int		bank;
	unsigned int		cpu;
	u32			address;
	u16			interrupt_enable;
	u16			threshold_limit;
	struct kobject		kobj;
	struct list_head	miscj;
};
/*
 * struct threshold_bank - per-bank container for threshold blocks
 * @kobj:	sysfs directory ("threshold_bank%i") under the mce device
 * @blocks:	first block of the bank; the rest hang off blocks->miscj
 * @cpus:	CPUs sharing this bank's sysfs files (shared banks only)
 */
struct threshold_bank {
	struct kobject		*kobj;
	struct threshold_block	*blocks;
	cpumask_var_t		cpus;
};
/* Per-CPU array of bank objects; entries stay NULL until created. */
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);

/* Bank 4 (northbridge) is shared between the cores of a package. */
static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};

static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
70
71
72
  /*
   * CPU Initialization
   */
/*
 * Argument block for threshold_restart_bank(), which runs on the
 * target CPU (directly or via smp_call_function_single()).
 */
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;		/* clear err count + overflow bit */
	int			set_lvt_off;	/* program lvt_off into the MSR */
	int			lvt_off;
	u16			old_limit;	/* previous limit, for delta update */
};
bbaff08dc   Robert Richter   mce, amd: Add hel...
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
  static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
  {
  	int msr = (hi & MASK_LVTOFF_HI) >> 20;
  
  	if (apic < 0) {
  		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
  		       "for bank %d, block %d (MSR%08X=0x%x%08x)
  ", b->cpu,
  		       b->bank, b->block, b->address, hi, lo);
  		return 0;
  	}
  
  	if (apic != msr) {
  		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
  		       "for bank %d, block %d (MSR%08X=0x%x%08x)
  ",
  		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
  		return 0;
  	}
  
  	return 1;
  };
/* must be called with correct cpu affinity */
/* Called via smp_call_function_single() */
/*
 * Re-program one threshold block's MISC MSR according to *tr:
 * reseed the error counter, optionally set the LVT offset, select the
 * interrupt type, and (re)enable the counter.
 */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		/* counter counts up; seed it so it overflows at the limit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	if (tr->set_lvt_off) {
		/* only write an offset the firmware agrees with */
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	tr->b->interrupt_enable ?
	    (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
	    (hi &= ~MASK_INT_TYPE_HI);

	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}
9c37c9d89   Robert Richter   mce, amd: Impleme...
139
140
141
142
143
144
145
146
147
148
149
  static void mce_threshold_block_init(struct threshold_block *b, int offset)
  {
  	struct thresh_restart tr = {
  		.b			= b,
  		.set_lvt_off		= 1,
  		.lvt_off		= offset,
  	};
  
  	b->threshold_limit		= THRESHOLD_MAX;
  	threshold_restart_bank(&tr);
  };
bbaff08dc   Robert Richter   mce, amd: Add hel...
150
151
152
153
154
155
156
157
  static int setup_APIC_mce(int reserved, int new)
  {
  	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
  					      APIC_EILVT_MSG_FIX, 0))
  		return new;
  
  	return reserved;
  }
/* cpu init entry point, called from mce.c with preempt off */
/*
 * Walk every bank/block of this CPU, record implemented banks in
 * bank_map, reserve the threshold LVT offset, and program each usable
 * block via mce_threshold_block_init().
 */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	struct threshold_block b;
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	int offset = -1;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			/* block 0 lives at MC[bank]_MISC; block 1 is found
			 * through the BLKPTR field; the rest are contiguous */
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;

				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);
			/* shared banks are owned by core 0 of the package */
			if (shared_bank[bank] && c->cpu_core_id)
				break;

			offset = setup_APIC_mce(offset,
						(high & MASK_LVTOFF_HI) >> 20);

			/* temporary block descriptor just for init */
			memset(&b, 0, sizeof(b));
			b.cpu		= cpu;
			b.bank		= bank;
			b.block		= block;
			b.address	= address;

			mce_threshold_block_init(&b, offset);
			mce_threshold_vector = amd_threshold_interrupt;
		}
	}
}
  
/*
 * APIC Interrupt Handler
 */

/*
 * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
 * the interrupt goes off when error_count reaches threshold_limit.
 * the handler will simply log mcelog w/ software defined bank number.
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct mce m;

	mce_setup(&m);

	/* assume first bank caused it */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			/* same block-address walk as mce_amd_feature_init() */
			if (block == 0) {
				address = MSR_IA32_MC0_MISC + bank * 4;
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else {
				++address;
			}

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					&__get_cpu_var(mce_poll_banks));

			/* first overflowed block wins; log it and stop */
			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				return;
			}
		}
	}
}
  
/*
 * Sysfs Interface
 */

/*
 * struct threshold_attr - sysfs attribute bound to a threshold block
 * @attr:  generic sysfs attribute (name, mode)
 * @show:  read handler; formats the block's field into @buf
 * @store: write handler; parses @buf and updates the block
 */
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};
/* Generate a sysfs show handler that prints one field as hex. */
#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lx\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
/*
 * Sysfs write handler for "interrupt_enable": parse a boolean and
 * reprogram the block's MSR on its owning CPU.
 */
static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (strict_strtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	/* zeroed tr: no reset, no limit change, no LVT update */
	memset(&tr, 0, sizeof(tr));
	tr.b		= b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
/*
 * Sysfs write handler for "threshold_limit": clamp the new limit to
 * [1, THRESHOLD_MAX] and reseed the counter on the owning CPU,
 * preserving the errors already counted (via tr.old_limit).
 */
static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (strict_strtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
/*
 * Argument/result pair for local_error_count_handler(), which runs on
 * the block's CPU via smp_call_function_single().
 */
struct threshold_block_cross_cpu {
	struct threshold_block	*tb;	/* in: block to read */
	long			retval;	/* out: current error count */
};
  
/*
 * Runs on the block's own CPU: read the MISC MSR and convert the raw
 * counter (seeded at THRESHOLD_MAX - limit) back into an error count.
 */
static void local_error_count_handler(void *_tbcc)
{
	struct threshold_block_cross_cpu *tbcc = _tbcc;
	struct threshold_block *b = tbcc->tb;
	u32 low, high;

	rdmsr(b->address, low, high);
	tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}
  
/*
 * Sysfs read handler for "error_count": fetch the live count from the
 * block's CPU and print it as hex.
 */
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	struct threshold_block_cross_cpu tbcc = { .tb = b, };

	smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
	return sprintf(buf, "%lx\n", tbcc.retval);
}
/*
 * Sysfs write handler for "error_count": any write resets the counter
 * (and overflow bit) on the owning CPU; the written value is ignored.
 */
static ssize_t store_error_count(struct threshold_block *b,
				 const char *buf, size_t count)
{
	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
	return 1;
}
/* Define a 0644 threshold_attr wired to show_<val>/store_<val>. */
#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};
RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

/* Attributes created for every threshold block kobject. */
static struct attribute *default_attrs[] = {
	&interrupt_enable.attr,
	&threshold_limit.attr,
	&error_count.attr,
	NULL
};
/* Map the generic sysfs objects back to our embedding structures. */
#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)
/* Generic sysfs show: dispatch to the attribute's show handler. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}
  
/* Generic sysfs store: dispatch to the attribute's store handler. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}
/* sysfs ops shared by all threshold block kobjects. */
static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

/* kobject type for threshold blocks; no release: blocks are kfree'd
 * explicitly in the deallocation paths. */
static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};
/*
 * Allocate and register the threshold block at @address, then recurse
 * to the bank's next block. Returns 0 on success (including blocks
 * that are simply skipped), negative errno on failure.
 */
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int bank,
					       unsigned int block,
					       u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	/* recursion terminates at the bank/block limits */
	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;	/* later blocks may still be valid */
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->threshold_limit	= THRESHOLD_MAX;

	INIT_LIST_HEAD(&b->miscj);

	/* first block becomes the list head; the rest chain onto it */
	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   "misc%i", block);
	if (err)
		goto out_free;
recurse:
	/* compute the next block's MSR address (same walk as init) */
	if (!block) {
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else {
		++address;
	}

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	/* announce only after the whole chain registered successfully */
	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}
/* Kick off block allocation for @bank starting at block 0's MISC MSR. */
static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
	return allocate_threshold_blocks(cpu, bank, 0,
					 MSR_IA32_MC0_MISC + bank * 4);
}
/* symlinks sibling shared banks to first core.  first core owns dir/files. */
/*
 * Create the sysfs representation of @bank for @cpu. For shared banks
 * on non-first cores this is just a symlink to the owning core's
 * directory; otherwise a full bank object with all its blocks is built
 * and linked into every sibling CPU. Returns 0 or negative errno.
 */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	int i, err = 0;
	struct threshold_bank *b = NULL;
	struct device *dev = mce_device[cpu];
	char name[32];

	sprintf(name, "threshold_bank%i", bank);

	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
		i = cpumask_first(cpu_llc_shared_mask(cpu));

		/* first core not up yet */
		if (cpu_data(i).cpu_core_id)
			goto out;

		/* already linked */
		if (per_cpu(threshold_banks, cpu)[bank])
			goto out;

		b = per_cpu(threshold_banks, i)[bank];

		if (!b)
			goto out;

		err = sysfs_create_link(&dev->kobj, b->kobj, name);
		if (err)
			goto out;

		cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
		per_cpu(threshold_banks, cpu)[bank] = b;

		goto out;
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}
	if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
		kfree(b);
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj)
		goto out_free;
#ifndef CONFIG_SMP
	cpumask_setall(b->cpus);
#else
	cpumask_set_cpu(cpu, b->cpus);
#endif

	per_cpu(threshold_banks, cpu)[bank] = b;

	err = local_allocate_threshold_blocks(cpu, bank);
	if (err)
		goto out_free;

	/* link every sibling CPU that shares this bank to our directory */
	for_each_cpu(i, b->cpus) {
		if (i == cpu)
			continue;

		dev = mce_device[i];
		if (dev)
			err = sysfs_create_link(&dev->kobj,b->kobj, name);
		if (err)
			goto out;

		per_cpu(threshold_banks, i)[bank] = b;
	}

	goto out;

out_free:
	per_cpu(threshold_banks, cpu)[bank] = NULL;
	free_cpumask_var(b->cpus);
	kfree(b);
out:
	return err;
}
  
/* create dir/files for all valid threshold banks */
/*
 * Create sysfs entries for every bank flagged in @cpu's bank_map.
 * Stops at the first failure and returns its error code.
 */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	int err = 0;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			return err;
	}

	return err;
}
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
588
589
590
591
592
/*
 * let's be hotplug friendly.
 * in case of multiple core processors, the first core always takes ownership
 *   of shared sysfs dir/files, and rest of the cores will be symlinked to it.
 */

/*
 * Tear down all threshold blocks of @bank on @cpu: drop each chained
 * block's kobject and free it, then free the list head itself.
 */
static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	/* walk the chained blocks (head->blocks itself is freed below) */
	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
be6b5a350   Chandra Seetharaman   [PATCH] cpu hotpl...
612
  /*
   * Tear down the sysfs representation of one threshold bank on @cpu.
   * If this CPU only held a symlink to a sibling's shared bank, just the
   * link is removed; otherwise all sibling symlinks are removed first,
   * then the blocks and finally the bank object itself are released.
   */
  static void threshold_remove_bank(unsigned int cpu, int bank)
  {
  	struct threshold_bank *b;
  	struct device *dev;
  	char name[32];
  	int i = 0;
  
  	b = per_cpu(threshold_banks, cpu)[bank];
  	if (!b)
  		return;
  	/* no blocks were ever attached: skip straight to freeing the bank */
  	if (!b->blocks)
  		goto free_out;
  
  	sprintf(name, "threshold_bank%i", bank);
  
  #ifdef CONFIG_SMP
  	/* sibling symlink */
  	if (shared_bank[bank] && b->blocks->cpu != cpu) {
  		/*
  		 * This CPU is not the owner of the shared bank: drop only
  		 * our symlink and our per-cpu pointer; the owning CPU
  		 * frees the real objects.
  		 */
  		sysfs_remove_link(&mce_device[cpu]->kobj, name);
  		per_cpu(threshold_banks, cpu)[bank] = NULL;
  
  		return;
  	}
  #endif
  
  	/* remove all sibling symlinks before unregistering */
  	for_each_cpu(i, b->cpus) {
  		if (i == cpu)
  			continue;
  
  		dev = mce_device[i];
  		if (dev)
  			sysfs_remove_link(&dev->kobj, name);
  
  		per_cpu(threshold_banks, i)[bank] = NULL;
  	}
  
  	deallocate_threshold_block(cpu, bank);
  
  free_out:
  	/* unhook from sysfs, then drop the last kobject reference */
  	kobject_del(b->kobj);
  	kobject_put(b->kobj);
  	free_cpumask_var(b->cpus);
  	kfree(b);
  	per_cpu(threshold_banks, cpu)[bank] = NULL;
  }
be6b5a350   Chandra Seetharaman   [PATCH] cpu hotpl...
655
  static void threshold_remove_device(unsigned int cpu)
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
656
  {
2903ee85c   Jacob Shin   [PATCH] x86_64: m...
657
  	unsigned int bank;
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
658
659
  
  	for (bank = 0; bank < NR_BANKS; ++bank) {
5a96f4a55   Yinghai Lu   x86: fix recursio...
660
  		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
661
662
663
  			continue;
  		threshold_remove_bank(cpu, bank);
  	}
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
664
  }
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
665
  /* get notified when a cpu comes on/off */
1cb2a8e17   Ingo Molnar   x86, mce: clean u...
666
667
  static void __cpuinit
  amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
668
  {
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
669
670
  	switch (action) {
  	case CPU_ONLINE:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
671
  	case CPU_ONLINE_FROZEN:
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
672
  		threshold_create_device(cpu);
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
673
674
  		break;
  	case CPU_DEAD:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
675
  	case CPU_DEAD_FROZEN:
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
676
677
678
679
680
  		threshold_remove_device(cpu);
  		break;
  	default:
  		break;
  	}
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
681
  }
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
682
683
  static __init int threshold_init_device(void)
  {
2903ee85c   Jacob Shin   [PATCH] x86_64: m...
684
  	unsigned lcpu = 0;
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
685

89b831ef8   Jacob Shin   [PATCH] x86_64: S...
686
687
  	/* to hit CPUs online before the notifier is up */
  	for_each_online_cpu(lcpu) {
fff2e89f1   Jacob Shin   [PATCH] x86_64: m...
688
  		int err = threshold_create_device(lcpu);
1cb2a8e17   Ingo Molnar   x86, mce: clean u...
689

89b831ef8   Jacob Shin   [PATCH] x86_64: S...
690
  		if (err)
fff2e89f1   Jacob Shin   [PATCH] x86_64: m...
691
  			return err;
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
692
  	}
8735728ef   Rafael J. Wysocki   x86 MCE: Fix CPU ...
693
  	threshold_cpu_callback = amd_64_threshold_cpu_callback;
1cb2a8e17   Ingo Molnar   x86, mce: clean u...
694

fff2e89f1   Jacob Shin   [PATCH] x86_64: m...
695
  	return 0;
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
696
  }
89b831ef8   Jacob Shin   [PATCH] x86_64: S...
697
  /* run threshold_init_device() during the device initcall phase */
  device_initcall(threshold_init_device);