Blame view

kernel/time/tick-broadcast.c 15.5 KB
f8381cba0   Thomas Gleixner   [PATCH] tick-mana...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
  /*
   * linux/kernel/time/tick-broadcast.c
   *
   * This file contains functions which emulate a local clock-event
   * device via a broadcast event source.
   *
   * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
   *
   * This code is licenced under the GPL version 2. For details see
   * kernel-base/COPYING.
   */
  #include <linux/cpu.h>
  #include <linux/err.h>
  #include <linux/hrtimer.h>
d7b906897   Russell King   [S390] genirq/clo...
17
  #include <linux/interrupt.h>
f8381cba0   Thomas Gleixner   [PATCH] tick-mana...
18
19
20
  #include <linux/percpu.h>
  #include <linux/profile.h>
  #include <linux/sched.h>
f8381cba0   Thomas Gleixner   [PATCH] tick-mana...
21
22
23
24
25
26
27
  
  #include "tick-internal.h"
  
  /*
   * Broadcast support for broken x86 hardware, where the local apic
   * timer stops in C3 state.
   */
a52f5c562   Dmitri Vorobiev   clockevents: tick...
28
/* The device used to deliver the tick to cpus whose local device stops. */
static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
/* Cpus whose local clock event device needs the periodic broadcast tick. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
/* Scratch bitmap; only touched with tick_broadcast_lock held. */
static DECLARE_BITMAP(tmpmask, NR_CPUS);
/* Serializes the masks and the broadcast device state. */
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
/* Set on BROADCAST_FORCE; prevents BROADCAST_OFF from disabling broadcast. */
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
f8381cba0   Thomas Gleixner   [PATCH] tick-mana...
40
  /*
289f480af   Ingo Molnar   [PATCH] Add debug...
41
42
43
44
45
46
   * Debugging: see timer_list.c
   */
struct tick_device *tick_get_broadcast_device(void)
{
	/* Accessor for the file local broadcast tick device. */
	return &tick_broadcast_device;
}
6b954823c   Rusty Russell   cpumask: convert ...
47
struct cpumask *tick_get_broadcast_mask(void)
{
	/* Expose the broadcast bitmap as a struct cpumask pointer. */
	return to_cpumask(tick_broadcast_mask);
}
  
  /*
f8381cba0   Thomas Gleixner   [PATCH] tick-mana...
53
54
55
56
   * Start the device in periodic mode
   */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	/* bc may be NULL when no broadcast device is registered yet. */
	if (bc)
		tick_setup_periodic(bc, 1);
}
  
  /*
   * Check, if the device can be utilized as broadcast device:
   */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	/*
	 * Reject the candidate when an equal or better rated broadcast
	 * device is already installed, or when the candidate itself
	 * stops in deep power states (C3STOP) and therefore cannot act
	 * as the wakeup source.
	 */
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;
	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	tick_broadcast_device.evtdev = dev;
	/* Cpus are already waiting for broadcast ticks: start right away. */
	if (!cpumask_empty(tick_get_broadcast_mask()))
		tick_broadcast_start_periodic(dev);
	/* 1: device was installed as the broadcast device. */
	return 1;
}
  
  /*
   * Check, if the device is the broadcast device
   */
  int tick_is_broadcast_device(struct clock_event_device *dev)
  {
  	return (dev && tick_broadcast_device.evtdev == dev);
  }
  
  /*
   * Check, if the device is disfunctional and a place holder, which
   * needs to be handled by the broadcast device.
   */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;	/* 0: device handles its own ticks */

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			/* NOTE(review): shadows the cpu parameter; both
			 * should be the current cpu here. */
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
  
  /*
6b954823c   Rusty Russell   cpumask: convert ...
124
   * Broadcast the event to the cpus, which are set in the mask (mangled).
f8381cba0   Thomas Gleixner   [PATCH] tick-mana...
125
   */
6b954823c   Rusty Russell   cpumask: convert ...
126
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask: deliver its tick
	 * directly via the local event handler instead of an IPI.
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}
	/* Remaining cpus are woken via the broadcast function. */
	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
  
  /*
   * Periodic broadcast:
   * - invoke the broadcast handlers
   */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	/* Only online cpus which are in the broadcast mask get a tick. */
	cpumask_and(to_cpumask(tmpmask),
		    cpu_online_mask, tick_get_broadcast_mask());
	tick_do_broadcast(to_cpumask(tmpmask));

	raw_spin_unlock(&tick_broadcast_lock);
}
  
  /*
   * Event handler for periodic broadcast ticks
   */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		/* 0 means the event was programmed in the future: done. */
		if (!clockevents_program_event(dev, next, false))
			return;
		/* Event already expired: deliver the tick and retry. */
		tick_do_periodic_broadcast();
	}
}
  
  /*
   * Powerstate information: The system enters/leaves a state, where
   * affected devices might stop
   */
f833bab87   Suresh Siddha   clockevent: Preve...
200
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	/* A non functional device is already driven by broadcast. */
	if (!tick_device_is_functional(dev))
		goto out;

	/* Remember whether the broadcast device was idle before the change. */
	bc_stopped = cpumask_empty(tick_get_broadcast_mask());

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_mask());
			/* Periodic broadcast takes over: stop the local dev. */
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		/* A forced broadcast cannot be switched off again. */
		if (!tick_broadcast_force &&
		    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			/* Restart the local periodic tick on this cpu. */
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	/* Start/stop the broadcast device on 0<->nonzero mask transitions. */
	if (cpumask_empty(tick_get_broadcast_mask())) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
  
  /*
   * Powerstate information: The system enters/leaves a state, where
   * affected devices might stop.
   */
  void tick_broadcast_on_off(unsigned long reason, int *oncpu)
  {
6b954823c   Rusty Russell   cpumask: convert ...
265
  	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
833df317f   Glauber Costa   clockevents: fix ...
266
  		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
72fcde966   Thomas Gleixner   Ignore bogus ACPI...
267
268
  		       "offline CPU #%d
  ", *oncpu);
bf020cb7b   Avi Kivity   time: simplify sm...
269
  	else
f833bab87   Suresh Siddha   clockevent: Preve...
270
  		tick_do_broadcast_on_off(&reason);
f8381cba0   Thomas Gleixner   [PATCH] tick-mana...
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
  }
  
  /*
   * Set the periodic handler depending on broadcast on/off
   */
  void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
  {
  	if (!broadcast)
  		dev->event_handler = tick_handle_periodic;
  	else
  		dev->event_handler = tick_handle_periodic_broadcast;
  }
  
  /*
   * Remove a CPU from broadcasting
   */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

	/* Last broadcast consumer gone: stop the broadcast device. */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_get_broadcast_mask()))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
303

6321dd60c   Thomas Gleixner   [PATCH] Save/rest...
304
305
306
307
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/* Stop the broadcast device for system suspend, if present. */
	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
  
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;	/* 1 when this cpu is broadcast driven */

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			/* Restart periodic broadcast when cpus depend on it. */
			if (!cpumask_empty(tick_get_broadcast_mask()))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_get_broadcast_mask());
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
344
#ifdef CONFIG_TICK_ONESHOT

/* FIXME: use cpumask_var_t. */
/* Cpus which are in deep idle and rely on the oneshot broadcast. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
347

289f480af   Ingo Molnar   [PATCH] Add debug...
348
  /*
6b954823c   Rusty Russell   cpumask: convert ...
349
   * Exposed for debugging: see timer_list.c
289f480af   Ingo Molnar   [PATCH] Add debug...
350
   */
6b954823c   Rusty Russell   cpumask: convert ...
351
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	/* Expose the oneshot broadcast bitmap as a struct cpumask pointer. */
	return to_cpumask(tick_broadcast_oneshot_mask);
}
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
355
356
357
  static int tick_broadcast_set_event(ktime_t expires, int force)
  {
  	struct clock_event_device *bc = tick_broadcast_device.evtdev;
1fb9b7d29   Thomas Gleixner   clockevents: prev...
358

d1748302f   Martin Schwidefsky   clockevents: Make...
359
  	return clockevents_program_event(bc, expires, force);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
360
  }
cd05a1f81   Thomas Gleixner   [PATCH] clockeven...
361
362
363
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	/* Put the broadcast device back into oneshot mode after resume. */
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
366
  /*
fb02fbc14   Thomas Gleixner   NOHZ: restart tic...
367
368
369
370
371
   * Called from irq_enter() when idle was interrupted to reenable the
   * per cpu device.
   */
  void tick_check_oneshot_broadcast(int cpu)
  {
6b954823c   Rusty Russell   cpumask: convert ...
372
  	if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
fb02fbc14   Thomas Gleixner   NOHZ: restart tic...
373
374
375
376
377
378
379
  		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
  
  		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
  	}
  }
  
  /*
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
380
381
382
383
384
   * Handle oneshot mode broadcasting
   */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(to_cpumask(tmpmask));
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpumask_set_cpu(cpu, to_cpumask(tmpmask));
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			/* Track earliest not-yet-expired event for rearming. */
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(to_cpumask(tmpmask));

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
  
  /*
   * Powerstate information: The system enters/leaves a state, where
   * affected devices might stop
   */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemtion disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	/* Device keeps running in deep idle: no broadcast needed. */
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/* Pull the broadcast expiry forward if necessary. */
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_clear_cpu(cpu,
					  tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			/* Reprogram the local device for its pending event. */
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
5590a536c   Thomas Gleixner   clockevents: fix ...
477
478
479
480
481
482
483
  /*
   * Reset the one shot broadcast for a cpu
   *
   * Called with tick_broadcast_lock held
   */
static void tick_broadcast_clear_oneshot(int cpu)
{
	/* Caller holds tick_broadcast_lock (see comment above). */
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
6b954823c   Rusty Russell   cpumask: convert ...
486
487
  static void tick_broadcast_init_next_event(struct cpumask *mask,
  					   ktime_t expires)
7300711e8   Thomas Gleixner   clockevents: broa...
488
489
490
  {
  	struct tick_device *td;
  	int cpu;
5db0e1e9e   Rusty Russell   cpumask: replace ...
491
  	for_each_cpu(cpu, mask) {
7300711e8   Thomas Gleixner   clockevents: broa...
492
493
494
495
496
  		td = &per_cpu(tick_cpu_device, cpu);
  		if (td->evtdev)
  			td->evtdev->next_event = expires;
  	}
  }
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
497
  /**
8dce39c23   Li Zefan   time: fix inconsi...
498
   * tick_broadcast_setup_oneshot - setup the broadcast device
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
499
500
501
   */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
		cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
		cpumask_or(tick_get_broadcast_oneshot_mask(),
			   tick_get_broadcast_oneshot_mask(),
			   to_cpumask(tmpmask));

		if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
			/* Fire at the next period for the waiting cpus. */
			tick_broadcast_init_next_event(to_cpumask(tmpmask),
						       tick_next_period);
			tick_broadcast_set_event(tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
  
  /*
   * Select oneshot operating mode for the broadcast device
   */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/* Record the mode first, then reconfigure the device if present. */
	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
  
  
  /*
   * Remove a dead CPU from broadcasting
   */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
27ce4cb4a   Thomas Gleixner   clockevents: prev...
577
578
579
580
581
582
583
  /*
   * Check, whether the broadcast device is in one shot mode
   */
  int tick_broadcast_oneshot_active(void)
  {
  	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
  }
3a142a067   Thomas Gleixner   clockevents: Prev...
584
585
586
587
588
589
590
591
592
  /*
   * Check whether the broadcast device supports oneshot.
   */
  bool tick_broadcast_oneshot_available(void)
  {
  	struct clock_event_device *bc = tick_broadcast_device.evtdev;
  
  	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
  }
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
593
  #endif