/* kernel/hung_task.c */
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
  /*
   * Detect Hung Task
   *
   * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
   *
   */
  
  #include <linux/mm.h>
  #include <linux/cpu.h>
  #include <linux/nmi.h>
  #include <linux/init.h>
  #include <linux/delay.h>
  #include <linux/freezer.h>
  #include <linux/kthread.h>
  #include <linux/lockdep.h>
9984de1a5   Paul Gortmaker   kernel: Map most ...
16
  #include <linux/export.h>
e162b39a3   Mandeep Singh Baines   softlockup: decou...
17
  #include <linux/sysctl.h>
41e85ce82   Oleg Nesterov   hung_task debuggi...
18
  #include <linux/utsname.h>
6a716c90a   Oleg Nesterov   hung_task debuggi...
19
  #include <trace/events/sched.h>
e162b39a3   Mandeep Singh Baines   softlockup: decou...
20
21
  
/*
 * The number of tasks checked:
 * (defaults to every possible PID, i.e. effectively "all tasks")
 */
int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;

/*
 * Limit number of tasks checked in a batch.
 *
 * This value controls the preemptibility of khungtaskd since preemption
 * is disabled during the critical section. It also controls the size of
 * the RCU grace period. So it needs to be upper-bound.
 */
#define HUNG_TASK_BATCHING 1024

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;

/*
 * Maximum number of "task blocked" warnings to print before going quiet.
 * check_hung_task() only decrements positive values, so a negative value
 * here means "warn without limit".
 */
int __read_mostly sysctl_hung_task_warnings = 10;
e162b39a3   Mandeep Singh Baines   softlockup: decou...
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
  
/* Set by the panic notifier below; checked to suppress reports after a crash. */
static int __read_mostly did_panic;

/* The khungtaskd thread; woken early when the timeout sysctl changes. */
static struct task_struct *watchdog_task;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * hung task is detected:
 */
unsigned int __read_mostly sysctl_hung_task_panic =
				CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;

/* Parse the "hung_task_panic=" boot parameter into the sysctl above. */
static int __init hung_task_panic_setup(char *str)
{
	sysctl_hung_task_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("hung_task_panic=", hung_task_panic_setup);

/* Panic notifier: remember that the system already crashed. */
static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = hung_task_panic,
};
17406b82d   Mandeep Singh Baines   softlockup: remov...
72
  static void check_hung_task(struct task_struct *t, unsigned long timeout)
e162b39a3   Mandeep Singh Baines   softlockup: decou...
73
74
  {
  	unsigned long switch_count = t->nvcsw + t->nivcsw;
cf2592f59   Frederic Weisbecker   softlockup: ensur...
75
76
  	/*
  	 * Ensure the task is not frozen.
f9fab10bb   Mandeep Singh Baines   hung_task: fix fa...
77
  	 * Also, skip vfork and any other user process that freezer should skip.
cf2592f59   Frederic Weisbecker   softlockup: ensur...
78
  	 */
f9fab10bb   Mandeep Singh Baines   hung_task: fix fa...
79
80
81
82
83
84
85
86
87
  	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
  	    return;
  
  	/*
  	 * When a freshly created task is scheduled once, changes its state to
  	 * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
  	 * musn't be checked.
  	 */
  	if (unlikely(!switch_count))
e162b39a3   Mandeep Singh Baines   softlockup: decou...
88
  		return;
17406b82d   Mandeep Singh Baines   softlockup: remov...
89
  	if (switch_count != t->last_switch_count) {
e162b39a3   Mandeep Singh Baines   softlockup: decou...
90
  		t->last_switch_count = switch_count;
e162b39a3   Mandeep Singh Baines   softlockup: decou...
91
92
  		return;
  	}
6a716c90a   Oleg Nesterov   hung_task debuggi...
93
94
  
  	trace_sched_process_hang(t);
e162b39a3   Mandeep Singh Baines   softlockup: decou...
95
96
  	if (!sysctl_hung_task_warnings)
  		return;
270750dbc   Aaron Tomlin   hung_task: Displa...
97
98
99
  
  	if (sysctl_hung_task_warnings > 0)
  		sysctl_hung_task_warnings--;
e162b39a3   Mandeep Singh Baines   softlockup: decou...
100
101
102
103
104
  
  	/*
  	 * Ok, the task did not get scheduled for more than 2 minutes,
  	 * complain:
  	 */
41e85ce82   Oleg Nesterov   hung_task debuggi...
105
106
107
108
109
110
111
112
113
114
115
  	pr_err("INFO: task %s:%d blocked for more than %ld seconds.
  ",
  		t->comm, t->pid, timeout);
  	pr_err("      %s %s %.*s
  ",
  		print_tainted(), init_utsname()->release,
  		(int)strcspn(init_utsname()->version, " "),
  		init_utsname()->version);
  	pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
  		" disables this message.
  ");
e162b39a3   Mandeep Singh Baines   softlockup: decou...
116
  	sched_show_task(t);
f1b499f02   John Kacur   lockdep: Remove _...
117
  	debug_show_held_locks(t);
e162b39a3   Mandeep Singh Baines   softlockup: decou...
118

e162b39a3   Mandeep Singh Baines   softlockup: decou...
119
  	touch_nmi_watchdog();
625056b65   Sasha Levin   hung task debuggi...
120
121
  	if (sysctl_hung_task_panic) {
  		trigger_all_cpu_backtrace();
e162b39a3   Mandeep Singh Baines   softlockup: decou...
122
  		panic("hung_task: blocked tasks");
625056b65   Sasha Levin   hung task debuggi...
123
  	}
e162b39a3   Mandeep Singh Baines   softlockup: decou...
124
125
126
  }
  
  /*
ce9dbe244   Mandeep Singh Baines   softlockup: check...
127
128
129
130
   * To avoid extending the RCU grace period for an unbounded amount of time,
   * periodically exit the critical section and enter a new one.
   *
   * For preemptible RCU it is sufficient to call rcu_read_unlock in order
6a103b0d4   John Kacur   lockup detector: ...
131
   * to exit the grace period. For classic RCU, a reschedule is required.
ce9dbe244   Mandeep Singh Baines   softlockup: check...
132
   */
6027ce497   Oleg Nesterov   hung_task: fix th...
133
  static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
ce9dbe244   Mandeep Singh Baines   softlockup: check...
134
  {
6027ce497   Oleg Nesterov   hung_task: fix th...
135
  	bool can_cont;
ce9dbe244   Mandeep Singh Baines   softlockup: check...
136
137
138
139
140
  	get_task_struct(g);
  	get_task_struct(t);
  	rcu_read_unlock();
  	cond_resched();
  	rcu_read_lock();
6027ce497   Oleg Nesterov   hung_task: fix th...
141
  	can_cont = pid_alive(g) && pid_alive(t);
ce9dbe244   Mandeep Singh Baines   softlockup: check...
142
143
  	put_task_struct(t);
  	put_task_struct(g);
6027ce497   Oleg Nesterov   hung_task: fix th...
144
145
  
  	return can_cont;
ce9dbe244   Mandeep Singh Baines   softlockup: check...
146
147
148
  }
  
/*
 * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
 * a really long time (120 seconds). If that happens, print out
 * a warning.
 */
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
	/* Overall scan budget, then per-RCU-section budget. */
	int max_count = sysctl_hung_task_check_count;
	int batch_count = HUNG_TASK_BATCHING;
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if (test_taint(TAINT_DIE) || did_panic)
		return;

	rcu_read_lock();
	do_each_thread(g, t) {
		if (!max_count--)
			goto unlock;
		/* Drop out of the RCU section periodically to bound the grace period. */
		if (!--batch_count) {
			batch_count = HUNG_TASK_BATCHING;
			/* g or t died during the break: abandon the walk. */
			if (!rcu_lock_break(g, t))
				goto unlock;
		}
		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
		if (t->state == TASK_UNINTERRUPTIBLE)
			check_hung_task(t, timeout);
	} while_each_thread(g, t);
 unlock:
	rcu_read_unlock();
}
17406b82d   Mandeep Singh Baines   softlockup: remov...
181
  static unsigned long timeout_jiffies(unsigned long timeout)
e162b39a3   Mandeep Singh Baines   softlockup: decou...
182
183
  {
  	/* timeout of 0 will disable the watchdog */
17406b82d   Mandeep Singh Baines   softlockup: remov...
184
  	return timeout ? timeout * HZ : MAX_SCHEDULE_TIMEOUT;
e162b39a3   Mandeep Singh Baines   softlockup: decou...
185
186
187
188
189
190
  }
  
  /*
   * Process updating of timeout sysctl
   */
  int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
8d65af789   Alexey Dobriyan   sysctl: remove "s...
191
  				  void __user *buffer,
e162b39a3   Mandeep Singh Baines   softlockup: decou...
192
193
194
  				  size_t *lenp, loff_t *ppos)
  {
  	int ret;
8d65af789   Alexey Dobriyan   sysctl: remove "s...
195
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
e162b39a3   Mandeep Singh Baines   softlockup: decou...
196
197
198
  
  	if (ret || !write)
  		goto out;
e162b39a3   Mandeep Singh Baines   softlockup: decou...
199
200
201
202
203
  	wake_up_process(watchdog_task);
  
   out:
  	return ret;
  }
/* One-shot flag: when set, the next watchdog wakeup skips its scan. */
static atomic_t reset_hung_task = ATOMic_INIT(0);

/*
 * Ask the detector to skip its next check cycle (e.g. after an event that
 * legitimately left tasks in D state for a long time).
 */
void reset_hung_task_detector(void)
{
	atomic_set(&reset_hung_task, 1);
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);
e162b39a3   Mandeep Singh Baines   softlockup: decou...
211
212
213
214
215
216
  /*
   * kthread which checks for tasks stuck in D state
   */
  static int watchdog(void *dummy)
  {
  	set_user_nice(current, 0);
e162b39a3   Mandeep Singh Baines   softlockup: decou...
217
218
  
  	for ( ; ; ) {
17406b82d   Mandeep Singh Baines   softlockup: remov...
219
  		unsigned long timeout = sysctl_hung_task_timeout_secs;
603a148f4   Mandeep Singh Baines   softlockup: fix p...
220

17406b82d   Mandeep Singh Baines   softlockup: remov...
221
222
  		while (schedule_timeout_interruptible(timeout_jiffies(timeout)))
  			timeout = sysctl_hung_task_timeout_secs;
603a148f4   Mandeep Singh Baines   softlockup: fix p...
223

8b414521b   Marcelo Tosatti   hung_task: add me...
224
225
  		if (atomic_xchg(&reset_hung_task, 0))
  			continue;
17406b82d   Mandeep Singh Baines   softlockup: remov...
226
  		check_hung_uninterruptible_tasks(timeout);
e162b39a3   Mandeep Singh Baines   softlockup: decou...
227
228
229
230
231
232
233
234
235
236
237
238
  	}
  
  	return 0;
  }
  
/* Register the panic notifier and start the khungtaskd watchdog thread. */
static int __init hung_task_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

	return 0;
}
subsys_initcall(hung_task_init);