Commit 1b2439dbb703ae8d95a9ce7ece6b7800b80f41f0

Authored by Arjan van de Ven
Committed by Ingo Molnar
1 parent b09c3e3f17

debug: add notifier chain debugging

During some development we suspected a case where we had left something
in a notifier chain that came from a module which had already been
unloaded, and that sort of thing is rather hard to track down.

This patch adds a very simple sanity check (which isn't all that
expensive) to make sure the notifier we're about to call actually
comes from either the kernel itself or from a still-loaded module,
avoiding a hard-to-chase-down crash.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
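To make the failure mode concrete, here is a small self-contained userspace sketch; it is illustrative only, not kernel code and not part of this patch. A notifier chain is just a singly linked list of callbacks, so a callback left behind by code that has since been unloaded is a dangling function pointer, and the next chain walk jumps straight into freed or unmapped memory. The looks_like_text() helper is a crude stand-in for kernel_text_address(), and all names below are invented for illustration.

/* Userspace analogue of a stale notifier and the sanity check added here. */
#include <stdio.h>
#include <stdint.h>

struct fake_notifier {
	int (*call)(void *data);
	struct fake_notifier *next;
};

static int good_callback(void *data)
{
	printf("good callback: %s\n", (const char *)data);
	return 0;
}

/* Crude stand-in for kernel_text_address(): only one function is "loaded". */
static int looks_like_text(int (*fn)(void *))
{
	return fn == good_callback;
}

static int call_chain(struct fake_notifier *nb, void *data)
{
	int ret = 0;

	for (; nb; nb = nb->next) {
		/* The debug check: skip callbacks that point outside known text. */
		if (!looks_like_text(nb->call)) {
			fprintf(stderr, "Invalid notifier skipped!\n");
			continue;
		}
		ret = nb->call(data);
	}
	return ret;
}

int main(void)
{
	/* A callback that lived in a "module" that has since been unloaded. */
	int (*stale)(void *) = (int (*)(void *))(uintptr_t)0xdeadbeef;
	struct fake_notifier bad  = { .call = stale,         .next = NULL };
	struct fake_notifier good = { .call = good_callback, .next = &bad  };

	/* Without the check, calling bad.call would crash; with it we only warn. */
	call_chain(&good, "event");
	return 0;
}

In the patch below, the registration-time check is unconditional, while the extra check in notifier_call_chain() is compiled in only under CONFIG_DEBUG_NOTIFIERS.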

Showing 2 changed files with 26 additions and 0 deletions

kernel/notifier.c:

1 #include <linux/kdebug.h> 1 #include <linux/kdebug.h>
2 #include <linux/kprobes.h> 2 #include <linux/kprobes.h>
3 #include <linux/module.h> 3 #include <linux/module.h>
4 #include <linux/notifier.h> 4 #include <linux/notifier.h>
5 #include <linux/rcupdate.h> 5 #include <linux/rcupdate.h>
6 #include <linux/vmalloc.h> 6 #include <linux/vmalloc.h>
7 #include <linux/reboot.h> 7 #include <linux/reboot.h>
8 8
9 /* 9 /*
10 * Notifier list for kernel code which wants to be called 10 * Notifier list for kernel code which wants to be called
11 * at shutdown. This is used to stop any idling DMA operations 11 * at shutdown. This is used to stop any idling DMA operations
12 * and the like. 12 * and the like.
13 */ 13 */
14 BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); 14 BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
15 15
16 /* 16 /*
17 * Notifier chain core routines. The exported routines below 17 * Notifier chain core routines. The exported routines below
18 * are layered on top of these, with appropriate locking added. 18 * are layered on top of these, with appropriate locking added.
19 */ 19 */
20 20
21 static int notifier_chain_register(struct notifier_block **nl, 21 static int notifier_chain_register(struct notifier_block **nl,
22 struct notifier_block *n) 22 struct notifier_block *n)
23 { 23 {
24 if (!kernel_text_address((unsigned long)n->notifier_call)) {
25 WARN(1, "Invalid notifier registered!");
26 return 0;
27 }
24 while ((*nl) != NULL) { 28 while ((*nl) != NULL) {
25 if (n->priority > (*nl)->priority) 29 if (n->priority > (*nl)->priority)
26 break; 30 break;
27 nl = &((*nl)->next); 31 nl = &((*nl)->next);
28 } 32 }
29 n->next = *nl; 33 n->next = *nl;
30 rcu_assign_pointer(*nl, n); 34 rcu_assign_pointer(*nl, n);
31 return 0; 35 return 0;
32 } 36 }
33 37
34 static int notifier_chain_cond_register(struct notifier_block **nl, 38 static int notifier_chain_cond_register(struct notifier_block **nl,
35 struct notifier_block *n) 39 struct notifier_block *n)
36 { 40 {
41 if (!kernel_text_address((unsigned long)n->notifier_call)) {
42 WARN(1, "Invalid notifier registered!");
43 return 0;
44 }
37 while ((*nl) != NULL) { 45 while ((*nl) != NULL) {
38 if ((*nl) == n) 46 if ((*nl) == n)
39 return 0; 47 return 0;
40 if (n->priority > (*nl)->priority) 48 if (n->priority > (*nl)->priority)
41 break; 49 break;
42 nl = &((*nl)->next); 50 nl = &((*nl)->next);
43 } 51 }
44 n->next = *nl; 52 n->next = *nl;
45 rcu_assign_pointer(*nl, n); 53 rcu_assign_pointer(*nl, n);
46 return 0; 54 return 0;
47 } 55 }
48 56
49 static int notifier_chain_unregister(struct notifier_block **nl, 57 static int notifier_chain_unregister(struct notifier_block **nl,
50 struct notifier_block *n) 58 struct notifier_block *n)
51 { 59 {
52 while ((*nl) != NULL) { 60 while ((*nl) != NULL) {
53 if ((*nl) == n) { 61 if ((*nl) == n) {
54 rcu_assign_pointer(*nl, n->next); 62 rcu_assign_pointer(*nl, n->next);
55 return 0; 63 return 0;
56 } 64 }
57 nl = &((*nl)->next); 65 nl = &((*nl)->next);
58 } 66 }
59 return -ENOENT; 67 return -ENOENT;
60 } 68 }
61 69
62 /** 70 /**
63 * notifier_call_chain - Informs the registered notifiers about an event. 71 * notifier_call_chain - Informs the registered notifiers about an event.
64 * @nl: Pointer to head of the blocking notifier chain 72 * @nl: Pointer to head of the blocking notifier chain
65 * @val: Value passed unmodified to notifier function 73 * @val: Value passed unmodified to notifier function
66 * @v: Pointer passed unmodified to notifier function 74 * @v: Pointer passed unmodified to notifier function
67 * @nr_to_call: Number of notifier functions to be called. Don't care 75 * @nr_to_call: Number of notifier functions to be called. Don't care
68 * value of this parameter is -1. 76 * value of this parameter is -1.
69 * @nr_calls: Records the number of notifications sent. Don't care 77 * @nr_calls: Records the number of notifications sent. Don't care
70 * value of this field is NULL. 78 * value of this field is NULL.
71 * @returns: notifier_call_chain returns the value returned by the 79 * @returns: notifier_call_chain returns the value returned by the
72 * last notifier function called. 80 * last notifier function called.
73 */ 81 */
74 static int __kprobes notifier_call_chain(struct notifier_block **nl, 82 static int __kprobes notifier_call_chain(struct notifier_block **nl,
75 unsigned long val, void *v, 83 unsigned long val, void *v,
76 int nr_to_call, int *nr_calls) 84 int nr_to_call, int *nr_calls)
77 { 85 {
78 int ret = NOTIFY_DONE; 86 int ret = NOTIFY_DONE;
79 struct notifier_block *nb, *next_nb; 87 struct notifier_block *nb, *next_nb;
80 88
81 nb = rcu_dereference(*nl); 89 nb = rcu_dereference(*nl);
82 90
83 while (nb && nr_to_call) { 91 while (nb && nr_to_call) {
84 next_nb = rcu_dereference(nb->next); 92 next_nb = rcu_dereference(nb->next);
93
94 #ifdef CONFIG_DEBUG_NOTIFIERS
95 if (!kernel_text_address((unsigned long)nb->notifier_call)) {
96 WARN(1, "Invalid notifier called!");
97 nb = next_nb;
98 continue;
99 }
100 #endif
85 ret = nb->notifier_call(nb, val, v); 101 ret = nb->notifier_call(nb, val, v);
86 102
87 if (nr_calls) 103 if (nr_calls)
88 (*nr_calls)++; 104 (*nr_calls)++;
89 105
90 if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) 106 if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
91 break; 107 break;
92 nb = next_nb; 108 nb = next_nb;
93 nr_to_call--; 109 nr_to_call--;
94 } 110 }
95 return ret; 111 return ret;
96 } 112 }
97 113
98 /* 114 /*
99 * Atomic notifier chain routines. Registration and unregistration 115 * Atomic notifier chain routines. Registration and unregistration
100 * use a spinlock, and call_chain is synchronized by RCU (no locks). 116 * use a spinlock, and call_chain is synchronized by RCU (no locks).
101 */ 117 */
102 118
103 /** 119 /**
104 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain 120 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
105 * @nh: Pointer to head of the atomic notifier chain 121 * @nh: Pointer to head of the atomic notifier chain
106 * @n: New entry in notifier chain 122 * @n: New entry in notifier chain
107 * 123 *
108 * Adds a notifier to an atomic notifier chain. 124 * Adds a notifier to an atomic notifier chain.
109 * 125 *
110 * Currently always returns zero. 126 * Currently always returns zero.
111 */ 127 */
112 int atomic_notifier_chain_register(struct atomic_notifier_head *nh, 128 int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
113 struct notifier_block *n) 129 struct notifier_block *n)
114 { 130 {
115 unsigned long flags; 131 unsigned long flags;
116 int ret; 132 int ret;
117 133
118 spin_lock_irqsave(&nh->lock, flags); 134 spin_lock_irqsave(&nh->lock, flags);
119 ret = notifier_chain_register(&nh->head, n); 135 ret = notifier_chain_register(&nh->head, n);
120 spin_unlock_irqrestore(&nh->lock, flags); 136 spin_unlock_irqrestore(&nh->lock, flags);
121 return ret; 137 return ret;
122 } 138 }
123 EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); 139 EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
124 140
125 /** 141 /**
126 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain 142 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
127 * @nh: Pointer to head of the atomic notifier chain 143 * @nh: Pointer to head of the atomic notifier chain
128 * @n: Entry to remove from notifier chain 144 * @n: Entry to remove from notifier chain
129 * 145 *
130 * Removes a notifier from an atomic notifier chain. 146 * Removes a notifier from an atomic notifier chain.
131 * 147 *
132 * Returns zero on success or %-ENOENT on failure. 148 * Returns zero on success or %-ENOENT on failure.
133 */ 149 */
134 int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, 150 int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
135 struct notifier_block *n) 151 struct notifier_block *n)
136 { 152 {
137 unsigned long flags; 153 unsigned long flags;
138 int ret; 154 int ret;
139 155
140 spin_lock_irqsave(&nh->lock, flags); 156 spin_lock_irqsave(&nh->lock, flags);
141 ret = notifier_chain_unregister(&nh->head, n); 157 ret = notifier_chain_unregister(&nh->head, n);
142 spin_unlock_irqrestore(&nh->lock, flags); 158 spin_unlock_irqrestore(&nh->lock, flags);
143 synchronize_rcu(); 159 synchronize_rcu();
144 return ret; 160 return ret;
145 } 161 }
146 EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); 162 EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
147 163
148 /** 164 /**
149 * __atomic_notifier_call_chain - Call functions in an atomic notifier chain 165 * __atomic_notifier_call_chain - Call functions in an atomic notifier chain
150 * @nh: Pointer to head of the atomic notifier chain 166 * @nh: Pointer to head of the atomic notifier chain
151 * @val: Value passed unmodified to notifier function 167 * @val: Value passed unmodified to notifier function
152 * @v: Pointer passed unmodified to notifier function 168 * @v: Pointer passed unmodified to notifier function
153 * @nr_to_call: See the comment for notifier_call_chain. 169 * @nr_to_call: See the comment for notifier_call_chain.
154 * @nr_calls: See the comment for notifier_call_chain. 170 * @nr_calls: See the comment for notifier_call_chain.
155 * 171 *
156 * Calls each function in a notifier chain in turn. The functions 172 * Calls each function in a notifier chain in turn. The functions
157 * run in an atomic context, so they must not block. 173 * run in an atomic context, so they must not block.
158 * This routine uses RCU to synchronize with changes to the chain. 174 * This routine uses RCU to synchronize with changes to the chain.
159 * 175 *
160 * If the return value of the notifier can be and'ed 176 * If the return value of the notifier can be and'ed
161 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain() 177 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
162 * will return immediately, with the return value of 178 * will return immediately, with the return value of
163 * the notifier function which halted execution. 179 * the notifier function which halted execution.
164 * Otherwise the return value is the return value 180 * Otherwise the return value is the return value
165 * of the last notifier function called. 181 * of the last notifier function called.
166 */ 182 */
167 int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, 183 int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
168 unsigned long val, void *v, 184 unsigned long val, void *v,
169 int nr_to_call, int *nr_calls) 185 int nr_to_call, int *nr_calls)
170 { 186 {
171 int ret; 187 int ret;
172 188
173 rcu_read_lock(); 189 rcu_read_lock();
174 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); 190 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
175 rcu_read_unlock(); 191 rcu_read_unlock();
176 return ret; 192 return ret;
177 } 193 }
178 EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); 194 EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
179 195
180 int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, 196 int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
181 unsigned long val, void *v) 197 unsigned long val, void *v)
182 { 198 {
183 return __atomic_notifier_call_chain(nh, val, v, -1, NULL); 199 return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
184 } 200 }
185 EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); 201 EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
186 202
187 /* 203 /*
188 * Blocking notifier chain routines. All access to the chain is 204 * Blocking notifier chain routines. All access to the chain is
189 * synchronized by an rwsem. 205 * synchronized by an rwsem.
190 */ 206 */
191 207
192 /** 208 /**
193 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain 209 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
194 * @nh: Pointer to head of the blocking notifier chain 210 * @nh: Pointer to head of the blocking notifier chain
195 * @n: New entry in notifier chain 211 * @n: New entry in notifier chain
196 * 212 *
197 * Adds a notifier to a blocking notifier chain. 213 * Adds a notifier to a blocking notifier chain.
198 * Must be called in process context. 214 * Must be called in process context.
199 * 215 *
200 * Currently always returns zero. 216 * Currently always returns zero.
201 */ 217 */
202 int blocking_notifier_chain_register(struct blocking_notifier_head *nh, 218 int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
203 struct notifier_block *n) 219 struct notifier_block *n)
204 { 220 {
205 int ret; 221 int ret;
206 222
207 /* 223 /*
208 * This code gets used during boot-up, when task switching is 224 * This code gets used during boot-up, when task switching is
209 * not yet working and interrupts must remain disabled. At 225 * not yet working and interrupts must remain disabled. At
210 * such times we must not call down_write(). 226 * such times we must not call down_write().
211 */ 227 */
212 if (unlikely(system_state == SYSTEM_BOOTING)) 228 if (unlikely(system_state == SYSTEM_BOOTING))
213 return notifier_chain_register(&nh->head, n); 229 return notifier_chain_register(&nh->head, n);
214 230
215 down_write(&nh->rwsem); 231 down_write(&nh->rwsem);
216 ret = notifier_chain_register(&nh->head, n); 232 ret = notifier_chain_register(&nh->head, n);
217 up_write(&nh->rwsem); 233 up_write(&nh->rwsem);
218 return ret; 234 return ret;
219 } 235 }
220 EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); 236 EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
221 237
222 /** 238 /**
223 * blocking_notifier_chain_cond_register - Cond add notifier to a blocking notifier chain 239 * blocking_notifier_chain_cond_register - Cond add notifier to a blocking notifier chain
224 * @nh: Pointer to head of the blocking notifier chain 240 * @nh: Pointer to head of the blocking notifier chain
225 * @n: New entry in notifier chain 241 * @n: New entry in notifier chain
226 * 242 *
227 * Adds a notifier to a blocking notifier chain, only if not already 243 * Adds a notifier to a blocking notifier chain, only if not already
228 * present in the chain. 244 * present in the chain.
229 * Must be called in process context. 245 * Must be called in process context.
230 * 246 *
231 * Currently always returns zero. 247 * Currently always returns zero.
232 */ 248 */
233 int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh, 249 int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh,
234 struct notifier_block *n) 250 struct notifier_block *n)
235 { 251 {
236 int ret; 252 int ret;
237 253
238 down_write(&nh->rwsem); 254 down_write(&nh->rwsem);
239 ret = notifier_chain_cond_register(&nh->head, n); 255 ret = notifier_chain_cond_register(&nh->head, n);
240 up_write(&nh->rwsem); 256 up_write(&nh->rwsem);
241 return ret; 257 return ret;
242 } 258 }
243 EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register); 259 EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register);
244 260
245 /** 261 /**
246 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain 262 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
247 * @nh: Pointer to head of the blocking notifier chain 263 * @nh: Pointer to head of the blocking notifier chain
248 * @n: Entry to remove from notifier chain 264 * @n: Entry to remove from notifier chain
249 * 265 *
250 * Removes a notifier from a blocking notifier chain. 266 * Removes a notifier from a blocking notifier chain.
251 * Must be called from process context. 267 * Must be called from process context.
252 * 268 *
253 * Returns zero on success or %-ENOENT on failure. 269 * Returns zero on success or %-ENOENT on failure.
254 */ 270 */
255 int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, 271 int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
256 struct notifier_block *n) 272 struct notifier_block *n)
257 { 273 {
258 int ret; 274 int ret;
259 275
260 /* 276 /*
261 * This code gets used during boot-up, when task switching is 277 * This code gets used during boot-up, when task switching is
262 * not yet working and interrupts must remain disabled. At 278 * not yet working and interrupts must remain disabled. At
263 * such times we must not call down_write(). 279 * such times we must not call down_write().
264 */ 280 */
265 if (unlikely(system_state == SYSTEM_BOOTING)) 281 if (unlikely(system_state == SYSTEM_BOOTING))
266 return notifier_chain_unregister(&nh->head, n); 282 return notifier_chain_unregister(&nh->head, n);
267 283
268 down_write(&nh->rwsem); 284 down_write(&nh->rwsem);
269 ret = notifier_chain_unregister(&nh->head, n); 285 ret = notifier_chain_unregister(&nh->head, n);
270 up_write(&nh->rwsem); 286 up_write(&nh->rwsem);
271 return ret; 287 return ret;
272 } 288 }
273 EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); 289 EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
274 290
275 /** 291 /**
276 * __blocking_notifier_call_chain - Call functions in a blocking notifier chain 292 * __blocking_notifier_call_chain - Call functions in a blocking notifier chain
277 * @nh: Pointer to head of the blocking notifier chain 293 * @nh: Pointer to head of the blocking notifier chain
278 * @val: Value passed unmodified to notifier function 294 * @val: Value passed unmodified to notifier function
279 * @v: Pointer passed unmodified to notifier function 295 * @v: Pointer passed unmodified to notifier function
280 * @nr_to_call: See comment for notifier_call_chain. 296 * @nr_to_call: See comment for notifier_call_chain.
281 * @nr_calls: See comment for notifier_call_chain. 297 * @nr_calls: See comment for notifier_call_chain.
282 * 298 *
283 * Calls each function in a notifier chain in turn. The functions 299 * Calls each function in a notifier chain in turn. The functions
284 * run in a process context, so they are allowed to block. 300 * run in a process context, so they are allowed to block.
285 * 301 *
286 * If the return value of the notifier can be and'ed 302 * If the return value of the notifier can be and'ed
287 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain() 303 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
288 * will return immediately, with the return value of 304 * will return immediately, with the return value of
289 * the notifier function which halted execution. 305 * the notifier function which halted execution.
290 * Otherwise the return value is the return value 306 * Otherwise the return value is the return value
291 * of the last notifier function called. 307 * of the last notifier function called.
292 */ 308 */
293 int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, 309 int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
294 unsigned long val, void *v, 310 unsigned long val, void *v,
295 int nr_to_call, int *nr_calls) 311 int nr_to_call, int *nr_calls)
296 { 312 {
297 int ret = NOTIFY_DONE; 313 int ret = NOTIFY_DONE;
298 314
299 /* 315 /*
300 * We check the head outside the lock, but if this access is 316 * We check the head outside the lock, but if this access is
301 * racy then it does not matter what the result of the test 317 * racy then it does not matter what the result of the test
302 * is, we re-check the list after having taken the lock anyway: 318 * is, we re-check the list after having taken the lock anyway:
303 */ 319 */
304 if (rcu_dereference(nh->head)) { 320 if (rcu_dereference(nh->head)) {
305 down_read(&nh->rwsem); 321 down_read(&nh->rwsem);
306 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, 322 ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
307 nr_calls); 323 nr_calls);
308 up_read(&nh->rwsem); 324 up_read(&nh->rwsem);
309 } 325 }
310 return ret; 326 return ret;
311 } 327 }
312 EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain); 328 EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
313 329
314 int blocking_notifier_call_chain(struct blocking_notifier_head *nh, 330 int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
315 unsigned long val, void *v) 331 unsigned long val, void *v)
316 { 332 {
317 return __blocking_notifier_call_chain(nh, val, v, -1, NULL); 333 return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
318 } 334 }
319 EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); 335 EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
320 336
321 /* 337 /*
322 * Raw notifier chain routines. There is no protection; 338 * Raw notifier chain routines. There is no protection;
323 * the caller must provide it. Use at your own risk! 339 * the caller must provide it. Use at your own risk!
324 */ 340 */
325 341
326 /** 342 /**
327 * raw_notifier_chain_register - Add notifier to a raw notifier chain 343 * raw_notifier_chain_register - Add notifier to a raw notifier chain
328 * @nh: Pointer to head of the raw notifier chain 344 * @nh: Pointer to head of the raw notifier chain
329 * @n: New entry in notifier chain 345 * @n: New entry in notifier chain
330 * 346 *
331 * Adds a notifier to a raw notifier chain. 347 * Adds a notifier to a raw notifier chain.
332 * All locking must be provided by the caller. 348 * All locking must be provided by the caller.
333 * 349 *
334 * Currently always returns zero. 350 * Currently always returns zero.
335 */ 351 */
336 int raw_notifier_chain_register(struct raw_notifier_head *nh, 352 int raw_notifier_chain_register(struct raw_notifier_head *nh,
337 struct notifier_block *n) 353 struct notifier_block *n)
338 { 354 {
339 return notifier_chain_register(&nh->head, n); 355 return notifier_chain_register(&nh->head, n);
340 } 356 }
341 EXPORT_SYMBOL_GPL(raw_notifier_chain_register); 357 EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
342 358
343 /** 359 /**
344 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain 360 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
345 * @nh: Pointer to head of the raw notifier chain 361 * @nh: Pointer to head of the raw notifier chain
346 * @n: Entry to remove from notifier chain 362 * @n: Entry to remove from notifier chain
347 * 363 *
348 * Removes a notifier from a raw notifier chain. 364 * Removes a notifier from a raw notifier chain.
349 * All locking must be provided by the caller. 365 * All locking must be provided by the caller.
350 * 366 *
351 * Returns zero on success or %-ENOENT on failure. 367 * Returns zero on success or %-ENOENT on failure.
352 */ 368 */
353 int raw_notifier_chain_unregister(struct raw_notifier_head *nh, 369 int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
354 struct notifier_block *n) 370 struct notifier_block *n)
355 { 371 {
356 return notifier_chain_unregister(&nh->head, n); 372 return notifier_chain_unregister(&nh->head, n);
357 } 373 }
358 EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); 374 EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
359 375
360 /** 376 /**
361 * __raw_notifier_call_chain - Call functions in a raw notifier chain 377 * __raw_notifier_call_chain - Call functions in a raw notifier chain
362 * @nh: Pointer to head of the raw notifier chain 378 * @nh: Pointer to head of the raw notifier chain
363 * @val: Value passed unmodified to notifier function 379 * @val: Value passed unmodified to notifier function
364 * @v: Pointer passed unmodified to notifier function 380 * @v: Pointer passed unmodified to notifier function
365 * @nr_to_call: See comment for notifier_call_chain. 381 * @nr_to_call: See comment for notifier_call_chain.
366 * @nr_calls: See comment for notifier_call_chain 382 * @nr_calls: See comment for notifier_call_chain
367 * 383 *
368 * Calls each function in a notifier chain in turn. The functions 384 * Calls each function in a notifier chain in turn. The functions
369 * run in an undefined context. 385 * run in an undefined context.
370 * All locking must be provided by the caller. 386 * All locking must be provided by the caller.
371 * 387 *
372 * If the return value of the notifier can be and'ed 388 * If the return value of the notifier can be and'ed
373 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain() 389 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
374 * will return immediately, with the return value of 390 * will return immediately, with the return value of
375 * the notifier function which halted execution. 391 * the notifier function which halted execution.
376 * Otherwise the return value is the return value 392 * Otherwise the return value is the return value
377 * of the last notifier function called. 393 * of the last notifier function called.
378 */ 394 */
379 int __raw_notifier_call_chain(struct raw_notifier_head *nh, 395 int __raw_notifier_call_chain(struct raw_notifier_head *nh,
380 unsigned long val, void *v, 396 unsigned long val, void *v,
381 int nr_to_call, int *nr_calls) 397 int nr_to_call, int *nr_calls)
382 { 398 {
383 return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); 399 return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
384 } 400 }
385 EXPORT_SYMBOL_GPL(__raw_notifier_call_chain); 401 EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
386 402
387 int raw_notifier_call_chain(struct raw_notifier_head *nh, 403 int raw_notifier_call_chain(struct raw_notifier_head *nh,
388 unsigned long val, void *v) 404 unsigned long val, void *v)
389 { 405 {
390 return __raw_notifier_call_chain(nh, val, v, -1, NULL); 406 return __raw_notifier_call_chain(nh, val, v, -1, NULL);
391 } 407 }
392 EXPORT_SYMBOL_GPL(raw_notifier_call_chain); 408 EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
393 409
394 /* 410 /*
395 * SRCU notifier chain routines. Registration and unregistration 411 * SRCU notifier chain routines. Registration and unregistration
396 * use a mutex, and call_chain is synchronized by SRCU (no locks). 412 * use a mutex, and call_chain is synchronized by SRCU (no locks).
397 */ 413 */
398 414
399 /** 415 /**
400 * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain 416 * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
401 * @nh: Pointer to head of the SRCU notifier chain 417 * @nh: Pointer to head of the SRCU notifier chain
402 * @n: New entry in notifier chain 418 * @n: New entry in notifier chain
403 * 419 *
404 * Adds a notifier to an SRCU notifier chain. 420 * Adds a notifier to an SRCU notifier chain.
405 * Must be called in process context. 421 * Must be called in process context.
406 * 422 *
407 * Currently always returns zero. 423 * Currently always returns zero.
408 */ 424 */
409 int srcu_notifier_chain_register(struct srcu_notifier_head *nh, 425 int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
410 struct notifier_block *n) 426 struct notifier_block *n)
411 { 427 {
412 int ret; 428 int ret;
413 429
414 /* 430 /*
415 * This code gets used during boot-up, when task switching is 431 * This code gets used during boot-up, when task switching is
416 * not yet working and interrupts must remain disabled. At 432 * not yet working and interrupts must remain disabled. At
417 * such times we must not call mutex_lock(). 433 * such times we must not call mutex_lock().
418 */ 434 */
419 if (unlikely(system_state == SYSTEM_BOOTING)) 435 if (unlikely(system_state == SYSTEM_BOOTING))
420 return notifier_chain_register(&nh->head, n); 436 return notifier_chain_register(&nh->head, n);
421 437
422 mutex_lock(&nh->mutex); 438 mutex_lock(&nh->mutex);
423 ret = notifier_chain_register(&nh->head, n); 439 ret = notifier_chain_register(&nh->head, n);
424 mutex_unlock(&nh->mutex); 440 mutex_unlock(&nh->mutex);
425 return ret; 441 return ret;
426 } 442 }
427 EXPORT_SYMBOL_GPL(srcu_notifier_chain_register); 443 EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
428 444
429 /** 445 /**
430 * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain 446 * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
431 * @nh: Pointer to head of the SRCU notifier chain 447 * @nh: Pointer to head of the SRCU notifier chain
432 * @n: Entry to remove from notifier chain 448 * @n: Entry to remove from notifier chain
433 * 449 *
434 * Removes a notifier from an SRCU notifier chain. 450 * Removes a notifier from an SRCU notifier chain.
435 * Must be called from process context. 451 * Must be called from process context.
436 * 452 *
437 * Returns zero on success or %-ENOENT on failure. 453 * Returns zero on success or %-ENOENT on failure.
438 */ 454 */
439 int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, 455 int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
440 struct notifier_block *n) 456 struct notifier_block *n)
441 { 457 {
442 int ret; 458 int ret;
443 459
444 /* 460 /*
445 * This code gets used during boot-up, when task switching is 461 * This code gets used during boot-up, when task switching is
446 * not yet working and interrupts must remain disabled. At 462 * not yet working and interrupts must remain disabled. At
447 * such times we must not call mutex_lock(). 463 * such times we must not call mutex_lock().
448 */ 464 */
449 if (unlikely(system_state == SYSTEM_BOOTING)) 465 if (unlikely(system_state == SYSTEM_BOOTING))
450 return notifier_chain_unregister(&nh->head, n); 466 return notifier_chain_unregister(&nh->head, n);
451 467
452 mutex_lock(&nh->mutex); 468 mutex_lock(&nh->mutex);
453 ret = notifier_chain_unregister(&nh->head, n); 469 ret = notifier_chain_unregister(&nh->head, n);
454 mutex_unlock(&nh->mutex); 470 mutex_unlock(&nh->mutex);
455 synchronize_srcu(&nh->srcu); 471 synchronize_srcu(&nh->srcu);
456 return ret; 472 return ret;
457 } 473 }
458 EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); 474 EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
459 475
460 /** 476 /**
461 * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain 477 * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain
462 * @nh: Pointer to head of the SRCU notifier chain 478 * @nh: Pointer to head of the SRCU notifier chain
463 * @val: Value passed unmodified to notifier function 479 * @val: Value passed unmodified to notifier function
464 * @v: Pointer passed unmodified to notifier function 480 * @v: Pointer passed unmodified to notifier function
465 * @nr_to_call: See comment for notifier_call_chain. 481 * @nr_to_call: See comment for notifier_call_chain.
466 * @nr_calls: See comment for notifier_call_chain 482 * @nr_calls: See comment for notifier_call_chain
467 * 483 *
468 * Calls each function in a notifier chain in turn. The functions 484 * Calls each function in a notifier chain in turn. The functions
469 * run in a process context, so they are allowed to block. 485 * run in a process context, so they are allowed to block.
470 * 486 *
471 * If the return value of the notifier can be and'ed 487 * If the return value of the notifier can be and'ed
472 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain() 488 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
473 * will return immediately, with the return value of 489 * will return immediately, with the return value of
474 * the notifier function which halted execution. 490 * the notifier function which halted execution.
475 * Otherwise the return value is the return value 491 * Otherwise the return value is the return value
476 * of the last notifier function called. 492 * of the last notifier function called.
477 */ 493 */
478 int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, 494 int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
479 unsigned long val, void *v, 495 unsigned long val, void *v,
480 int nr_to_call, int *nr_calls) 496 int nr_to_call, int *nr_calls)
481 { 497 {
482 int ret; 498 int ret;
483 int idx; 499 int idx;
484 500
485 idx = srcu_read_lock(&nh->srcu); 501 idx = srcu_read_lock(&nh->srcu);
486 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); 502 ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
487 srcu_read_unlock(&nh->srcu, idx); 503 srcu_read_unlock(&nh->srcu, idx);
488 return ret; 504 return ret;
489 } 505 }
490 EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain); 506 EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
491 507
492 int srcu_notifier_call_chain(struct srcu_notifier_head *nh, 508 int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
493 unsigned long val, void *v) 509 unsigned long val, void *v)
494 { 510 {
495 return __srcu_notifier_call_chain(nh, val, v, -1, NULL); 511 return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
496 } 512 }
497 EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); 513 EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
498 514
499 /** 515 /**
500 * srcu_init_notifier_head - Initialize an SRCU notifier head 516 * srcu_init_notifier_head - Initialize an SRCU notifier head
501 * @nh: Pointer to head of the srcu notifier chain 517 * @nh: Pointer to head of the srcu notifier chain
502 * 518 *
503 * Unlike other sorts of notifier heads, SRCU notifier heads require 519 * Unlike other sorts of notifier heads, SRCU notifier heads require
504 * dynamic initialization. Be sure to call this routine before 520 * dynamic initialization. Be sure to call this routine before
505 * calling any of the other SRCU notifier routines for this head. 521 * calling any of the other SRCU notifier routines for this head.
506 * 522 *
507 * If an SRCU notifier head is deallocated, it must first be cleaned 523 * If an SRCU notifier head is deallocated, it must first be cleaned
508 * up by calling srcu_cleanup_notifier_head(). Otherwise the head's 524 * up by calling srcu_cleanup_notifier_head(). Otherwise the head's
509 * per-cpu data (used by the SRCU mechanism) will leak. 525 * per-cpu data (used by the SRCU mechanism) will leak.
510 */ 526 */
511 void srcu_init_notifier_head(struct srcu_notifier_head *nh) 527 void srcu_init_notifier_head(struct srcu_notifier_head *nh)
512 { 528 {
513 mutex_init(&nh->mutex); 529 mutex_init(&nh->mutex);
514 if (init_srcu_struct(&nh->srcu) < 0) 530 if (init_srcu_struct(&nh->srcu) < 0)
515 BUG(); 531 BUG();
516 nh->head = NULL; 532 nh->head = NULL;
517 } 533 }
518 EXPORT_SYMBOL_GPL(srcu_init_notifier_head); 534 EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
519 535
520 /** 536 /**
521 * register_reboot_notifier - Register function to be called at reboot time 537 * register_reboot_notifier - Register function to be called at reboot time
522 * @nb: Info about notifier function to be called 538 * @nb: Info about notifier function to be called
523 * 539 *
524 * Registers a function with the list of functions 540 * Registers a function with the list of functions
525 * to be called at reboot time. 541 * to be called at reboot time.
526 * 542 *
527 * Currently always returns zero, as blocking_notifier_chain_register() 543 * Currently always returns zero, as blocking_notifier_chain_register()
528 * always returns zero. 544 * always returns zero.
529 */ 545 */
530 int register_reboot_notifier(struct notifier_block *nb) 546 int register_reboot_notifier(struct notifier_block *nb)
531 { 547 {
532 return blocking_notifier_chain_register(&reboot_notifier_list, nb); 548 return blocking_notifier_chain_register(&reboot_notifier_list, nb);
533 } 549 }
534 EXPORT_SYMBOL(register_reboot_notifier); 550 EXPORT_SYMBOL(register_reboot_notifier);
535 551
536 /** 552 /**
537 * unregister_reboot_notifier - Unregister previously registered reboot notifier 553 * unregister_reboot_notifier - Unregister previously registered reboot notifier
538 * @nb: Hook to be unregistered 554 * @nb: Hook to be unregistered
539 * 555 *
540 * Unregisters a previously registered reboot 556 * Unregisters a previously registered reboot
541 * notifier function. 557 * notifier function.
542 * 558 *
543 * Returns zero on success, or %-ENOENT on failure. 559 * Returns zero on success, or %-ENOENT on failure.
544 */ 560 */
545 int unregister_reboot_notifier(struct notifier_block *nb) 561 int unregister_reboot_notifier(struct notifier_block *nb)
546 { 562 {
547 return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); 563 return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
548 } 564 }
549 EXPORT_SYMBOL(unregister_reboot_notifier); 565 EXPORT_SYMBOL(unregister_reboot_notifier);
550 566
551 static ATOMIC_NOTIFIER_HEAD(die_chain); 567 static ATOMIC_NOTIFIER_HEAD(die_chain);
552 568
553 int notify_die(enum die_val val, const char *str, 569 int notify_die(enum die_val val, const char *str,
554 struct pt_regs *regs, long err, int trap, int sig) 570 struct pt_regs *regs, long err, int trap, int sig)
555 { 571 {
556 struct die_args args = { 572 struct die_args args = {
557 .regs = regs, 573 .regs = regs,
558 .str = str, 574 .str = str,
559 .err = err, 575 .err = err,
560 .trapnr = trap, 576 .trapnr = trap,
561 .signr = sig, 577 .signr = sig,
562 578
563 }; 579 };
564 return atomic_notifier_call_chain(&die_chain, val, &args); 580 return atomic_notifier_call_chain(&die_chain, val, &args);
565 } 581 }
566 582
567 int register_die_notifier(struct notifier_block *nb) 583 int register_die_notifier(struct notifier_block *nb)
568 { 584 {
569 vmalloc_sync_all(); 585 vmalloc_sync_all();
570 return atomic_notifier_chain_register(&die_chain, nb); 586 return atomic_notifier_chain_register(&die_chain, nb);
571 } 587 }
572 EXPORT_SYMBOL_GPL(register_die_notifier); 588 EXPORT_SYMBOL_GPL(register_die_notifier);
573 589
574 int unregister_die_notifier(struct notifier_block *nb) 590 int unregister_die_notifier(struct notifier_block *nb)
575 { 591 {
576 return atomic_notifier_chain_unregister(&die_chain, nb); 592 return atomic_notifier_chain_unregister(&die_chain, nb);
577 } 593 }
578 EXPORT_SYMBOL_GPL(unregister_die_notifier); 594 EXPORT_SYMBOL_GPL(unregister_die_notifier);
579 595
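For context on how these chains are consumed, the sketch below shows the usual client pattern against the reboot chain exported in kernel/notifier.c: fill in a struct notifier_block, register it, and unregister it again on module unload. Forgetting the unregister step is exactly how a dangling callback ends up in a chain. This is an illustrative out-of-tree module, not part of the commit, and the my_* names are invented.

/* Minimal example module hooking the reboot notifier chain (illustrative). */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_reboot_handler(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	printk(KERN_INFO "example: reboot notifier called, action=%lu\n", action);
	return NOTIFY_DONE;		/* nothing to veto or modify */
}

static struct notifier_block my_reboot_nb = {
	.notifier_call	= my_reboot_handler,
	.priority	= 0,		/* chains are kept sorted, highest priority first */
};

static int __init my_example_init(void)
{
	return register_reboot_notifier(&my_reboot_nb);
}

static void __exit my_example_exit(void)
{
	/* Skipping this leaves a stale pointer in reboot_notifier_list. */
	unregister_reboot_notifier(&my_reboot_nb);
}

module_init(my_example_init);
module_exit(my_example_exit);
MODULE_LICENSE("GPL");

Higher .priority values are called first (see the insertion loop in notifier_chain_register() above), and any handler can stop the walk early by returning a value with NOTIFY_STOP_MASK set.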
lib/Kconfig.debug:

1 1
2 config PRINTK_TIME 2 config PRINTK_TIME
3 bool "Show timing information on printks" 3 bool "Show timing information on printks"
4 depends on PRINTK 4 depends on PRINTK
5 help 5 help
6 Selecting this option causes timing information to be 6 Selecting this option causes timing information to be
7 included in printk output. This allows you to measure 7 included in printk output. This allows you to measure
8 the interval between kernel operations, including bootup 8 the interval between kernel operations, including bootup
9 operations. This is useful for identifying long delays 9 operations. This is useful for identifying long delays
10 in kernel startup. 10 in kernel startup.
11 11
12 config ENABLE_WARN_DEPRECATED 12 config ENABLE_WARN_DEPRECATED
13 bool "Enable __deprecated logic" 13 bool "Enable __deprecated logic"
14 default y 14 default y
15 help 15 help
16 Enable the __deprecated logic in the kernel build. 16 Enable the __deprecated logic in the kernel build.
17 Disable this to suppress the "warning: 'foo' is deprecated 17 Disable this to suppress the "warning: 'foo' is deprecated
18 (declared at kernel/power/somefile.c:1234)" messages. 18 (declared at kernel/power/somefile.c:1234)" messages.
19 19
20 config ENABLE_MUST_CHECK 20 config ENABLE_MUST_CHECK
21 bool "Enable __must_check logic" 21 bool "Enable __must_check logic"
22 default y 22 default y
23 help 23 help
24 Enable the __must_check logic in the kernel build. Disable this to 24 Enable the __must_check logic in the kernel build. Disable this to
25 suppress the "warning: ignoring return value of 'foo', declared with 25 suppress the "warning: ignoring return value of 'foo', declared with
26 attribute warn_unused_result" messages. 26 attribute warn_unused_result" messages.
27 27
28 config FRAME_WARN 28 config FRAME_WARN
29 int "Warn for stack frames larger than (needs gcc 4.4)" 29 int "Warn for stack frames larger than (needs gcc 4.4)"
30 range 0 8192 30 range 0 8192
31 default 1024 if !64BIT 31 default 1024 if !64BIT
32 default 2048 if 64BIT 32 default 2048 if 64BIT
33 help 33 help
34 Tell gcc to warn at build time for stack frames larger than this. 34 Tell gcc to warn at build time for stack frames larger than this.
35 Setting this too low will cause a lot of warnings. 35 Setting this too low will cause a lot of warnings.
36 Setting it to 0 disables the warning. 36 Setting it to 0 disables the warning.
37 Requires gcc 4.4 37 Requires gcc 4.4
38 38
39 config MAGIC_SYSRQ 39 config MAGIC_SYSRQ
40 bool "Magic SysRq key" 40 bool "Magic SysRq key"
41 depends on !UML 41 depends on !UML
42 help 42 help
43 If you say Y here, you will have some control over the system even 43 If you say Y here, you will have some control over the system even
44 if the system crashes for example during kernel debugging (e.g., you 44 if the system crashes for example during kernel debugging (e.g., you
45 will be able to flush the buffer cache to disk, reboot the system 45 will be able to flush the buffer cache to disk, reboot the system
46 immediately or dump some status information). This is accomplished 46 immediately or dump some status information). This is accomplished
47 by pressing various keys while holding SysRq (Alt+PrintScreen). It 47 by pressing various keys while holding SysRq (Alt+PrintScreen). It
48 also works on a serial console (on PC hardware at least), if you 48 also works on a serial console (on PC hardware at least), if you
49 send a BREAK and then within 5 seconds a command keypress. The 49 send a BREAK and then within 5 seconds a command keypress. The
50 keys are documented in <file:Documentation/sysrq.txt>. Don't say Y 50 keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
51 unless you really know what this hack does. 51 unless you really know what this hack does.
52 52
53 config UNUSED_SYMBOLS 53 config UNUSED_SYMBOLS
54 bool "Enable unused/obsolete exported symbols" 54 bool "Enable unused/obsolete exported symbols"
55 default y if X86 55 default y if X86
56 help 56 help
57 Unused but exported symbols make the kernel needlessly bigger. For 57 Unused but exported symbols make the kernel needlessly bigger. For
58 that reason most of these unused exports will soon be removed. This 58 that reason most of these unused exports will soon be removed. This
59 option is provided temporarily to provide a transition period in case 59 option is provided temporarily to provide a transition period in case
60 some external kernel module needs one of these symbols anyway. If you 60 some external kernel module needs one of these symbols anyway. If you
61 encounter such a case in your module, consider if you are actually 61 encounter such a case in your module, consider if you are actually
62 using the right API. (rationale: since nobody in the kernel is using 62 using the right API. (rationale: since nobody in the kernel is using
63 this in a module, there is a pretty good chance it's actually the 63 this in a module, there is a pretty good chance it's actually the
64 wrong interface to use). If you really need the symbol, please send a 64 wrong interface to use). If you really need the symbol, please send a
65 mail to the linux kernel mailing list mentioning the symbol and why 65 mail to the linux kernel mailing list mentioning the symbol and why
66 you really need it, and what the merge plan to the mainline kernel for 66 you really need it, and what the merge plan to the mainline kernel for
67 your module is. 67 your module is.
68 68
69 config DEBUG_FS 69 config DEBUG_FS
70 bool "Debug Filesystem" 70 bool "Debug Filesystem"
71 depends on SYSFS 71 depends on SYSFS
72 help 72 help
73 debugfs is a virtual file system that kernel developers use to put 73 debugfs is a virtual file system that kernel developers use to put
74 debugging files into. Enable this option to be able to read and 74 debugging files into. Enable this option to be able to read and
75 write to these files. 75 write to these files.
76 76
77 For detailed documentation on the debugfs API, see 77 For detailed documentation on the debugfs API, see
78 Documentation/DocBook/filesystems. 78 Documentation/DocBook/filesystems.
79 79
80 If unsure, say N. 80 If unsure, say N.
81 81
82 config HEADERS_CHECK 82 config HEADERS_CHECK
83 bool "Run 'make headers_check' when building vmlinux" 83 bool "Run 'make headers_check' when building vmlinux"
84 depends on !UML 84 depends on !UML
85 help 85 help
86 This option will extract the user-visible kernel headers whenever 86 This option will extract the user-visible kernel headers whenever
87 building the kernel, and will run basic sanity checks on them to 87 building the kernel, and will run basic sanity checks on them to
88 ensure that exported files do not attempt to include files which 88 ensure that exported files do not attempt to include files which
89 were not exported, etc. 89 were not exported, etc.
90 90
91 If you're making modifications to header files which are 91 If you're making modifications to header files which are
92 relevant for userspace, say 'Y', and check the headers 92 relevant for userspace, say 'Y', and check the headers
93 exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in 93 exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
94 your build tree), to make sure they're suitable. 94 your build tree), to make sure they're suitable.
95 95
96 config DEBUG_SECTION_MISMATCH 96 config DEBUG_SECTION_MISMATCH
97 bool "Enable full Section mismatch analysis" 97 bool "Enable full Section mismatch analysis"
98 depends on UNDEFINED 98 depends on UNDEFINED
99 # This option is on purpose disabled for now. 99 # This option is on purpose disabled for now.
100 # It will be enabled when we are down to a resonable number 100 # It will be enabled when we are down to a resonable number
101 # of section mismatch warnings (< 10 for an allyesconfig build) 101 # of section mismatch warnings (< 10 for an allyesconfig build)
102 help 102 help
103 The section mismatch analysis checks if there are illegal 103 The section mismatch analysis checks if there are illegal
104 references from one section to another section. 104 references from one section to another section.
105 Linux will during link or during runtime drop some sections 105 Linux will during link or during runtime drop some sections
106 and any use of code/data previously in these sections will 106 and any use of code/data previously in these sections will
107 most likely result in an oops. 107 most likely result in an oops.
108 In the code functions and variables are annotated with 108 In the code functions and variables are annotated with
109 __init, __devinit etc. (see full list in include/linux/init.h) 109 __init, __devinit etc. (see full list in include/linux/init.h)
110 which results in the code/data being placed in specific sections. 110 which results in the code/data being placed in specific sections.
111 The section mismatch analysis is always done after a full 111 The section mismatch analysis is always done after a full
112 kernel build but enabling this option will in addition 112 kernel build but enabling this option will in addition
113 do the following: 113 do the following:
114 - Add the option -fno-inline-functions-called-once to gcc 114 - Add the option -fno-inline-functions-called-once to gcc
115 When inlining a function annotated __init in a non-init 115 When inlining a function annotated __init in a non-init
116 function we would lose the section information and thus 116 function we would lose the section information and thus
117 the analysis would not catch the illegal reference. 117 the analysis would not catch the illegal reference.
118 This option tells gcc to inline less but will also 118 This option tells gcc to inline less but will also
119 result in a larger kernel. 119 result in a larger kernel.
120 - Run the section mismatch analysis for each module/built-in.o 120 - Run the section mismatch analysis for each module/built-in.o
121 When we run the section mismatch analysis on vmlinux.o we 121 When we run the section mismatch analysis on vmlinux.o we
122 lose valueble information about where the mismatch was 122 lose valueble information about where the mismatch was
123 introduced. 123 introduced.
124 Running the analysis for each module/built-in.o file 124 Running the analysis for each module/built-in.o file
125 will tell where the mismatch happens much closer to the 125 will tell where the mismatch happens much closer to the
126 source. The drawback is that we will report the same 126 source. The drawback is that we will report the same
127 mismatch at least twice. 127 mismatch at least twice.
128 - Enable verbose reporting from modpost to help solving 128 - Enable verbose reporting from modpost to help solving
129 the section mismatches reported. 129 the section mismatches reported.
130 130
131 config DEBUG_KERNEL 131 config DEBUG_KERNEL
132 bool "Kernel debugging" 132 bool "Kernel debugging"
133 help 133 help
134 Say Y here if you are developing drivers or trying to debug and 134 Say Y here if you are developing drivers or trying to debug and
135 identify kernel problems. 135 identify kernel problems.
136 136
137 config DEBUG_SHIRQ 137 config DEBUG_SHIRQ
138 bool "Debug shared IRQ handlers" 138 bool "Debug shared IRQ handlers"
139 depends on DEBUG_KERNEL && GENERIC_HARDIRQS 139 depends on DEBUG_KERNEL && GENERIC_HARDIRQS
140 help 140 help
141 Enable this to generate a spurious interrupt as soon as a shared 141 Enable this to generate a spurious interrupt as soon as a shared
142 interrupt handler is registered, and just before one is deregistered. 142 interrupt handler is registered, and just before one is deregistered.
143 Drivers ought to be able to handle interrupts coming in at those 143 Drivers ought to be able to handle interrupts coming in at those
144 points; some don't and need to be caught. 144 points; some don't and need to be caught.
145 145
146 config DETECT_SOFTLOCKUP 146 config DETECT_SOFTLOCKUP
147 bool "Detect Soft Lockups" 147 bool "Detect Soft Lockups"
148 depends on DEBUG_KERNEL && !S390 148 depends on DEBUG_KERNEL && !S390
149 default y 149 default y
150 help 150 help
151 Say Y here to enable the kernel to detect "soft lockups", 151 Say Y here to enable the kernel to detect "soft lockups",
152 which are bugs that cause the kernel to loop in kernel 152 which are bugs that cause the kernel to loop in kernel
153 mode for more than 60 seconds, without giving other tasks a 153 mode for more than 60 seconds, without giving other tasks a
154 chance to run. 154 chance to run.
155 155
156 When a soft-lockup is detected, the kernel will print the 156 When a soft-lockup is detected, the kernel will print the
157 current stack trace (which you should report), but the 157 current stack trace (which you should report), but the
158 system will stay locked up. This feature has negligible 158 system will stay locked up. This feature has negligible
159 overhead. 159 overhead.
160 160
161 (Note that "hard lockups" are separate type of bugs that 161 (Note that "hard lockups" are separate type of bugs that
162 can be detected via the NMI-watchdog, on platforms that 162 can be detected via the NMI-watchdog, on platforms that
163 support it.) 163 support it.)
164 164
165 config BOOTPARAM_SOFTLOCKUP_PANIC 165 config BOOTPARAM_SOFTLOCKUP_PANIC
166 bool "Panic (Reboot) On Soft Lockups" 166 bool "Panic (Reboot) On Soft Lockups"
167 depends on DETECT_SOFTLOCKUP 167 depends on DETECT_SOFTLOCKUP
168 help 168 help
169 Say Y here to enable the kernel to panic on "soft lockups", 169 Say Y here to enable the kernel to panic on "soft lockups",
170 which are bugs that cause the kernel to loop in kernel 170 which are bugs that cause the kernel to loop in kernel
171 mode for more than 60 seconds, without giving other tasks a 171 mode for more than 60 seconds, without giving other tasks a
172 chance to run. 172 chance to run.
173 173
174 The panic can be used in combination with panic_timeout, 174 The panic can be used in combination with panic_timeout,
175 to cause the system to reboot automatically after a 175 to cause the system to reboot automatically after a
176 lockup has been detected. This feature is useful for 176 lockup has been detected. This feature is useful for
177 high-availability systems that have uptime guarantees and 177 high-availability systems that have uptime guarantees and
178 where a lockup must be resolved ASAP. 178 where a lockup must be resolved ASAP.
179 179
180 Say N if unsure. 180 Say N if unsure.
181 181
182 config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE 182 config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
183 int 183 int
184 depends on DETECT_SOFTLOCKUP 184 depends on DETECT_SOFTLOCKUP
185 range 0 1 185 range 0 1
186 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC 186 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
187 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC 187 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
188 188
189 config SCHED_DEBUG 189 config SCHED_DEBUG
190 bool "Collect scheduler debugging info" 190 bool "Collect scheduler debugging info"
191 depends on DEBUG_KERNEL && PROC_FS 191 depends on DEBUG_KERNEL && PROC_FS
192 default y 192 default y
193 help 193 help
194 If you say Y here, the /proc/sched_debug file will be provided 194 If you say Y here, the /proc/sched_debug file will be provided
195 that can help debug the scheduler. The runtime overhead of this 195 that can help debug the scheduler. The runtime overhead of this
196 option is minimal. 196 option is minimal.
197 197
198 config SCHEDSTATS 198 config SCHEDSTATS
199 bool "Collect scheduler statistics" 199 bool "Collect scheduler statistics"
200 depends on DEBUG_KERNEL && PROC_FS 200 depends on DEBUG_KERNEL && PROC_FS
201 help 201 help
202 If you say Y here, additional code will be inserted into the 202 If you say Y here, additional code will be inserted into the
203 scheduler and related routines to collect statistics about 203 scheduler and related routines to collect statistics about
204 scheduler behavior and provide them in /proc/schedstat. These 204 scheduler behavior and provide them in /proc/schedstat. These
205 stats may be useful for both tuning and debugging the scheduler 205 stats may be useful for both tuning and debugging the scheduler
206 If you aren't debugging the scheduler or trying to tune a specific 206 If you aren't debugging the scheduler or trying to tune a specific
207 application, you can say N to avoid the very slight overhead 207 application, you can say N to avoid the very slight overhead
208 this adds. 208 this adds.
209 209
210 config TIMER_STATS 210 config TIMER_STATS
211 bool "Collect kernel timers statistics" 211 bool "Collect kernel timers statistics"
212 depends on DEBUG_KERNEL && PROC_FS 212 depends on DEBUG_KERNEL && PROC_FS
213 help 213 help
214 If you say Y here, additional code will be inserted into the 214 If you say Y here, additional code will be inserted into the
215 timer routines to collect statistics about kernel timers being 215 timer routines to collect statistics about kernel timers being
216 reprogrammed. The statistics can be read from /proc/timer_stats. 216 reprogrammed. The statistics can be read from /proc/timer_stats.
217 The statistics collection is started by writing 1 to /proc/timer_stats, 217 The statistics collection is started by writing 1 to /proc/timer_stats,
218 writing 0 stops it. This feature is useful to collect information 218 writing 0 stops it. This feature is useful to collect information
219 about timer usage patterns in kernel and userspace. This feature 219 about timer usage patterns in kernel and userspace. This feature
220 is lightweight if enabled in the kernel config but not activated 220 is lightweight if enabled in the kernel config but not activated
221 (it defaults to deactivated on bootup and will only be activated 221 (it defaults to deactivated on bootup and will only be activated
222 if some application like powertop activates it explicitly). 222 if some application like powertop activates it explicitly).
223 223
224 config DEBUG_OBJECTS 224 config DEBUG_OBJECTS
225 bool "Debug object operations" 225 bool "Debug object operations"
226 depends on DEBUG_KERNEL 226 depends on DEBUG_KERNEL
227 help 227 help
228 If you say Y here, additional code will be inserted into the 228 If you say Y here, additional code will be inserted into the
229 kernel to track the lifetime of various objects and validate 229 kernel to track the lifetime of various objects and validate
230 the operations on those objects. 230 the operations on those objects.
231 231
232 config DEBUG_OBJECTS_SELFTEST 232 config DEBUG_OBJECTS_SELFTEST
233 bool "Debug objects selftest" 233 bool "Debug objects selftest"
234 depends on DEBUG_OBJECTS 234 depends on DEBUG_OBJECTS
235 help 235 help
236 This enables the selftest of the object debug code. 236 This enables the selftest of the object debug code.
237 237
238 config DEBUG_OBJECTS_FREE 238 config DEBUG_OBJECTS_FREE
239 bool "Debug objects in freed memory" 239 bool "Debug objects in freed memory"
240 depends on DEBUG_OBJECTS 240 depends on DEBUG_OBJECTS
241 help 241 help
242 This enables checks on whether a k/v free operation frees an area 242 This enables checks on whether a k/v free operation frees an area
243 which contains an object which has not been deactivated 243 which contains an object which has not been deactivated
244 properly. This can make kmalloc/kfree-intensive workloads 244 properly. This can make kmalloc/kfree-intensive workloads
245 much slower. 245 much slower.
246 246
247 config DEBUG_OBJECTS_TIMERS 247 config DEBUG_OBJECTS_TIMERS
248 bool "Debug timer objects" 248 bool "Debug timer objects"
249 depends on DEBUG_OBJECTS 249 depends on DEBUG_OBJECTS
250 help 250 help
251 If you say Y here, additional code will be inserted into the 251 If you say Y here, additional code will be inserted into the
252 timer routines to track the lifetime of timer objects and 252 timer routines to track the lifetime of timer objects and
253 validate the timer operations. 253 validate the timer operations.
254 254
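The class of bug these object-debugging checks catch looks roughly like the hypothetical driver sketch below (all my_* names are invented; the older setup_timer()-style API is assumed): an object containing a still-armed timer is freed, and the debug code warns at the kfree() instead of letting the timer later fire on freed memory.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct my_dev {
        struct timer_list poll_timer;
        int polls;
};

static void my_poll(unsigned long data)
{
        struct my_dev *dev = (struct my_dev *)data;

        dev->polls++;
        mod_timer(&dev->poll_timer, jiffies + HZ);      /* re-arm */
}

static struct my_dev *my_dev_create(void)
{
        struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return NULL;
        setup_timer(&dev->poll_timer, my_poll, (unsigned long)dev);
        mod_timer(&dev->poll_timer, jiffies + HZ);
        return dev;
}

static void my_dev_destroy(struct my_dev *dev)
{
        /*
         * BUG: the timer is still pending here.  With DEBUG_OBJECTS_TIMERS
         * (plus DEBUG_OBJECTS_FREE) the kernel warns at this kfree(); the
         * fix is del_timer_sync(&dev->poll_timer) before freeing.
         */
        kfree(dev);
}
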
255 config DEBUG_SLAB 255 config DEBUG_SLAB
256 bool "Debug slab memory allocations" 256 bool "Debug slab memory allocations"
257 depends on DEBUG_KERNEL && SLAB 257 depends on DEBUG_KERNEL && SLAB
258 help 258 help
259 Say Y here to have the kernel do limited verification on memory 259 Say Y here to have the kernel do limited verification on memory
260 allocation as well as poisoning memory on free to catch use of freed 260 allocation as well as poisoning memory on free to catch use of freed
261 memory. This can make kmalloc/kfree-intensive workloads much slower. 261 memory. This can make kmalloc/kfree-intensive workloads much slower.
262 262
263 config DEBUG_SLAB_LEAK 263 config DEBUG_SLAB_LEAK
264 bool "Memory leak debugging" 264 bool "Memory leak debugging"
265 depends on DEBUG_SLAB 265 depends on DEBUG_SLAB
266 266
267 config SLUB_DEBUG_ON 267 config SLUB_DEBUG_ON
268 bool "SLUB debugging on by default" 268 bool "SLUB debugging on by default"
269 depends on SLUB && SLUB_DEBUG 269 depends on SLUB && SLUB_DEBUG
270 default n 270 default n
271 help 271 help
272 Boot with debugging on by default. SLUB boots by default with 272 Boot with debugging on by default. SLUB boots by default with
273 the runtime debug capabilities switched off. Enabling this is 273 the runtime debug capabilities switched off. Enabling this is
274 equivalent to specifying the "slub_debug" parameter on boot. 274 equivalent to specifying the "slub_debug" parameter on boot.
275 There is no support for more fine-grained debug control such as 275 There is no support for more fine-grained debug control such as
276 is possible with slub_debug=xxx. SLUB debugging may be switched 276 is possible with slub_debug=xxx. SLUB debugging may be switched
277 off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying 277 off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
278 "slub_debug=-". 278 "slub_debug=-".
279 279
280 config SLUB_STATS 280 config SLUB_STATS
281 default n 281 default n
282 bool "Enable SLUB performance statistics" 282 bool "Enable SLUB performance statistics"
283 depends on SLUB && SLUB_DEBUG && SYSFS 283 depends on SLUB && SLUB_DEBUG && SYSFS
284 help 284 help
285 SLUB statistics are useful to debug SLUB's allocation behavior in 285 SLUB statistics are useful to debug SLUB's allocation behavior in
286 order to find ways to optimize the allocator. This should never be 286 order to find ways to optimize the allocator. This should never be
287 enabled for production use since keeping statistics slows down 287 enabled for production use since keeping statistics slows down
288 the allocator by a few percentage points. The slabinfo command 288 the allocator by a few percentage points. The slabinfo command
289 supports the determination of the most active slabs to figure 289 supports the determination of the most active slabs to figure
290 out which slabs are relevant to a particular load. 290 out which slabs are relevant to a particular load.
291 Try running: slabinfo -DA 291 Try running: slabinfo -DA
292 292
293 config DEBUG_PREEMPT 293 config DEBUG_PREEMPT
294 bool "Debug preemptible kernel" 294 bool "Debug preemptible kernel"
295 depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) 295 depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
296 default y 296 default y
297 help 297 help
298 If you say Y here then the kernel will use a debug variant of the 298 If you say Y here then the kernel will use a debug variant of the
299 commonly used smp_processor_id() function and will print warnings 299 commonly used smp_processor_id() function and will print warnings
300 if kernel code uses it in a preemption-unsafe way. Also, the kernel 300 if kernel code uses it in a preemption-unsafe way. Also, the kernel
301 will detect preemption count underflows. 301 will detect preemption count underflows.
302 302
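A sketch of the usage this option warns about (hypothetical my_* names, not from this patch): smp_processor_id() called while preemptible, so the task can migrate to another CPU right after the call, versus pinning the task with get_cpu()/put_cpu().

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void my_bump_unsafe(void)
{
        /*
         * In preemptible context DEBUG_PREEMPT's checked variant of
         * smp_processor_id() prints a warning here: nothing prevents a
         * migration between this call and the per-CPU access below.
         */
        int cpu = smp_processor_id();

        per_cpu(my_counter, cpu)++;
}

static void my_bump_safe(void)
{
        int cpu = get_cpu();            /* disables preemption */

        per_cpu(my_counter, cpu)++;
        put_cpu();                      /* re-enables preemption */
}
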
303 config DEBUG_RT_MUTEXES 303 config DEBUG_RT_MUTEXES
304 bool "RT Mutex debugging, deadlock detection" 304 bool "RT Mutex debugging, deadlock detection"
305 depends on DEBUG_KERNEL && RT_MUTEXES 305 depends on DEBUG_KERNEL && RT_MUTEXES
306 help 306 help
307 This allows rt mutex semantics violations and rt mutex related 307 This allows rt mutex semantics violations and rt mutex related
308 deadlocks (lockups) to be detected and reported automatically. 308 deadlocks (lockups) to be detected and reported automatically.
309 309
310 config DEBUG_PI_LIST 310 config DEBUG_PI_LIST
311 bool 311 bool
312 default y 312 default y
313 depends on DEBUG_RT_MUTEXES 313 depends on DEBUG_RT_MUTEXES
314 314
315 config RT_MUTEX_TESTER 315 config RT_MUTEX_TESTER
316 bool "Built-in scriptable tester for rt-mutexes" 316 bool "Built-in scriptable tester for rt-mutexes"
317 depends on DEBUG_KERNEL && RT_MUTEXES 317 depends on DEBUG_KERNEL && RT_MUTEXES
318 help 318 help
319 This option enables a rt-mutex tester. 319 This option enables a rt-mutex tester.
320 320
321 config DEBUG_SPINLOCK 321 config DEBUG_SPINLOCK
322 bool "Spinlock and rw-lock debugging: basic checks" 322 bool "Spinlock and rw-lock debugging: basic checks"
323 depends on DEBUG_KERNEL 323 depends on DEBUG_KERNEL
324 help 324 help
325 Say Y here and build SMP to catch missing spinlock initialization 325 Say Y here and build SMP to catch missing spinlock initialization
326 and certain other kinds of spinlock errors commonly made. This is 326 and certain other kinds of spinlock errors commonly made. This is
327 best used in conjunction with the NMI watchdog so that spinlock 327 best used in conjunction with the NMI watchdog so that spinlock
328 deadlocks are also debuggable. 328 deadlocks are also debuggable.
329 329
330 config DEBUG_MUTEXES 330 config DEBUG_MUTEXES
331 bool "Mutex debugging: basic checks" 331 bool "Mutex debugging: basic checks"
332 depends on DEBUG_KERNEL 332 depends on DEBUG_KERNEL
333 help 333 help
334 This feature allows mutex semantics violations to be detected and 334 This feature allows mutex semantics violations to be detected and
335 reported. 335 reported.
336 336
337 config DEBUG_LOCK_ALLOC 337 config DEBUG_LOCK_ALLOC
338 bool "Lock debugging: detect incorrect freeing of live locks" 338 bool "Lock debugging: detect incorrect freeing of live locks"
339 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 339 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
340 select DEBUG_SPINLOCK 340 select DEBUG_SPINLOCK
341 select DEBUG_MUTEXES 341 select DEBUG_MUTEXES
342 select LOCKDEP 342 select LOCKDEP
343 help 343 help
344 This feature will check whether any held lock (spinlock, rwlock, 344 This feature will check whether any held lock (spinlock, rwlock,
345 mutex or rwsem) is incorrectly freed by the kernel, via any of the 345 mutex or rwsem) is incorrectly freed by the kernel, via any of the
346 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(), 346 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
347 vfree(), etc.), whether a live lock is incorrectly reinitialized via 347 vfree(), etc.), whether a live lock is incorrectly reinitialized via
348 spin_lock_init()/mutex_init()/etc., or whether there is any lock 348 spin_lock_init()/mutex_init()/etc., or whether there is any lock
349 held during task exit. 349 held during task exit.
350 350
351 config PROVE_LOCKING 351 config PROVE_LOCKING
352 bool "Lock debugging: prove locking correctness" 352 bool "Lock debugging: prove locking correctness"
353 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 353 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
354 select LOCKDEP 354 select LOCKDEP
355 select DEBUG_SPINLOCK 355 select DEBUG_SPINLOCK
356 select DEBUG_MUTEXES 356 select DEBUG_MUTEXES
357 select DEBUG_LOCK_ALLOC 357 select DEBUG_LOCK_ALLOC
358 default n 358 default n
359 help 359 help
360 This feature enables the kernel to prove that all locking 360 This feature enables the kernel to prove that all locking
361 that occurs in the kernel runtime is mathematically 361 that occurs in the kernel runtime is mathematically
362 correct: that under no circumstance could an arbitrary (and 362 correct: that under no circumstance could an arbitrary (and
363 not yet triggered) combination of observed locking 363 not yet triggered) combination of observed locking
364 sequences (on an arbitrary number of CPUs, running an 364 sequences (on an arbitrary number of CPUs, running an
365 arbitrary number of tasks and interrupt contexts) cause a 365 arbitrary number of tasks and interrupt contexts) cause a
366 deadlock. 366 deadlock.
367 367
368 In short, this feature enables the kernel to report locking 368 In short, this feature enables the kernel to report locking
369 related deadlocks before they actually occur. 369 related deadlocks before they actually occur.
370 370
371 The proof does not depend on how hard and complex a 371 The proof does not depend on how hard and complex a
372 deadlock scenario would be to trigger: how many 372 deadlock scenario would be to trigger: how many
373 participant CPUs, tasks and irq-contexts would be needed 373 participant CPUs, tasks and irq-contexts would be needed
374 for it to trigger. The proof also does not depend on 374 for it to trigger. The proof also does not depend on
375 timing: if a race and a resulting deadlock is possible 375 timing: if a race and a resulting deadlock is possible
376 theoretically (no matter how unlikely the race scenario 376 theoretically (no matter how unlikely the race scenario
377 is), it will be proven so and will immediately be 377 is), it will be proven so and will immediately be
378 reported by the kernel (once the event is observed that 378 reported by the kernel (once the event is observed that
379 makes the deadlock theoretically possible). 379 makes the deadlock theoretically possible).
380 380
381 If a deadlock is impossible (i.e. the locking rules, as 381 If a deadlock is impossible (i.e. the locking rules, as
382 observed by the kernel, are mathematically correct), the 382 observed by the kernel, are mathematically correct), the
383 kernel reports nothing. 383 kernel reports nothing.
384 384
385 NOTE: this feature can also be enabled for rwlocks, mutexes 385 NOTE: this feature can also be enabled for rwlocks, mutexes
386 and rwsems - in which case all dependencies between these 386 and rwsems - in which case all dependencies between these
387 different locking variants are observed and mapped too, and 387 different locking variants are observed and mapped too, and
388 the proof of observed correctness is also maintained for an 388 the proof of observed correctness is also maintained for an
389 arbitrary combination of these separate locking variants. 389 arbitrary combination of these separate locking variants.
390 390
391 For more details, see Documentation/lockdep-design.txt. 391 For more details, see Documentation/lockdep-design.txt.
392 392
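As a concrete illustration of an "observed locking sequence" (hypothetical locks, not from this patch): once both functions below have run, lockdep has seen lock_a taken before lock_b and lock_b taken before lock_a, and it reports the potential AB-BA deadlock even if the two paths never actually raced.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void my_path_one(void)
{
        spin_lock(&lock_a);
        spin_lock(&lock_b);             /* records the order A -> B */
        /* ... */
        spin_unlock(&lock_b);
        spin_unlock(&lock_a);
}

static void my_path_two(void)
{
        spin_lock(&lock_b);
        spin_lock(&lock_a);             /* order B -> A: lockdep reports a
                                           possible circular dependency */
        /* ... */
        spin_unlock(&lock_a);
        spin_unlock(&lock_b);
}
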
393 config LOCKDEP 393 config LOCKDEP
394 bool 394 bool
395 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 395 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
396 select STACKTRACE 396 select STACKTRACE
397 select FRAME_POINTER if !X86 && !MIPS 397 select FRAME_POINTER if !X86 && !MIPS
398 select KALLSYMS 398 select KALLSYMS
399 select KALLSYMS_ALL 399 select KALLSYMS_ALL
400 400
401 config LOCK_STAT 401 config LOCK_STAT
402 bool "Lock usage statistics" 402 bool "Lock usage statistics"
403 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 403 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
404 select LOCKDEP 404 select LOCKDEP
405 select DEBUG_SPINLOCK 405 select DEBUG_SPINLOCK
406 select DEBUG_MUTEXES 406 select DEBUG_MUTEXES
407 select DEBUG_LOCK_ALLOC 407 select DEBUG_LOCK_ALLOC
408 default n 408 default n
409 help 409 help
410 This feature enables tracking of lock contention points. 410 This feature enables tracking of lock contention points.
411 411
412 For more details, see Documentation/lockstat.txt 412 For more details, see Documentation/lockstat.txt
413 413
414 config DEBUG_LOCKDEP 414 config DEBUG_LOCKDEP
415 bool "Lock dependency engine debugging" 415 bool "Lock dependency engine debugging"
416 depends on DEBUG_KERNEL && LOCKDEP 416 depends on DEBUG_KERNEL && LOCKDEP
417 help 417 help
418 If you say Y here, the lock dependency engine will do 418 If you say Y here, the lock dependency engine will do
419 additional runtime checks to debug itself, at the price 419 additional runtime checks to debug itself, at the price
420 of more runtime overhead. 420 of more runtime overhead.
421 421
422 config TRACE_IRQFLAGS 422 config TRACE_IRQFLAGS
423 depends on DEBUG_KERNEL 423 depends on DEBUG_KERNEL
424 bool 424 bool
425 default y 425 default y
426 depends on TRACE_IRQFLAGS_SUPPORT 426 depends on TRACE_IRQFLAGS_SUPPORT
427 depends on PROVE_LOCKING 427 depends on PROVE_LOCKING
428 428
429 config DEBUG_SPINLOCK_SLEEP 429 config DEBUG_SPINLOCK_SLEEP
430 bool "Spinlock debugging: sleep-inside-spinlock checking" 430 bool "Spinlock debugging: sleep-inside-spinlock checking"
431 depends on DEBUG_KERNEL 431 depends on DEBUG_KERNEL
432 help 432 help
433 If you say Y here, various routines which may sleep will become very 433 If you say Y here, various routines which may sleep will become very
434 noisy if they are called with a spinlock held. 434 noisy if they are called with a spinlock held.
435 435
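The typical offender looks like the hypothetical sketch below (my_* names invented): an allocation that may sleep (GFP_KERNEL) performed with a spinlock held. The sleep-inside-spinlock check complains at the kmalloc(); using GFP_ATOMIC, or allocating before taking the lock, avoids it.

#include <linux/spinlock.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(my_lock);
static void *my_buf;

static void my_alloc_broken(size_t len)
{
        spin_lock(&my_lock);
        my_buf = kmalloc(len, GFP_KERNEL);      /* may sleep: warning fires */
        spin_unlock(&my_lock);
}

static void my_alloc_fixed(size_t len)
{
        spin_lock(&my_lock);
        my_buf = kmalloc(len, GFP_ATOMIC);      /* never sleeps */
        spin_unlock(&my_lock);
}
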
436 config DEBUG_LOCKING_API_SELFTESTS 436 config DEBUG_LOCKING_API_SELFTESTS
437 bool "Locking API boot-time self-tests" 437 bool "Locking API boot-time self-tests"
438 depends on DEBUG_KERNEL 438 depends on DEBUG_KERNEL
439 help 439 help
440 Say Y here if you want the kernel to run a short self-test during 440 Say Y here if you want the kernel to run a short self-test during
441 bootup. The self-test checks whether common types of locking bugs 441 bootup. The self-test checks whether common types of locking bugs
442 are detected by debugging mechanisms or not. (if you disable 442 are detected by debugging mechanisms or not. (if you disable
443 lock debugging then those bugs won't be detected, of course.) 443 lock debugging then those bugs won't be detected, of course.)
444 The following locking APIs are covered: spinlocks, rwlocks, 444 The following locking APIs are covered: spinlocks, rwlocks,
445 mutexes and rwsems. 445 mutexes and rwsems.
446 446
447 config STACKTRACE 447 config STACKTRACE
448 bool 448 bool
449 depends on STACKTRACE_SUPPORT 449 depends on STACKTRACE_SUPPORT
450 450
451 config DEBUG_KOBJECT 451 config DEBUG_KOBJECT
452 bool "kobject debugging" 452 bool "kobject debugging"
453 depends on DEBUG_KERNEL 453 depends on DEBUG_KERNEL
454 help 454 help
455 If you say Y here, some extra kobject debugging messages will be sent 455 If you say Y here, some extra kobject debugging messages will be sent
456 to the syslog. 456 to the syslog.
457 457
458 config DEBUG_HIGHMEM 458 config DEBUG_HIGHMEM
459 bool "Highmem debugging" 459 bool "Highmem debugging"
460 depends on DEBUG_KERNEL && HIGHMEM 460 depends on DEBUG_KERNEL && HIGHMEM
461 help 461 help
462 This option enables additional error checking for high memory systems. 462 This option enables additional error checking for high memory systems.
463 Disable for production systems. 463 Disable for production systems.
464 464
465 config DEBUG_BUGVERBOSE 465 config DEBUG_BUGVERBOSE
466 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED 466 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
467 depends on BUG 467 depends on BUG
468 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ 468 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
469 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 469 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
470 default !EMBEDDED 470 default !EMBEDDED
471 help 471 help
472 Say Y here to make BUG() panics output the file name and line number 472 Say Y here to make BUG() panics output the file name and line number
473 of the BUG call as well as the EIP and oops trace. This aids 473 of the BUG call as well as the EIP and oops trace. This aids
474 debugging but costs about 70-100K of memory. 474 debugging but costs about 70-100K of memory.
475 475
476 config DEBUG_INFO 476 config DEBUG_INFO
477 bool "Compile the kernel with debug info" 477 bool "Compile the kernel with debug info"
478 depends on DEBUG_KERNEL 478 depends on DEBUG_KERNEL
479 help 479 help
480 If you say Y here the resulting kernel image will include 480 If you say Y here the resulting kernel image will include
481 debugging info resulting in a larger kernel image. 481 debugging info resulting in a larger kernel image.
482 This adds debug symbols to the kernel and modules (gcc -g), and 482 This adds debug symbols to the kernel and modules (gcc -g), and
483 is needed if you intend to use kernel crashdump or binary object 483 is needed if you intend to use kernel crashdump or binary object
484 tools like crash, kgdb, LKCD, gdb, etc on the kernel. 484 tools like crash, kgdb, LKCD, gdb, etc on the kernel.
485 Say Y here only if you plan to debug the kernel. 485 Say Y here only if you plan to debug the kernel.
486 486
487 If unsure, say N. 487 If unsure, say N.
488 488
489 config DEBUG_VM 489 config DEBUG_VM
490 bool "Debug VM" 490 bool "Debug VM"
491 depends on DEBUG_KERNEL 491 depends on DEBUG_KERNEL
492 help 492 help
493 Enable this to turn on extended checks in the virtual-memory system 493 Enable this to turn on extended checks in the virtual-memory system
494 that may impact performance. 494 that may impact performance.
495 495
496 If unsure, say N. 496 If unsure, say N.
497 497
498 config DEBUG_WRITECOUNT 498 config DEBUG_WRITECOUNT
499 bool "Debug filesystem writers count" 499 bool "Debug filesystem writers count"
500 depends on DEBUG_KERNEL 500 depends on DEBUG_KERNEL
501 help 501 help
502 Enable this to catch wrong use of the writers count in struct 502 Enable this to catch wrong use of the writers count in struct
503 vfsmount. This will increase the size of each file struct by 503 vfsmount. This will increase the size of each file struct by
504 32 bits. 504 32 bits.
505 505
506 If unsure, say N. 506 If unsure, say N.
507 507
508 config DEBUG_MEMORY_INIT 508 config DEBUG_MEMORY_INIT
509 bool "Debug memory initialisation" if EMBEDDED 509 bool "Debug memory initialisation" if EMBEDDED
510 default !EMBEDDED 510 default !EMBEDDED
511 help 511 help
512 Enable this for additional checks during memory initialisation. 512 Enable this for additional checks during memory initialisation.
513 The sanity checks verify aspects of the VM such as the memory model 513 The sanity checks verify aspects of the VM such as the memory model
514 and other information provided by the architecture. Verbose 514 and other information provided by the architecture. Verbose
515 information will be printed at KERN_DEBUG loglevel depending 515 information will be printed at KERN_DEBUG loglevel depending
516 on the mminit_loglevel= command-line option. 516 on the mminit_loglevel= command-line option.
517 517
518 If unsure, say Y. 518 If unsure, say Y.
519 519
520 config DEBUG_LIST 520 config DEBUG_LIST
521 bool "Debug linked list manipulation" 521 bool "Debug linked list manipulation"
522 depends on DEBUG_KERNEL 522 depends on DEBUG_KERNEL
523 help 523 help
524 Enable this to turn on extended checks in the linked-list 524 Enable this to turn on extended checks in the linked-list
525 walking routines. 525 walking routines.
526 526
527 If unsure, say N. 527 If unsure, say N.
528 528
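A hypothetical example of the corruption these extended checks are aimed at (my_* names invented): removing the same entry twice. Plain list_del() poisons the entry's pointers, so with DEBUG_LIST the prev/next consistency checks trip on the stale entry instead of the list being silently corrupted (at worst the resulting oops at least shows the recognizable LIST_POISON values).

#include <linux/list.h>

struct my_item {
        struct list_head node;
        int value;
};

static LIST_HEAD(my_items);

static void my_add_and_remove(struct my_item *item)
{
        list_add_tail(&item->node, &my_items);
        list_del(&item->node);          /* poisons item->node.prev/next */
        /*
         * BUG: second removal of an entry that is no longer on the list.
         * The prev/next validation added by DEBUG_LIST trips here rather
         * than letting my_items be quietly corrupted.
         */
        list_del(&item->node);
}
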
529 config DEBUG_SG 529 config DEBUG_SG
530 bool "Debug SG table operations" 530 bool "Debug SG table operations"
531 depends on DEBUG_KERNEL 531 depends on DEBUG_KERNEL
532 help 532 help
533 Enable this to turn on checks on scatter-gather tables. This can 533 Enable this to turn on checks on scatter-gather tables. This can
534 help find problems with drivers that do not properly initialize 534 help find problems with drivers that do not properly initialize
535 their sg tables. 535 their sg tables.
536 536
537 If unsure, say N. 537 If unsure, say N.
538 538
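What "properly initialize" means in practice, as a hypothetical driver fragment (my_* names invented): sg_init_table() must set up the table, including the end-of-table marker and the magic value that DEBUG_SG verifies, before the entries are filled in and handed to the DMA API.

#include <linux/scatterlist.h>

#define MY_NENTS        4

static void my_fill_sg(struct scatterlist *sg, struct page **pages,
                       unsigned int len)
{
        int i;

        /* Skipping this is the bug DEBUG_SG catches when sg is used. */
        sg_init_table(sg, MY_NENTS);

        for (i = 0; i < MY_NENTS; i++)
                sg_set_page(&sg[i], pages[i], len, 0);
}
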
539 config DEBUG_NOTIFIERS
540 bool "Debug notifier call chains"
541 depends on DEBUG_KERNEL
542 help
543 Enable this to turn on sanity checking for notifier call chains.
544 This is most useful for kernel developers to make sure that
545 modules properly unregister themselves from notifier chains.
546 This is a relatively cheap check but if you care about maximum
547 performance, say N.
548
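What "properly unregister" means here, sketched as a hypothetical module (my_* names invented; the standard reboot-notifier API is used purely as an example): the notifier_block and its callback live in module memory, so the exit path must remove them from the chain. If the unregister call is forgotten, the chain retains a pointer into unloaded module text, which is exactly the situation this option's sanity check is meant to flag.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_reboot_event(struct notifier_block *nb,
                           unsigned long action, void *data)
{
        printk(KERN_INFO "my_module: reboot notifier, action %lu\n", action);
        return NOTIFY_DONE;
}

static struct notifier_block my_reboot_nb = {
        .notifier_call  = my_reboot_event,
};

static int __init my_module_init(void)
{
        return register_reboot_notifier(&my_reboot_nb);
}

static void __exit my_module_exit(void)
{
        /* Forgetting this leaves a dangling callback in the chain. */
        unregister_reboot_notifier(&my_reboot_nb);
}

module_init(my_module_init);
module_exit(my_module_exit);
MODULE_LICENSE("GPL");
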
539 config FRAME_POINTER 549 config FRAME_POINTER
540 bool "Compile the kernel with frame pointers" 550 bool "Compile the kernel with frame pointers"
541 depends on DEBUG_KERNEL && \ 551 depends on DEBUG_KERNEL && \
542 (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \ 552 (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
543 AVR32 || SUPERH || BLACKFIN || MN10300) 553 AVR32 || SUPERH || BLACKFIN || MN10300)
544 default y if DEBUG_INFO && UML 554 default y if DEBUG_INFO && UML
545 help 555 help
546 If you say Y here the resulting kernel image will be slightly larger 556 If you say Y here the resulting kernel image will be slightly larger
547 and slower, but it might give very useful debugging information on 557 and slower, but it might give very useful debugging information on
548 some architectures or if you use external debuggers. 558 some architectures or if you use external debuggers.
549 If you don't debug the kernel, you can say N. 559 If you don't debug the kernel, you can say N.
550 560
551 config BOOT_PRINTK_DELAY 561 config BOOT_PRINTK_DELAY
552 bool "Delay each boot printk message by N milliseconds" 562 bool "Delay each boot printk message by N milliseconds"
553 depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY 563 depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY
554 help 564 help
555 This build option allows you to read kernel boot messages 565 This build option allows you to read kernel boot messages
556 by inserting a short delay after each one. The delay is 566 by inserting a short delay after each one. The delay is
557 specified in milliseconds on the kernel command line, 567 specified in milliseconds on the kernel command line,
558 using "boot_delay=N". 568 using "boot_delay=N".
559 569
560 It is likely that you would also need to use "lpj=M" to preset 570 It is likely that you would also need to use "lpj=M" to preset
561 the "loops per jiffy" value. 571 the "loops per jiffy" value.
562 See a previous boot log for the "lpj" value to use for your 572 See a previous boot log for the "lpj" value to use for your
563 system, and then set "lpj=M" before setting "boot_delay=N". 573 system, and then set "lpj=M" before setting "boot_delay=N".
564 NOTE: Using this option may adversely affect SMP systems. 574 NOTE: Using this option may adversely affect SMP systems.
565 I.e., processors other than the first one may not boot up. 575 I.e., processors other than the first one may not boot up.
566 BOOT_PRINTK_DELAY also may cause DETECT_SOFTLOCKUP to detect 576 BOOT_PRINTK_DELAY also may cause DETECT_SOFTLOCKUP to detect
567 what it believes to be lockup conditions. 577 what it believes to be lockup conditions.
568 578
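For example (values purely illustrative), a kernel command line combining the two options described above might carry:

        boot_delay=50 lpj=4997120

where the lpj value is whatever a previous boot log reported for this machine.
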
569 config RCU_TORTURE_TEST 579 config RCU_TORTURE_TEST
570 tristate "torture tests for RCU" 580 tristate "torture tests for RCU"
571 depends on DEBUG_KERNEL 581 depends on DEBUG_KERNEL
572 default n 582 default n
573 help 583 help
574 This option provides a kernel module that runs torture tests 584 This option provides a kernel module that runs torture tests
575 on the RCU infrastructure. The kernel module may be built 585 on the RCU infrastructure. The kernel module may be built
576 after the fact on the running kernel to be tested, if desired. 586 after the fact on the running kernel to be tested, if desired.
577 587
578 Say Y here if you want RCU torture tests to be built into 588 Say Y here if you want RCU torture tests to be built into
579 the kernel. 589 the kernel.
580 Say M if you want the RCU torture tests to build as a module. 590 Say M if you want the RCU torture tests to build as a module.
581 Say N if you are unsure. 591 Say N if you are unsure.
582 592
583 config RCU_TORTURE_TEST_RUNNABLE 593 config RCU_TORTURE_TEST_RUNNABLE
584 bool "torture tests for RCU runnable by default" 594 bool "torture tests for RCU runnable by default"
585 depends on RCU_TORTURE_TEST = y 595 depends on RCU_TORTURE_TEST = y
586 default n 596 default n
587 help 597 help
588 This option provides a way to build the RCU torture tests 598 This option provides a way to build the RCU torture tests
589 directly into the kernel without them starting up at boot 599 directly into the kernel without them starting up at boot
590 time. You can use /proc/sys/kernel/rcutorture_runnable 600 time. You can use /proc/sys/kernel/rcutorture_runnable
591 to manually override this setting. This /proc file is 601 to manually override this setting. This /proc file is
592 available only when the RCU torture tests have been built 602 available only when the RCU torture tests have been built
593 into the kernel. 603 into the kernel.
594 604
595 Say Y here if you want the RCU torture tests to start during 605 Say Y here if you want the RCU torture tests to start during
596 boot (you probably don't). 606 boot (you probably don't).
597 Say N here if you want the RCU torture tests to start only 607 Say N here if you want the RCU torture tests to start only
598 after being manually enabled via /proc. 608 after being manually enabled via /proc.
599 609
600 config KPROBES_SANITY_TEST 610 config KPROBES_SANITY_TEST
601 bool "Kprobes sanity tests" 611 bool "Kprobes sanity tests"
602 depends on DEBUG_KERNEL 612 depends on DEBUG_KERNEL
603 depends on KPROBES 613 depends on KPROBES
604 default n 614 default n
605 help 615 help
606 This option provides for testing basic kprobes functionality on 616 This option provides for testing basic kprobes functionality on
607 boot. A sample kprobe, jprobe and kretprobe are inserted and 617 boot. A sample kprobe, jprobe and kretprobe are inserted and
608 verified for functionality. 618 verified for functionality.
609 619
610 Say N if you are unsure. 620 Say N if you are unsure.
611 621
612 config BACKTRACE_SELF_TEST 622 config BACKTRACE_SELF_TEST
613 tristate "Self test for the backtrace code" 623 tristate "Self test for the backtrace code"
614 depends on DEBUG_KERNEL 624 depends on DEBUG_KERNEL
615 default n 625 default n
616 help 626 help
617 This option provides a kernel module that can be used to test 627 This option provides a kernel module that can be used to test
618 the kernel stack backtrace code. This option is not useful 628 the kernel stack backtrace code. This option is not useful
619 for distributions or general kernels, but only for kernel 629 for distributions or general kernels, but only for kernel
620 developers working on architecture code. 630 developers working on architecture code.
621 631
622 Note that if you want to also test saved backtraces, you will 632 Note that if you want to also test saved backtraces, you will
623 have to enable STACKTRACE as well. 633 have to enable STACKTRACE as well.
624 634
625 Say N if you are unsure. 635 Say N if you are unsure.
626 636
627 config LKDTM 637 config LKDTM
628 tristate "Linux Kernel Dump Test Tool Module" 638 tristate "Linux Kernel Dump Test Tool Module"
629 depends on DEBUG_KERNEL 639 depends on DEBUG_KERNEL
630 depends on KPROBES 640 depends on KPROBES
631 depends on BLOCK 641 depends on BLOCK
632 default n 642 default n
633 help 643 help
634 This module enables testing of the different dumping mechanisms by 644 This module enables testing of the different dumping mechanisms by
635 inducing system failures at predefined crash points. 645 inducing system failures at predefined crash points.
636 If you don't need it: say N 646 If you don't need it: say N
637 Choose M here to compile this code as a module. The module will be 647 Choose M here to compile this code as a module. The module will be
638 called lkdtm. 648 called lkdtm.
639 649
640 Documentation on how to use the module can be found in 650 Documentation on how to use the module can be found in
641 drivers/misc/lkdtm.c 651 drivers/misc/lkdtm.c
642 652
643 config FAULT_INJECTION 653 config FAULT_INJECTION
644 bool "Fault-injection framework" 654 bool "Fault-injection framework"
645 depends on DEBUG_KERNEL 655 depends on DEBUG_KERNEL
646 help 656 help
647 Provide fault-injection framework. 657 Provide fault-injection framework.
648 For more details, see Documentation/fault-injection/. 658 For more details, see Documentation/fault-injection/.
649 659
650 config FAILSLAB 660 config FAILSLAB
651 bool "Fault-injection capability for kmalloc" 661 bool "Fault-injection capability for kmalloc"
652 depends on FAULT_INJECTION 662 depends on FAULT_INJECTION
653 help 663 help
654 Provide fault-injection capability for kmalloc. 664 Provide fault-injection capability for kmalloc.
655 665
656 config FAIL_PAGE_ALLOC 666 config FAIL_PAGE_ALLOC
657 bool "Fault-injection capability for alloc_pages()" 667 bool "Fault-injection capability for alloc_pages()"
658 depends on FAULT_INJECTION 668 depends on FAULT_INJECTION
659 help 669 help
660 Provide fault-injection capability for alloc_pages(). 670 Provide fault-injection capability for alloc_pages().
661 671
662 config FAIL_MAKE_REQUEST 672 config FAIL_MAKE_REQUEST
663 bool "Fault-injection capability for disk IO" 673 bool "Fault-injection capability for disk IO"
664 depends on FAULT_INJECTION 674 depends on FAULT_INJECTION
665 help 675 help
666 Provide fault-injection capability for disk IO. 676 Provide fault-injection capability for disk IO.
667 677
668 config FAULT_INJECTION_DEBUG_FS 678 config FAULT_INJECTION_DEBUG_FS
669 bool "Debugfs entries for fault-injection capabilities" 679 bool "Debugfs entries for fault-injection capabilities"
670 depends on FAULT_INJECTION && SYSFS && DEBUG_FS 680 depends on FAULT_INJECTION && SYSFS && DEBUG_FS
671 help 681 help
672 Enable configuration of fault-injection capabilities via debugfs. 682 Enable configuration of fault-injection capabilities via debugfs.
673 683
674 config FAULT_INJECTION_STACKTRACE_FILTER 684 config FAULT_INJECTION_STACKTRACE_FILTER
675 bool "stacktrace filter for fault-injection capabilities" 685 bool "stacktrace filter for fault-injection capabilities"
676 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 686 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
677 depends on !X86_64 687 depends on !X86_64
678 select STACKTRACE 688 select STACKTRACE
679 select FRAME_POINTER 689 select FRAME_POINTER
680 help 690 help
681 Provide a stacktrace filter for fault-injection capabilities. 691 Provide a stacktrace filter for fault-injection capabilities.
682 692
683 config LATENCYTOP 693 config LATENCYTOP
684 bool "Latency measuring infrastructure" 694 bool "Latency measuring infrastructure"
685 select FRAME_POINTER if !MIPS 695 select FRAME_POINTER if !MIPS
686 select KALLSYMS 696 select KALLSYMS
687 select KALLSYMS_ALL 697 select KALLSYMS_ALL
688 select STACKTRACE 698 select STACKTRACE
689 select SCHEDSTATS 699 select SCHEDSTATS
690 select SCHED_DEBUG 700 select SCHED_DEBUG
691 depends on HAVE_LATENCYTOP_SUPPORT 701 depends on HAVE_LATENCYTOP_SUPPORT
692 help 702 help
693 Enable this option if you want to use the LatencyTOP tool 703 Enable this option if you want to use the LatencyTOP tool
694 to find out which userspace is blocking on what kernel operations. 704 to find out which userspace is blocking on what kernel operations.
695 705
696 config SYSCTL_SYSCALL_CHECK 706 config SYSCTL_SYSCALL_CHECK
697 bool "Sysctl checks" 707 bool "Sysctl checks"
698 depends on SYSCTL_SYSCALL 708 depends on SYSCTL_SYSCALL
699 ---help--- 709 ---help---
700 sys_sysctl uses binary paths that have been found challenging 710 sys_sysctl uses binary paths that have been found challenging
701 to properly maintain and use. This enables checks that help 711 to properly maintain and use. This enables checks that help
702 you to keep things correct. 712 you to keep things correct.
703 713
704 source kernel/trace/Kconfig 714 source kernel/trace/Kconfig
705 715
706 config PROVIDE_OHCI1394_DMA_INIT 716 config PROVIDE_OHCI1394_DMA_INIT
707 bool "Remote debugging over FireWire early on boot" 717 bool "Remote debugging over FireWire early on boot"
708 depends on PCI && X86 718 depends on PCI && X86
709 help 719 help
710 If you want to debug problems which hang or crash the kernel early 720 If you want to debug problems which hang or crash the kernel early
711 on boot and the crashing machine has a FireWire port, you can use 721 on boot and the crashing machine has a FireWire port, you can use
712 this feature to remotely access the memory of the crashed machine 722 this feature to remotely access the memory of the crashed machine
713 over FireWire. This employs remote DMA as part of the OHCI1394 723 over FireWire. This employs remote DMA as part of the OHCI1394
714 specification which is now the standard for FireWire controllers. 724 specification which is now the standard for FireWire controllers.
715 725
716 With remote DMA, you can monitor the printk buffer remotely using 726 With remote DMA, you can monitor the printk buffer remotely using
717 firescope and access all memory below 4GB using fireproxy from gdb. 727 firescope and access all memory below 4GB using fireproxy from gdb.
718 Even controlling a kernel debugger is possible using remote DMA. 728 Even controlling a kernel debugger is possible using remote DMA.
719 729
720 Usage: 730 Usage:
721 731
722 If ohci1394_dma=early is used as a boot parameter, it will initialize 732 If ohci1394_dma=early is used as a boot parameter, it will initialize
723 all OHCI1394 controllers which are found in the PCI config space. 733 all OHCI1394 controllers which are found in the PCI config space.
724 734
725 As all changes to the FireWire bus such as enabling and disabling 735 As all changes to the FireWire bus such as enabling and disabling
726 devices cause a bus reset and thereby disable remote DMA for all 736 devices cause a bus reset and thereby disable remote DMA for all
727 devices, be sure to have the cable plugged and FireWire enabled on 737 devices, be sure to have the cable plugged and FireWire enabled on
728 the debugging host before booting the debug target for debugging. 738 the debugging host before booting the debug target for debugging.
729 739
730 This code (~1k) is freed after boot. By then, the firewire stack 740 This code (~1k) is freed after boot. By then, the firewire stack
731 in charge of the OHCI-1394 controllers should be used instead. 741 in charge of the OHCI-1394 controllers should be used instead.
732 742
733 See Documentation/debugging-via-ohci1394.txt for more information. 743 See Documentation/debugging-via-ohci1394.txt for more information.
734 744
735 config FIREWIRE_OHCI_REMOTE_DMA 745 config FIREWIRE_OHCI_REMOTE_DMA
736 bool "Remote debugging over FireWire with firewire-ohci" 746 bool "Remote debugging over FireWire with firewire-ohci"
737 depends on FIREWIRE_OHCI 747 depends on FIREWIRE_OHCI
738 help 748 help
739 This option lets you use the FireWire bus for remote debugging 749 This option lets you use the FireWire bus for remote debugging
740 with help of the firewire-ohci driver. It enables unfiltered 750 with help of the firewire-ohci driver. It enables unfiltered
741 remote DMA in firewire-ohci. 751 remote DMA in firewire-ohci.
742 See Documentation/debugging-via-ohci1394.txt for more information. 752 See Documentation/debugging-via-ohci1394.txt for more information.
743 753
744 If unsure, say N. 754 If unsure, say N.
745 755
746 menuconfig BUILD_DOCSRC 756 menuconfig BUILD_DOCSRC
747 bool "Build targets in Documentation/ tree" 757 bool "Build targets in Documentation/ tree"
748 depends on HEADERS_CHECK 758 depends on HEADERS_CHECK
749 help 759 help
750 This option attempts to build objects from the source files in the 760 This option attempts to build objects from the source files in the
751 kernel Documentation/ tree. 761 kernel Documentation/ tree.
752 762
753 Say N if you are unsure. 763 Say N if you are unsure.
754 764
755 source "samples/Kconfig" 765 source "samples/Kconfig"
756 766
757 source "lib/Kconfig.kgdb" 767 source "lib/Kconfig.kgdb"
758 768