Blame view
init/main.c
21.9 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 |
/* * linux/init/main.c * * Copyright (C) 1991, 1992 Linus Torvalds * * GK 2/5/95 - Changed to support mounting root fs via NFS * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96 * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96 * Simplified starting of init: Michael A. Griffith <grif@acm.org> */ |
1da177e4c
|
11 12 13 |
#include <linux/types.h> #include <linux/module.h> #include <linux/proc_fs.h> |
1da177e4c
|
14 15 |
#include <linux/kernel.h> #include <linux/syscalls.h> |
9b5609fd7
|
16 |
#include <linux/stackprotector.h> |
1da177e4c
|
17 18 19 |
#include <linux/string.h> #include <linux/ctype.h> #include <linux/delay.h> |
1da177e4c
|
20 21 22 23 |
#include <linux/ioport.h> #include <linux/init.h> #include <linux/smp_lock.h> #include <linux/initrd.h> |
1da177e4c
|
24 |
#include <linux/bootmem.h> |
4a7a16dc0
|
25 |
#include <linux/acpi.h> |
1da177e4c
|
26 |
#include <linux/tty.h> |
1da177e4c
|
27 28 |
#include <linux/percpu.h> #include <linux/kmod.h> |
db64fe022
|
29 |
#include <linux/vmalloc.h> |
1da177e4c
|
30 |
#include <linux/kernel_stat.h> |
d7cd56111
|
31 |
#include <linux/start_kernel.h> |
1da177e4c
|
32 |
#include <linux/security.h> |
3d4422332
|
33 |
#include <linux/smp.h> |
1da177e4c
|
34 35 36 37 38 39 40 41 |
#include <linux/workqueue.h> #include <linux/profile.h> #include <linux/rcupdate.h> #include <linux/moduleparam.h> #include <linux/kallsyms.h> #include <linux/writeback.h> #include <linux/cpu.h> #include <linux/cpuset.h> |
ddbcc7e8e
|
42 |
#include <linux/cgroup.h> |
1da177e4c
|
43 |
#include <linux/efi.h> |
906568c9c
|
44 |
#include <linux/tick.h> |
6168a702a
|
45 |
#include <linux/interrupt.h> |
c757249af
|
46 |
#include <linux/taskstats_kern.h> |
ca74e92b4
|
47 |
#include <linux/delayacct.h> |
1da177e4c
|
48 49 50 51 |
#include <linux/unistd.h> #include <linux/rmap.h> #include <linux/mempolicy.h> #include <linux/key.h> |
b6cd0b772
|
52 |
#include <linux/buffer_head.h> |
94b6da5ab
|
53 |
#include <linux/page_cgroup.h> |
9a11b49a8
|
54 |
#include <linux/debug_locks.h> |
3ac7fe5a4
|
55 |
#include <linux/debugobjects.h> |
fbb9ce953
|
56 |
#include <linux/lockdep.h> |
3c7b4e6b8
|
57 |
#include <linux/kmemleak.h> |
84d737866
|
58 |
#include <linux/pid_namespace.h> |
1f21782e6
|
59 |
#include <linux/device.h> |
73c279927
|
60 |
#include <linux/kthread.h> |
e6fe6649b
|
61 |
#include <linux/sched.h> |
a1c9eea9e
|
62 |
#include <linux/signal.h> |
199f0ca51
|
63 |
#include <linux/idr.h> |
0b4b3827d
|
64 |
#include <linux/kgdb.h> |
68bf21aa1
|
65 |
#include <linux/ftrace.h> |
22a9d6456
|
66 |
#include <linux/async.h> |
dfec072ec
|
67 |
#include <linux/kmemcheck.h> |
02af61bb5
|
68 |
#include <linux/kmemtrace.h> |
6ae6996a4
|
69 |
#include <linux/sfi.h> |
2b2af54a5
|
70 |
#include <linux/shmem_fs.h> |
5a0e3ad6a
|
71 |
#include <linux/slab.h> |
3f5ec1369
|
72 |
#include <trace/boot.h> |
1da177e4c
|
73 74 75 76 |
#include <asm/io.h> #include <asm/bugs.h> #include <asm/setup.h> |
a940199f2
|
77 |
#include <asm/sections.h> |
37b73c828
|
78 |
#include <asm/cacheflush.h> |
1da177e4c
|
79 |
|
1da177e4c
|
80 81 82 |
#ifdef CONFIG_X86_LOCAL_APIC #include <asm/smp.h> #endif |
aae5f662a
|
83 |
static int kernel_init(void *); |
1da177e4c
|
84 85 |
extern void init_IRQ(void); |
1da177e4c
|
86 87 88 |
extern void fork_init(unsigned long); extern void mca_init(void); extern void sbus_init(void); |
1da177e4c
|
89 90 91 |
extern void prio_tree_init(void); extern void radix_tree_init(void); extern void free_initmem(void); |
37b73c828
|
92 93 94 |
#ifndef CONFIG_DEBUG_RODATA static inline void mark_rodata_ro(void) { } #endif |
1da177e4c
|
95 96 97 98 |
#ifdef CONFIG_TC extern void tc_init(void); #endif |
a68260483
|
99 |
enum system_states system_state __read_mostly; |
1da177e4c
|
100 101 102 103 104 105 106 107 108 109 |
EXPORT_SYMBOL(system_state); /* * Boot command-line arguments */ #define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT #define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT extern void time_init(void); /* Default late time init is NULL. archs can override this later. */ |
d2e3192b6
|
110 |
void (*__initdata late_time_init)(void); |
1da177e4c
|
111 |
extern void softirq_init(void); |
30d7e0d46
|
112 113 114 115 116 117 |
/* Untouched command line saved by arch-specific code. */ char __initdata boot_command_line[COMMAND_LINE_SIZE]; /* Untouched saved command line (eg. for /proc) */ char *saved_command_line; /* Command line for parameter parsing */ static char *static_command_line; |
1da177e4c
|
118 119 |
static char *execute_command; |
ffdfc4097
|
120 |
static char *ramdisk_execute_command; |
1da177e4c
|
121 |
|
8b3b29550
|
122 |
#ifdef CONFIG_SMP |
1da177e4c
|
123 |
/* Setup configured maximum number of CPUs to activate */ |
75cbfb97a
|
124 125 |
unsigned int setup_max_cpus = NR_CPUS; EXPORT_SYMBOL(setup_max_cpus); |
7e96287dd
|
126 127 |
/* |
1da177e4c
|
128 129 130 131 132 133 134 135 136 |
* Setup routine for controlling SMP activation * * Command-line option of "nosmp" or "maxcpus=0" will disable SMP * activation entirely (the MPS table probe still happens, though). * * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer * greater than 0, limits the maximum number of CPUs activated in * SMP mode to <NUM>. */ |
65a4e574d
|
137 138 |
void __weak arch_disable_smp_support(void) { } |
61ec7567d
|
139 |
|
1da177e4c
|
/*
 * "nosmp" early boot parameter: disable SMP activation entirely by
 * clamping the bring-up limit to zero and giving the architecture a
 * chance to disable its SMP support as well.
 */
static int __init nosmp(char *str)
{
	setup_max_cpus = 0;		/* bring up no secondary CPUs */
	arch_disable_smp_support();	/* arch-specific veto (weak hook) */
	return 0;
}
early_param("nosmp", nosmp);
1da177e4c
|
147 |
|
2b633e3fa
|
148 149 150 151 152 153 154 155 156 157 158 159 160 |
/* this is hard limit */ static int __init nrcpus(char *str) { int nr_cpus; get_option(&str, &nr_cpus); if (nr_cpus > 0 && nr_cpus < nr_cpu_ids) nr_cpu_ids = nr_cpus; return 0; } early_param("nr_cpus", nrcpus); |
1da177e4c
|
/*
 * "maxcpus=" early parameter: limit the number of CPUs activated in
 * SMP mode.  "maxcpus=0" disables SMP activation entirely, like
 * "nosmp" (the MPS table probe still happens, though).
 */
static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}
early_param("maxcpus", maxcpus);
8b3b29550
|
170 |
#else |
7463e633c
|
171 |
static const unsigned int setup_max_cpus = NR_CPUS; |
8b3b29550
|
#endif

/*
 * If set, this is an indication to the drivers that reset the underlying
 * device before going ahead with the initialization otherwise driver might
 * rely on the BIOS and skip the reset operation.
 *
 * This is useful if kernel is booting in an unreliable environment.
 * For ex. kdump situation where previous kernel has crashed, BIOS has been
 * skipped and devices will be in unknown state.
 */
unsigned int reset_devices;
EXPORT_SYMBOL(reset_devices);
1da177e4c
|
185 |
|
7e96287dd
|
/* "reset_devices" boot option: ask drivers to hard-reset their devices. */
static int __init set_reset_devices(char *str)
{
	reset_devices = 1;
	return 1;	/* handled; do not pass on to init */
}

__setup("reset_devices", set_reset_devices);
1da177e4c
|
/* argv/envp handed to the init process; slot 0 is the program name. */
static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
/* Deferred panic: message and offending parameter, reported once the
 * console is up (see start_kernel()). */
static const char *panic_later, *panic_param;

/* Linker-provided table of __setup()/early_param() registrations. */
extern struct obs_kernel_param __setup_start[], __setup_end[];

/*
 * Match "line" against the obsolete __setup()-style parameter table.
 * Returns nonzero if the option was consumed (handled now, or already
 * handled by parse_early_param()).
 */
static int __init obsolete_checksetup(char *line)
{
	struct obs_kernel_param *p;
	int had_early_param = 0;

	p = __setup_start;
	do {
		int n = strlen(p->str);
		if (!strncmp(line, p->str, n)) {
			if (p->early) {
				/* Already done in parse_early_param?
				 * (Needs exact match on param part).
				 * Keep iterating, as we can have early
				 * params and __setups of same names 8( */
				if (line[n] == '\0' || line[n] == '=')
					had_early_param = 1;
			} else if (!p->setup_func) {
				printk(KERN_WARNING "Parameter %s is obsolete,"
				       " ignored ", p->str);
				return 1;
			} else if (p->setup_func(line + n))
				return 1;
		}
		p++;
	} while (p < __setup_end);

	return had_early_param;
}

/*
 * This should be approx 2 Bo*oMips to start (note initial shift), and will
 * still work even if initially too large, it will just take slightly longer
 */
unsigned long loops_per_jiffy = (1<<12);

EXPORT_SYMBOL(loops_per_jiffy);

/* "debug" boot option: crank the console loglevel all the way up. */
static int __init debug_kernel(char *str)
{
	console_loglevel = 10;
	return 0;
}

/* "quiet" boot option: only warnings and worse reach the console. */
static int __init quiet_kernel(char *str)
{
	console_loglevel = 4;
	return 0;
}

early_param("debug", debug_kernel);
early_param("quiet", quiet_kernel);

/* "loglevel=" boot option: set the console loglevel explicitly. */
static int __init loglevel(char *str)
{
	get_option(&str, &console_loglevel);
	return 0;
}

early_param("loglevel", loglevel);
1da177e4c
|
/*
 * Unknown boot options get handed to init, unless they look like
 * unused parameters (modprobe will find them in /proc/cmdline).
 *
 * parse_args() split "param=val" in place; this routine first stitches
 * the string back together before deciding what to do with it.
 */
static int __init unknown_bootoption(char *param, char *val)
{
	/* Change NUL term back to "=", to make "param" the whole string. */
	if (val) {
		/* param=val or param="val"? */
		if (val == param+strlen(param)+1)
			val[-1] = '=';
		else if (val == param+strlen(param)+2) {
			/* quoted value: drop the opening quote too */
			val[-2] = '=';
			memmove(val-1, val, strlen(val)+1);
			val--;
		} else
			BUG();
	}

	/* Handle obsolete-style parameters */
	if (obsolete_checksetup(param))
		return 0;

	/* Unused module parameter. */
	if (strchr(param, '.') && (!val || strchr(param, '.') < val))
		return 0;

	/* A panic is already pending; no point collecting more. */
	if (panic_later)
		return 0;

	if (val) {
		/* Environment option */
		unsigned int i;
		for (i = 0; envp_init[i]; i++) {
			if (i == MAX_INIT_ENVS) {
				panic_later = "Too many boot env vars at `%s'";
				panic_param = param;
			}
			/* replace an existing var of the same name */
			if (!strncmp(param, envp_init[i], val - param))
				break;
		}
		envp_init[i] = param;
	} else {
		/* Command line option */
		unsigned int i;
		for (i = 0; argv_init[i]; i++) {
			if (i == MAX_INIT_ARGS) {
				panic_later = "Too many boot init vars at `%s'";
				panic_param = param;
			}
		}
		argv_init[i] = param;
	}
	return 0;
}
12d6f21ea
|
#ifdef CONFIG_DEBUG_PAGEALLOC
/* Set by the "debug_pagealloc" boot option; read by the page allocator. */
int __read_mostly debug_pagealloc_enabled = 0;
#endif
1da177e4c
|
315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 |
static int __init init_setup(char *str) { unsigned int i; execute_command = str; /* * In case LILO is going to boot us with default command line, * it prepends "auto" before the whole cmdline which makes * the shell think it should execute a script with such name. * So we ignore all arguments entered _before_ init=... [MJ] */ for (i = 1; i < MAX_INIT_ARGS; i++) argv_init[i] = NULL; return 1; } __setup("init=", init_setup); |
ffdfc4097
|
331 332 333 334 335 336 337 338 339 340 341 |
static int __init rdinit_setup(char *str) { unsigned int i; ramdisk_execute_command = str; /* See "auto" comment in init_setup */ for (i = 1; i < MAX_INIT_ARGS; i++) argv_init[i] = NULL; return 1; } __setup("rdinit=", rdinit_setup); |
1da177e4c
|
#ifndef CONFIG_SMP

#ifdef CONFIG_X86_LOCAL_APIC
/* UP kernel on an APIC-capable box still has to set the APIC up. */
static void __init smp_init(void)
{
	APIC_init_uniprocessor();
}
#else
#define smp_init()	do { } while (0)
#endif

static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }

#else

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
static void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/*
 * Called by boot processor to activate the rest.  Honours the
 * "maxcpus=" limit: stops bringing CPUs online once setup_max_cpus
 * are running.
 */
static void __init smp_init(void)
{
	unsigned int cpu;

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}

#endif
30d7e0d46
|
/*
 * We need to store the untouched command line for future reference.
 * We also need to store the touched command line since the parameter
 * parsing is performed in place, and we should allow a component to
 * store reference of name/value for future reference.
 */
static void __init setup_command_line(char *command_line)
{
	/* bootmem is the only allocator available this early */
	saved_command_line = alloc_bootmem(strlen (boot_command_line)+1);
	static_command_line = alloc_bootmem(strlen (command_line)+1);
	strcpy (saved_command_line, boot_command_line);
	strcpy (static_command_line, command_line);
}
1da177e4c
|
/*
 * We need to finalize in a non-__init function or else race conditions
 * between the root thread and the init thread may cause start_kernel to
 * be reaped by free_initmem before the root thread has proceeded to
 * cpu_idle.
 *
 * gcc-3.4 accidentally inlines this function, so use noinline.
 */

/* Signalled once kthreadd is running; kernel_init waits on this. */
static __initdata DECLARE_COMPLETION(kthreadd_done);

static noinline void __init_refok rest_init(void)
	__releases(kernel_lock)
{
	int pid;

	rcu_scheduler_starting();
	/*
	 * We need to spawn init first so that it obtains pid 1, however
	 * the init task will end up wanting to create kthreads, which, if
	 * we schedule it before we create kthreadd, will OOPS.
	 */
	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
	numa_default_policy();
	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
	/* RCU read lock guards the pid -> task_struct lookup */
	rcu_read_lock();
	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
	rcu_read_unlock();
	complete(&kthreadd_done);
	unlock_kernel();

	/*
	 * The boot idle thread must execute schedule()
	 * at least once to get things moving:
	 */
	init_idle_bootup_task(current);
	preempt_enable_no_resched();
	schedule();
	preempt_disable();

	/* Call into cpu_idle with preempt disabled */
	cpu_idle();
}
1da177e4c
|
/* Check for early params. */
static int __init do_early_param(char *param, char *val)
{
	struct obs_kernel_param *p;

	for (p = __setup_start; p < __setup_end; p++) {
		/* "console" is also accepted by "earlycon" handlers */
		if ((p->early && strcmp(param, p->str) == 0) ||
		    (strcmp(param, "console") == 0 &&
		     strcmp(p->str, "earlycon") == 0)
		) {
			if (p->setup_func(val) != 0)
				printk(KERN_WARNING
				       "Malformed early option '%s' ", param);
		}
	}
	/* We accept everything at this stage. */
	return 0;
}

/* Run do_early_param over an arbitrary command line. */
void __init parse_early_options(char *cmdline)
{
	parse_args("early options", cmdline, NULL, 0, do_early_param);
}

/* Arch code calls this early on, or if not, just before other parsing. */
void __init parse_early_param(void)
{
	static __initdata int done = 0;
	static __initdata char tmp_cmdline[COMMAND_LINE_SIZE];

	/* idempotent: arch code and start_kernel may both call us */
	if (done)
		return;

	/* All fall through to do_early_param. */
	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_early_options(tmp_cmdline);
	done = 1;
}

/*
 *	Activate the first processor.
 */
44fd22992
|
static void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();
	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}
839ad62e7
|
/* Weak default: archs that need a processor id set up override this. */
void __init __weak smp_setup_processor_id(void)
{
}

/* Weak default: archs with a thread_info cache override this. */
void __init __weak thread_info_cache_init(void)
{
}
444f478f6
|
/*
 * Set up kernel memory allocators
 */
static void __init mm_init(void)
{
	/*
	 * page_cgroup requires continuous pages as memmap
	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
	 */
	page_cgroup_init_flatmem();
	mem_init();
	kmem_cache_init();
	pgtable_cache_init();
	vmalloc_init();
}
1da177e4c
|
/*
 * Architecture-independent kernel entry point.  Runs on the boot CPU
 * with interrupts disabled; brings up every core subsystem in a
 * carefully ordered sequence and finishes by handing off to
 * rest_init(), which never returns.
 */
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	debug_objects_early_init();

	/*
	 * Set up the the initial canary ASAP:
	 */
	boot_init_stack_canary();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_off();
	early_init_irq_lock_class();

	/*
	 * Interrupts are still disabled. Do necessary setups, then
	 * enable them
	 */
	lock_kernel();
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE "%s", linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	build_all_zonelists(NULL);
	page_alloc_init();

	printk(KERN_NOTICE "Kernel command line: %s ", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();
	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it ");
		local_irq_disable();
	}
	rcu_init();
	radix_tree_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	prio_tree_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	if (!irqs_disabled())
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
				 "enabled early ");
	early_boot_irqs_on();
	local_irq_enable();

	/* Interrupts are enabled now so all GFP allocations are safe. */
	gfp_allowed_mask = __GFP_BITS_MASK;

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	/* Sanity check: the initrd must live above the bootmem low mark. */
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it. ",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_cgroup_init();
	enable_debug_pagealloc();
	kmemtrace_init();
	kmemleak_init();
	debug_objects_mem_init();
	idr_init_cache();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(totalram_pages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	dbg_late_init();
	vfs_caches_init(totalram_pages);
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */
	sfi_init_late();

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
b99b87f70
|
/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
	/* __ctors_start/__ctors_end are linker-provided table bounds */
	ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;

	for (; fn < (ctor_fn_t *) __ctors_end; fn++)
		(*fn)();
#endif
}
22a9d6456
|
/* "initcall_debug" boot/module option: trace and time every initcall. */
int initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
1da177e4c
|
696 |
|
4a683bf94
|
/* Scratch state shared across initcalls; boot is single-threaded here. */
static char msgbuf[64];
static struct boot_trace_call call;
static struct boot_trace_ret ret;

/*
 * Run a single initcall, optionally tracing/timing it, and complain if
 * it left the preempt count unbalanced or interrupts disabled.
 * Returns the initcall's own return value.
 */
int do_one_initcall(initcall_t fn)
{
	int count = preempt_count();	/* snapshot for imbalance check */
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		call.caller = task_pid_nr(current);
		printk("calling %pF @ %i ", fn, call.caller);
		calltime = ktime_get();
		trace_boot_call(&call, fn);
		enable_boot_trace();
	}

	ret.result = fn();

	if (initcall_debug) {
		disable_boot_trace();
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* >> 10 approximates ns -> us cheaply */
		ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
		trace_boot_ret(&ret, fn);
		printk("initcall %pF returned %d after %Ld usecs ", fn,
			ret.result, ret.duration);
	}

	msgbuf[0] = 0;

	/* -ENODEV is a normal "not applicable" result; don't report it */
	if (ret.result && ret.result != -ENODEV && initcall_debug)
		sprintf(msgbuf, "error code %d ", ret.result);

	if (preempt_count() != count) {
		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
		preempt_count() = count;	/* repair and carry on */
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();		/* repair and carry on */
	}
	if (msgbuf[0]) {
		printk("initcall %pF returned with %s ", fn, msgbuf);
	}

	return ret.result;
}
c2147a509
|
/* Linker-provided bounds of the (level-sorted) initcall table. */
extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];

/* Run every non-early initcall, in link (level) order. */
static void __init do_initcalls(void)
{
	initcall_t *fn;

	for (fn = __early_initcall_end; fn < __initcall_end; fn++)
		do_one_initcall(*fn);

	/* Make sure there is no pending stuff from the initcall sequence */
	flush_scheduled_work();
}

/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
	init_workqueues();
	cpuset_init_smp();
	usermodehelper_init();
	init_tmpfs();
	driver_init();
	init_irq_proc();
	do_ctors();
	do_initcalls();
}
7babe8db9
|
/* Run the early initcalls — those registered to fire before SMP init. */
static void __init do_pre_smp_initcalls(void)
{
	initcall_t *fn;

	for (fn = __initcall_start; fn < __early_initcall_end; fn++)
		do_one_initcall(*fn);
}
1da177e4c
|
/*
 * Exec the given program as init with the collected argv/envp.
 * Returns only if the exec failed.
 */
static void run_init_process(char *init_filename)
{
	argv_init[0] = init_filename;
	kernel_execve(init_filename, argv_init, envp_init);
}
ee5bfa642
|
790 791 792 |
/* This is a non __init function. Force it to be noinline otherwise gcc * makes it inline to init() and it becomes part of init.text section */ |
f99ebf0a8
|
793 |
static noinline int init_post(void) |
acdd052a2
|
794 |
__releases(kernel_lock) |
ee5bfa642
|
795 |
{ |
22a9d6456
|
796 797 |
/* need to finish all async __init code before freeing the memory */ async_synchronize_full(); |
ee5bfa642
|
798 799 800 801 802 |
free_initmem(); unlock_kernel(); mark_rodata_ro(); system_state = SYSTEM_RUNNING; numa_default_policy(); |
ee5bfa642
|
803 |
|
fae5fa44f
|
804 |
current->signal->flags |= SIGNAL_UNKILLABLE; |
ee5bfa642
|
805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 |
if (ramdisk_execute_command) { run_init_process(ramdisk_execute_command); printk(KERN_WARNING "Failed to execute %s ", ramdisk_execute_command); } /* * We try each of these until one succeeds. * * The Bourne shell can be used instead of init if we are * trying to recover a really broken machine. */ if (execute_command) { run_init_process(execute_command); printk(KERN_WARNING "Failed to execute %s. Attempting " "defaults... ", execute_command); } run_init_process("/sbin/init"); run_init_process("/etc/init"); run_init_process("/bin/init"); run_init_process("/bin/sh"); |
9a85b8d60
|
828 829 |
panic("No init found. Try passing init= option to kernel. " "See Linux Documentation/init.txt for guidance."); |
ee5bfa642
|
830 |
} |
aae5f662a
|
/*
 * The init thread (pid 1): finishes kernel bring-up — SMP, initcalls,
 * console, root filesystem — then execs userspace init via init_post().
 */
static int __init kernel_init(void * unused)
{
	/*
	 * Wait until kthreadd is all set-up.
	 */
	wait_for_completion(&kthreadd_done);
	lock_kernel();

	/*
	 * init can allocate pages on any node
	 */
	set_mems_allowed(node_states[N_HIGH_MEMORY]);
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed_ptr(current, cpu_all_mask);
	/*
	 * Tell the world that we're going to be the grim
	 * reaper of innocent orphaned children.
	 *
	 * We don't want people to have to make incorrect
	 * assumptions about where in the task array this
	 * can be found.
	 */
	init_pid_ns.child_reaper = current;

	cad_pid = task_pid(current);

	smp_prepare_cpus(setup_max_cpus);

	do_pre_smp_initcalls();
	start_boot_trace();

	smp_init();
	sched_init_smp();

	do_basic_setup();

	/* Open the /dev/console on the rootfs, this should never fail */
	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		printk(KERN_WARNING "Warning: unable to open an initial console. ");

	/* duplicate fd 0 twice: stdin/stdout/stderr for init */
	(void) sys_dup(0);
	(void) sys_dup(0);
	/*
	 * check if there is an early userspace init.  If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		/* no early userspace: mount the real root fs instead */
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 */

	init_post();
	return 0;
}