Blame view
init/main.c
20.1 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 |
/* * linux/init/main.c * * Copyright (C) 1991, 1992 Linus Torvalds * * GK 2/5/95 - Changed to support mounting root fs via NFS * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96 * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96 * Simplified starting of init: Michael A. Griffith <grif@acm.org> */ |
1da177e4c
|
11 12 13 |
#include <linux/types.h> #include <linux/module.h> #include <linux/proc_fs.h> |
1da177e4c
|
14 15 |
#include <linux/kernel.h> #include <linux/syscalls.h> |
9b5609fd7
|
16 |
#include <linux/stackprotector.h> |
1da177e4c
|
17 18 19 |
#include <linux/string.h> #include <linux/ctype.h> #include <linux/delay.h> |
1da177e4c
|
20 21 |
#include <linux/ioport.h> #include <linux/init.h> |
1da177e4c
|
22 |
#include <linux/initrd.h> |
1da177e4c
|
23 |
#include <linux/bootmem.h> |
4a7a16dc0
|
24 |
#include <linux/acpi.h> |
1da177e4c
|
25 |
#include <linux/tty.h> |
1da177e4c
|
26 27 |
#include <linux/percpu.h> #include <linux/kmod.h> |
db64fe022
|
28 |
#include <linux/vmalloc.h> |
1da177e4c
|
29 |
#include <linux/kernel_stat.h> |
d7cd56111
|
30 |
#include <linux/start_kernel.h> |
1da177e4c
|
31 |
#include <linux/security.h> |
3d4422332
|
32 |
#include <linux/smp.h> |
1da177e4c
|
33 34 35 36 37 38 39 |
#include <linux/profile.h> #include <linux/rcupdate.h> #include <linux/moduleparam.h> #include <linux/kallsyms.h> #include <linux/writeback.h> #include <linux/cpu.h> #include <linux/cpuset.h> |
ddbcc7e8e
|
40 |
#include <linux/cgroup.h> |
1da177e4c
|
41 |
#include <linux/efi.h> |
906568c9c
|
42 |
#include <linux/tick.h> |
6168a702a
|
43 |
#include <linux/interrupt.h> |
c757249af
|
44 |
#include <linux/taskstats_kern.h> |
ca74e92b4
|
45 |
#include <linux/delayacct.h> |
1da177e4c
|
46 47 48 49 |
#include <linux/unistd.h> #include <linux/rmap.h> #include <linux/mempolicy.h> #include <linux/key.h> |
b6cd0b772
|
50 |
#include <linux/buffer_head.h> |
94b6da5ab
|
51 |
#include <linux/page_cgroup.h> |
9a11b49a8
|
52 |
#include <linux/debug_locks.h> |
3ac7fe5a4
|
53 |
#include <linux/debugobjects.h> |
fbb9ce953
|
54 |
#include <linux/lockdep.h> |
3c7b4e6b8
|
55 |
#include <linux/kmemleak.h> |
84d737866
|
56 |
#include <linux/pid_namespace.h> |
1f21782e6
|
57 |
#include <linux/device.h> |
73c279927
|
58 |
#include <linux/kthread.h> |
e6fe6649b
|
59 |
#include <linux/sched.h> |
a1c9eea9e
|
60 |
#include <linux/signal.h> |
199f0ca51
|
61 |
#include <linux/idr.h> |
0b4b3827d
|
62 |
#include <linux/kgdb.h> |
68bf21aa1
|
63 |
#include <linux/ftrace.h> |
22a9d6456
|
64 |
#include <linux/async.h> |
dfec072ec
|
65 |
#include <linux/kmemcheck.h> |
6ae6996a4
|
66 |
#include <linux/sfi.h> |
2b2af54a5
|
67 |
#include <linux/shmem_fs.h> |
5a0e3ad6a
|
68 |
#include <linux/slab.h> |
24a24bb6f
|
69 |
#include <linux/perf_event.h> |
1da177e4c
|
70 71 72 73 |
#include <asm/io.h> #include <asm/bugs.h> #include <asm/setup.h> |
a940199f2
|
74 |
#include <asm/sections.h> |
37b73c828
|
75 |
#include <asm/cacheflush.h> |
1da177e4c
|
76 |
|
1da177e4c
|
77 78 79 |
#ifdef CONFIG_X86_LOCAL_APIC #include <asm/smp.h> #endif |
aae5f662a
|
80 |
static int kernel_init(void *); |
1da177e4c
|
81 82 |
extern void init_IRQ(void); |
1da177e4c
|
83 84 85 |
extern void fork_init(unsigned long); extern void mca_init(void); extern void sbus_init(void); |
1da177e4c
|
86 87 88 |
extern void prio_tree_init(void); extern void radix_tree_init(void); extern void free_initmem(void); |
37b73c828
|
89 90 91 |
#ifndef CONFIG_DEBUG_RODATA static inline void mark_rodata_ro(void) { } #endif |
1da177e4c
|
92 93 94 95 |
#ifdef CONFIG_TC extern void tc_init(void); #endif |
2ce802f62
|
96 97 98 99 100 101 102 103 |
/* * Debug helper: via this flag we know that we are in 'early bootup code' * where only the boot processor is running with IRQ disabled. This means * two things - IRQ must not be enabled before the flag is cleared and some * operations which are not allowed with IRQ disabled are allowed while the * flag is set. */ bool early_boot_irqs_disabled __read_mostly; |
a68260483
|
104 |
enum system_states system_state __read_mostly; |
1da177e4c
|
105 106 107 108 109 110 111 112 113 114 |
EXPORT_SYMBOL(system_state); /* * Boot command-line arguments */ #define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT #define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT extern void time_init(void); /* Default late time init is NULL. archs can override this later. */ |
d2e3192b6
|
115 |
void (*__initdata late_time_init)(void); |
1da177e4c
|
116 |
extern void softirq_init(void); |
30d7e0d46
|
117 118 119 120 121 122 |
/* Untouched command line saved by arch-specific code. */ char __initdata boot_command_line[COMMAND_LINE_SIZE]; /* Untouched saved command line (eg. for /proc) */ char *saved_command_line; /* Command line for parameter parsing */ static char *static_command_line; |
1da177e4c
|
123 124 |
static char *execute_command; |
ffdfc4097
|
125 |
static char *ramdisk_execute_command; |
1da177e4c
|
126 |
|
8b3b29550
|
127 128 129 130 131 132 133 134 135 136 137 |
/* * If set, this is an indication to the drivers that reset the underlying * device before going ahead with the initialization otherwise driver might * rely on the BIOS and skip the reset operation. * * This is useful if kernel is booting in an unreliable environment. * For ex. kdump situaiton where previous kernel has crashed, BIOS has been * skipped and devices will be in unknown state. */ unsigned int reset_devices; EXPORT_SYMBOL(reset_devices); |
1da177e4c
|
138 |
|
7e96287dd
|
139 140 141 142 143 144 145 |
/*
 * "reset_devices" boot flag handler: set the global reset_devices flag
 * so drivers know to hard-reset their hardware before initializing it
 * (e.g. in a kdump kernel where the BIOS was skipped).
 * Returns 1 to mark the option as handled.
 */
static int __init set_reset_devices(char *str)
{
	reset_devices = 1;
	return 1;
}

__setup("reset_devices", set_reset_devices);
d7627467b
|
146 147 |
static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; |
1da177e4c
|
148 |
static const char *panic_later, *panic_param; |
914dcaa84
|
149 |
extern const struct obs_kernel_param __setup_start[], __setup_end[]; |
1da177e4c
|
150 151 152 |
static int __init obsolete_checksetup(char *line) { |
914dcaa84
|
153 |
const struct obs_kernel_param *p; |
33df0d19e
|
154 |
int had_early_param = 0; |
1da177e4c
|
155 156 157 158 159 160 |
p = __setup_start; do { int n = strlen(p->str); if (!strncmp(line, p->str, n)) { if (p->early) { |
33df0d19e
|
161 162 163 164 |
/* Already done in parse_early_param? * (Needs exact match on param part). * Keep iterating, as we can have early * params and __setups of same names 8( */ |
1da177e4c
|
165 |
if (line[n] == '\0' || line[n] == '=') |
33df0d19e
|
166 |
had_early_param = 1; |
1da177e4c
|
167 168 169 170 171 172 173 174 175 176 |
} else if (!p->setup_func) { printk(KERN_WARNING "Parameter %s is obsolete," " ignored ", p->str); return 1; } else if (p->setup_func(line + n)) return 1; } p++; } while (p < __setup_end); |
33df0d19e
|
177 178 |
return had_early_param; |
1da177e4c
|
179 180 181 182 183 184 185 186 187 188 189 190 |
} /* * This should be approx 2 Bo*oMips to start (note initial shift), and will * still work even if initially too large, it will just take slightly longer */ unsigned long loops_per_jiffy = (1<<12); EXPORT_SYMBOL(loops_per_jiffy); static int __init debug_kernel(char *str) { |
1da177e4c
|
191 |
console_loglevel = 10; |
f6f21c814
|
192 |
return 0; |
1da177e4c
|
193 194 195 196 |
} static int __init quiet_kernel(char *str) { |
1da177e4c
|
197 |
console_loglevel = 4; |
f6f21c814
|
198 |
return 0; |
1da177e4c
|
199 |
} |
f6f21c814
|
200 201 |
early_param("debug", debug_kernel); early_param("quiet", quiet_kernel); |
1da177e4c
|
202 203 204 205 |
static int __init loglevel(char *str) { get_option(&str, &console_loglevel); |
d9d4fcfe5
|
206 |
return 0; |
1da177e4c
|
207 |
} |
f6f21c814
|
208 |
early_param("loglevel", loglevel); |
1da177e4c
|
209 210 211 |
/*
 * Unknown boot options get handed to init, unless they look like
 * unused parameters (modprobe will find them in /proc/cmdline).
 */
static int __init unknown_bootoption(char *param, char *val)
{
	/* Change NUL term back to "=", to make "param" the whole string. */
	if (val) {
		/* param=val or param="val"? */
		if (val == param+strlen(param)+1)
			val[-1] = '=';
		else if (val == param+strlen(param)+2) {
			/* Quoted form: shift the value left over the quote. */
			val[-2] = '=';
			memmove(val-1, val, strlen(val)+1);
			val--;
		} else
			BUG();
	}

	/* Handle obsolete-style parameters */
	if (obsolete_checksetup(param))
		return 0;

	/* Unused module parameter. */
	if (strchr(param, '.') && (!val || strchr(param, '.') < val))
		return 0;

	/* A panic is already pending; don't bother collecting more. */
	if (panic_later)
		return 0;

	if (val) {
		/* Environment option: goes into init's envp, replacing an
		 * earlier value of the same variable if one exists. */
		unsigned int i;
		for (i = 0; envp_init[i]; i++) {
			if (i == MAX_INIT_ENVS) {
				panic_later = "Too many boot env vars at `%s'";
				panic_param = param;
			}
			if (!strncmp(param, envp_init[i], val - param))
				break;
		}
		envp_init[i] = param;
	} else {
		/* Command line option: appended to init's argv. */
		unsigned int i;
		for (i = 0; argv_init[i]; i++) {
			if (i == MAX_INIT_ARGS) {
				panic_later = "Too many boot init vars at `%s'";
				panic_param = param;
			}
		}
		argv_init[i] = param;
	}
	return 0;
}
12d6f21ea
|
264 265 266 |
#ifdef CONFIG_DEBUG_PAGEALLOC int __read_mostly debug_pagealloc_enabled = 0; #endif |
1da177e4c
|
267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 |
/*
 * Handler for the "init=" boot option: remember which binary to exec
 * as init, and discard every argument seen before it.
 *
 * In case LILO is going to boot us with default command line, it
 * prepends "auto" before the whole cmdline which makes the shell think
 * it should execute a script with such name.  So we ignore all
 * arguments entered _before_ init=... [MJ]
 */
static int __init init_setup(char *str)
{
	unsigned int idx;

	execute_command = str;
	for (idx = 1; idx < MAX_INIT_ARGS; idx++)
		argv_init[idx] = NULL;
	return 1;
}
__setup("init=", init_setup);
ffdfc4097
|
283 284 285 286 287 288 289 290 291 292 293 |
/*
 * Handler for the "rdinit=" boot option: remember which binary inside
 * the initramfs to run as init, and discard earlier arguments (see the
 * "auto" comment in init_setup).
 */
static int __init rdinit_setup(char *str)
{
	unsigned int idx;

	ramdisk_execute_command = str;
	for (idx = 1; idx < MAX_INIT_ARGS; idx++)
		argv_init[idx] = NULL;
	return 1;
}
__setup("rdinit=", rdinit_setup);
1da177e4c
|
294 |
#ifndef CONFIG_SMP

/* Uniprocessor build: the boot CPU is the only CPU we will bring up. */
static const unsigned int setup_max_cpus = NR_CPUS;

#ifdef CONFIG_X86_LOCAL_APIC
static void __init smp_init(void)
{
	/* UP x86 still needs its local APIC initialized. */
	APIC_init_uniprocessor();
}
#else
#define smp_init()	do { } while (0)
#endif

/* No-op stubs; the SMP versions live in kernel/smp.c and arch code. */
static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }

#endif

/*
30d7e0d46
|
309 310 311 312 313 314 315 316 317 318 319 320 321 322 |
* We need to store the untouched command line for future reference.
 * We also need to store the touched command line since the parameter
 * parsing is performed in place, and we should allow a component to
 * store reference of name/value for future reference.
 */
static void __init setup_command_line(char *command_line)
{
	/* alloc_bootmem() panics on failure, so no NULL checks here. */
	saved_command_line = alloc_bootmem(strlen (boot_command_line)+1);
	static_command_line = alloc_bootmem(strlen (command_line)+1);
	strcpy (saved_command_line, boot_command_line);
	strcpy (static_command_line, command_line);
}

/*
1da177e4c
|
323 324 325 326 327 328 329 |
* We need to finalize in a non-__init function or else race conditions
 * between the root thread and the init thread may cause start_kernel to
 * be reaped by free_initmem before the root thread has proceeded to
 * cpu_idle.
 *
 * gcc-3.4 accidentally inlines this function, so use noinline.
 */

/* Signalled once kthreadd is running; kernel_init() waits on it. */
static __initdata DECLARE_COMPLETION(kthreadd_done);

static noinline void __init_refok rest_init(void)
{
	int pid;

	rcu_scheduler_starting();
	/*
	 * We need to spawn init first so that it obtains pid 1, however
	 * the init task will end up wanting to create kthreads, which, if
	 * we schedule it before we create kthreadd, will OOPS.
	 */
	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
	numa_default_policy();
	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
	/* RCU read lock spans the pid lookup, per find_task_by_pid_ns rules. */
	rcu_read_lock();
	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
	rcu_read_unlock();
	complete(&kthreadd_done);

	/*
	 * The boot idle thread must execute schedule()
	 * at least once to get things moving:
	 */
	init_idle_bootup_task(current);
	preempt_enable_no_resched();
	schedule();

	/* At this point, we can enable user mode helper functionality */
	usermodehelper_enable();

	/* Call into cpu_idle with preempt disabled */
	preempt_disable();
	cpu_idle();
}
1da177e4c
|
363 364 365 366 |
/* Check for early params. */
static int __init do_early_param(char *param, char *val)
{
	const struct obs_kernel_param *p;

	for (p = __setup_start; p < __setup_end; p++) {
		/* "console" is additionally routed to "earlycon" handlers. */
		if ((p->early && strcmp(param, p->str) == 0) ||
		    (strcmp(param, "console") == 0 &&
		     strcmp(p->str, "earlycon") == 0)
		) {
			if (p->setup_func(val) != 0)
				printk(KERN_WARNING
				       "Malformed early option '%s'\n", param);
		}
	}
	/* We accept everything at this stage. */
	return 0;
}
13977091a
|
383 384 385 386 |
/*
 * Scan @cmdline and invoke any registered early-param handlers.
 * Called by parse_early_param(), and directly by arch code that has a
 * separate command line to scan.  Note: parsing mutates @cmdline.
 */
void __init parse_early_options(char *cmdline)
{
	parse_args("early options", cmdline, NULL, 0, do_early_param);
}
1da177e4c
|
387 388 389 390 391 392 393 394 395 396 |
/* Arch code calls this early on, or if not, just before other parsing. */
void __init parse_early_param(void)
{
	/* Guard so the scan runs at most once even if called twice. */
	static __initdata int done = 0;
	static __initdata char tmp_cmdline[COMMAND_LINE_SIZE];

	if (done)
		return;

	/* All fall through to do_early_param. */
	/* Work on a copy: parse_args() modifies the buffer in place. */
	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_early_options(tmp_cmdline);
	done = 1;
}

/*
 * Activate the first processor.
 */
44fd22992
|
405 406 407 408 |
static void __init boot_cpu_init(void)
{
	int this_cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(this_cpu, true);
	set_cpu_active(this_cpu, true);
	set_cpu_present(this_cpu, true);
	set_cpu_possible(this_cpu, true);
}
839ad62e7
|
414 |
/*
 * Weak default: arches that number their CPUs override this to record
 * the boot processor's id before any per-cpu accesses happen.
 */
void __init __weak smp_setup_processor_id(void)
{
}
8c9843e57
|
417 418 419 |
/* Weak default; arches with a dedicated thread_info cache override it. */
void __init __weak thread_info_cache_init(void) { }
444f478f6
|
420 421 422 423 424 |
/*
 * Set up kernel memory allocators
 */
static void __init mm_init(void)
{
	/*
	 * page_cgroup requires continuous pages as memmap
	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
	 */
	page_cgroup_init_flatmem();
	mem_init();
	kmem_cache_init();
	percpu_init_late();
	pgtable_cache_init();
	vmalloc_init();
}
1da177e4c
|
436 437 438 |
/*
 * The C-level kernel entry point: every architecture's head code jumps
 * here with the boot CPU running, IRQs off and only boot-time page
 * tables in place.  Initializes every core subsystem in strict
 * dependency order, then hands off to rest_init().
 */
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern const struct kernel_param __start___param[], __stop___param[];

	smp_setup_processor_id();

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	debug_objects_early_init();

	/*
	 * Set up the the initial canary ASAP:
	 */
	boot_init_stack_canary();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_disabled = true;

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE "%s", linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	mm_init_cpumask(&init_mm);
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	build_all_zonelists(NULL);
	page_alloc_init();

	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	setup_log_buf(0);
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	idr_init_cache();
	perf_event_init();
	rcu_init();
	radix_tree_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	prio_tree_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	call_function_init();
	if (!irqs_disabled())
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
				 "enabled early\n");
	early_boot_irqs_disabled = false;
	local_irq_enable();

	/* Interrupts are enabled now so all GFP allocations are safe. */
	gfp_allowed_mask = __GFP_BITS_MASK;

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_cgroup_init();
	enable_debug_pagealloc();
	debug_objects_mem_init();
	kmemleak_init();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(totalram_pages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	dbg_late_init();
	vfs_caches_init(totalram_pages);
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */
	sfi_init_late();

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
b99b87f70
|
610 611 612 613 |
/* Invoke every constructor function linked into the kernel image. */
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
	ctor_fn_t *ctor = (ctor_fn_t *) __ctors_start;

	while (ctor < (ctor_fn_t *) __ctors_end) {
		(*ctor)();
		ctor++;
	}
#endif
}
22a9d6456
|
620 |
int initcall_debug; |
d0ea3d7d2
|
621 |
core_param(initcall_debug, initcall_debug, bool, 0644); |
1da177e4c
|
622 |
|
4a683bf94
|
623 |
static char msgbuf[64]; |
4a683bf94
|
624 |
|
e44612713
|
625 |
/*
 * Run one initcall with instrumentation: log the call, time it, and
 * report the return value and duration (approx usecs: ns >> 10).
 */
static int __init_or_module do_one_initcall_debug(initcall_t fn)
{
	ktime_t calltime, delta, rettime;
	unsigned long long duration;
	int ret;

	printk(KERN_DEBUG "calling  %pF @ %i\n", fn, task_pid_nr(current));
	calltime = ktime_get();
	ret = fn();
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n",
	       fn, ret, duration);

	return ret;
}
e44612713
|
644 |
/*
 * Call a single initcall (with timing when initcall_debug is set) and
 * complain if it returned an error, left preemption unbalanced, or
 * returned with IRQs disabled — repairing the latter two so boot can
 * continue.  Returns the initcall's return value.
 */
int __init_or_module do_one_initcall(initcall_t fn)
{
	int count = preempt_count();
	int ret;

	if (initcall_debug)
		ret = do_one_initcall_debug(fn);
	else
		ret = fn();

	msgbuf[0] = 0;

	/* -ENODEV just means "no such hardware"; not worth reporting. */
	if (ret && ret != -ENODEV && initcall_debug)
		sprintf(msgbuf, "error code %d ", ret);

	if (preempt_count() != count) {
		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
		preempt_count() = count;
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	if (msgbuf[0]) {
		printk("initcall %pF returned with %s\n", fn, msgbuf);
	}

	return ret;
}
c2147a509
|
674 |
extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[]; |
e0df154f4
|
675 676 677 |
/* Run every ordinary (non-early) initcall in link order. */
static void __init do_initcalls(void)
{
	initcall_t *call = __early_initcall_end;

	while (call < __initcall_end) {
		do_one_initcall(*call);
		call++;
	}
}

/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
	cpuset_init_smp();
	usermodehelper_init();
	shmem_init();
	driver_init();
	init_irq_proc();
	do_ctors();
	do_initcalls();
}
7babe8db9
|
701 |
/* Run the initcalls registered to execute before SMP bring-up. */
static void __init do_pre_smp_initcalls(void)
{
	initcall_t *call = __initcall_start;

	while (call < __early_initcall_end) {
		do_one_initcall(*call);
		call++;
	}
}
d7627467b
|
708 |
/*
 * Exec @init_filename as PID 1's init with the boot-assembled
 * argv/envp.  Only returns if kernel_execve() fails.
 */
static void run_init_process(const char *init_filename)
{
	argv_init[0] = init_filename;
	kernel_execve(init_filename, argv_init, envp_init);
}
ee5bfa642
|
713 714 715 |
/* This is a non __init function. Force it to be noinline otherwise gcc * makes it inline to init() and it becomes part of init.text section */ |
f99ebf0a8
|
716 |
static noinline int init_post(void) |
ee5bfa642
|
717 |
{ |
22a9d6456
|
718 719 |
/* need to finish all async __init code before freeing the memory */ async_synchronize_full(); |
ee5bfa642
|
720 |
free_initmem(); |
ee5bfa642
|
721 722 723 |
mark_rodata_ro(); system_state = SYSTEM_RUNNING; numa_default_policy(); |
ee5bfa642
|
724 |
|
fae5fa44f
|
725 |
current->signal->flags |= SIGNAL_UNKILLABLE; |
ee5bfa642
|
726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 |
if (ramdisk_execute_command) { run_init_process(ramdisk_execute_command); printk(KERN_WARNING "Failed to execute %s ", ramdisk_execute_command); } /* * We try each of these until one succeeds. * * The Bourne shell can be used instead of init if we are * trying to recover a really broken machine. */ if (execute_command) { run_init_process(execute_command); printk(KERN_WARNING "Failed to execute %s. Attempting " "defaults... ", execute_command); } run_init_process("/sbin/init"); run_init_process("/etc/init"); run_init_process("/bin/init"); run_init_process("/bin/sh"); |
9a85b8d60
|
749 750 |
panic("No init found. Try passing init= option to kernel. " "See Linux Documentation/init.txt for guidance."); |
ee5bfa642
|
751 |
} |
aae5f662a
|
752 |
/*
 * The init kernel thread (becomes PID 1): finishes SMP bring-up, runs
 * all initcalls, mounts the root filesystem, then execs userspace init
 * via init_post().
 */
static int __init kernel_init(void * unused)
{
	/*
	 * Wait until kthreadd is all set-up.
	 */
	wait_for_completion(&kthreadd_done);
	/*
	 * init can allocate pages on any node
	 */
	set_mems_allowed(node_states[N_HIGH_MEMORY]);
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	cad_pid = task_pid(current);

	smp_prepare_cpus(setup_max_cpus);

	do_pre_smp_initcalls();
	lockup_detector_init();

	smp_init();
	sched_init_smp();

	do_basic_setup();

	/* Open the /dev/console on the rootfs, this should never fail */
	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		printk(KERN_WARNING "Warning: unable to open an initial console.\n");

	/* stdin is fd 0; duplicate it onto stdout (1) and stderr (2). */
	(void) sys_dup(0);
	(void) sys_dup(0);
	/*
	 * check if there is an early userspace init. If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 */

	init_post();
	return 0;
}