Blame view
init/main.c
20.2 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 |
/* * linux/init/main.c * * Copyright (C) 1991, 1992 Linus Torvalds * * GK 2/5/95 - Changed to support mounting root fs via NFS * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96 * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96 * Simplified starting of init: Michael A. Griffith <grif@acm.org> */ |
1da177e4c
|
11 12 13 |
#include <linux/types.h> #include <linux/module.h> #include <linux/proc_fs.h> |
1da177e4c
|
14 15 |
#include <linux/kernel.h> #include <linux/syscalls.h> |
9b5609fd7
|
16 |
#include <linux/stackprotector.h> |
1da177e4c
|
17 18 19 |
#include <linux/string.h> #include <linux/ctype.h> #include <linux/delay.h> |
1da177e4c
|
20 21 |
#include <linux/ioport.h> #include <linux/init.h> |
1da177e4c
|
22 |
#include <linux/initrd.h> |
1da177e4c
|
23 |
#include <linux/bootmem.h> |
4a7a16dc0
|
24 |
#include <linux/acpi.h> |
1da177e4c
|
25 |
#include <linux/tty.h> |
1da177e4c
|
26 27 |
#include <linux/percpu.h> #include <linux/kmod.h> |
db64fe022
|
28 |
#include <linux/vmalloc.h> |
1da177e4c
|
29 |
#include <linux/kernel_stat.h> |
d7cd56111
|
30 |
#include <linux/start_kernel.h> |
1da177e4c
|
31 |
#include <linux/security.h> |
3d4422332
|
32 |
#include <linux/smp.h> |
1da177e4c
|
33 34 35 36 37 38 39 |
#include <linux/profile.h> #include <linux/rcupdate.h> #include <linux/moduleparam.h> #include <linux/kallsyms.h> #include <linux/writeback.h> #include <linux/cpu.h> #include <linux/cpuset.h> |
ddbcc7e8e
|
40 |
#include <linux/cgroup.h> |
1da177e4c
|
41 |
#include <linux/efi.h> |
906568c9c
|
42 |
#include <linux/tick.h> |
6168a702a
|
43 |
#include <linux/interrupt.h> |
c757249af
|
44 |
#include <linux/taskstats_kern.h> |
ca74e92b4
|
45 |
#include <linux/delayacct.h> |
1da177e4c
|
46 47 48 49 |
#include <linux/unistd.h> #include <linux/rmap.h> #include <linux/mempolicy.h> #include <linux/key.h> |
b6cd0b772
|
50 |
#include <linux/buffer_head.h> |
94b6da5ab
|
51 |
#include <linux/page_cgroup.h> |
9a11b49a8
|
52 |
#include <linux/debug_locks.h> |
3ac7fe5a4
|
53 |
#include <linux/debugobjects.h> |
fbb9ce953
|
54 |
#include <linux/lockdep.h> |
3c7b4e6b8
|
55 |
#include <linux/kmemleak.h> |
84d737866
|
56 |
#include <linux/pid_namespace.h> |
1f21782e6
|
57 |
#include <linux/device.h> |
73c279927
|
58 |
#include <linux/kthread.h> |
e6fe6649b
|
59 |
#include <linux/sched.h> |
a1c9eea9e
|
60 |
#include <linux/signal.h> |
199f0ca51
|
61 |
#include <linux/idr.h> |
0b4b3827d
|
62 |
#include <linux/kgdb.h> |
68bf21aa1
|
63 |
#include <linux/ftrace.h> |
22a9d6456
|
64 |
#include <linux/async.h> |
dfec072ec
|
65 |
#include <linux/kmemcheck.h> |
6ae6996a4
|
66 |
#include <linux/sfi.h> |
2b2af54a5
|
67 |
#include <linux/shmem_fs.h> |
5a0e3ad6a
|
68 |
#include <linux/slab.h> |
24a24bb6f
|
69 |
#include <linux/perf_event.h> |
1da177e4c
|
70 71 72 73 |
#include <asm/io.h> #include <asm/bugs.h> #include <asm/setup.h> |
a940199f2
|
74 |
#include <asm/sections.h> |
37b73c828
|
75 |
#include <asm/cacheflush.h> |
1da177e4c
|
76 |
|
1da177e4c
|
77 78 79 |
#ifdef CONFIG_X86_LOCAL_APIC #include <asm/smp.h> #endif |
aae5f662a
|
80 |
static int kernel_init(void *); |
1da177e4c
|
81 82 |
extern void init_IRQ(void); |
1da177e4c
|
83 84 85 |
extern void fork_init(unsigned long); extern void mca_init(void); extern void sbus_init(void); |
1da177e4c
|
86 87 88 |
extern void prio_tree_init(void); extern void radix_tree_init(void); extern void free_initmem(void); |
37b73c828
|
89 90 91 |
#ifndef CONFIG_DEBUG_RODATA static inline void mark_rodata_ro(void) { } #endif |
1da177e4c
|
92 93 94 95 |
#ifdef CONFIG_TC extern void tc_init(void); #endif |
2ce802f62
|
96 97 98 99 100 101 102 103 |
/* * Debug helper: via this flag we know that we are in 'early bootup code' * where only the boot processor is running with IRQ disabled. This means * two things - IRQ must not be enabled before the flag is cleared and some * operations which are not allowed with IRQ disabled are allowed while the * flag is set. */ bool early_boot_irqs_disabled __read_mostly; |
a68260483
|
104 |
enum system_states system_state __read_mostly; |
1da177e4c
|
105 106 107 108 109 110 111 112 113 114 |
EXPORT_SYMBOL(system_state); /* * Boot command-line arguments */ #define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT #define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT extern void time_init(void); /* Default late time init is NULL. archs can override this later. */ |
d2e3192b6
|
115 |
void (*__initdata late_time_init)(void); |
1da177e4c
|
116 |
extern void softirq_init(void); |
30d7e0d46
|
117 118 119 120 121 122 |
/* Untouched command line saved by arch-specific code. */ char __initdata boot_command_line[COMMAND_LINE_SIZE]; /* Untouched saved command line (eg. for /proc) */ char *saved_command_line; /* Command line for parameter parsing */ static char *static_command_line; |
1da177e4c
|
123 124 |
static char *execute_command; |
ffdfc4097
|
125 |
static char *ramdisk_execute_command; |
1da177e4c
|
126 |
|
8b3b29550
|
127 128 129 130 131 132 133 134 135 136 137 |
/* * If set, this is an indication to the drivers that reset the underlying * device before going ahead with the initialization otherwise driver might * rely on the BIOS and skip the reset operation. * * This is useful if kernel is booting in an unreliable environment. * For ex. kdump situaiton where previous kernel has crashed, BIOS has been * skipped and devices will be in unknown state. */ unsigned int reset_devices; EXPORT_SYMBOL(reset_devices); |
1da177e4c
|
138 |
|
7e96287dd
|
139 140 141 142 143 144 145 |
static int __init set_reset_devices(char *str) { reset_devices = 1; return 1; } __setup("reset_devices", set_reset_devices); |
d7627467b
|
146 147 |
static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; |
1da177e4c
|
148 |
static const char *panic_later, *panic_param; |
914dcaa84
|
149 |
extern const struct obs_kernel_param __setup_start[], __setup_end[]; |
1da177e4c
|
150 151 152 |
static int __init obsolete_checksetup(char *line) { |
914dcaa84
|
153 |
const struct obs_kernel_param *p; |
33df0d19e
|
154 |
int had_early_param = 0; |
1da177e4c
|
155 156 157 158 |
p = __setup_start; do { int n = strlen(p->str); |
b1e4d20cb
|
159 |
if (parameqn(line, p->str, n)) { |
1da177e4c
|
160 |
if (p->early) { |
33df0d19e
|
161 162 163 164 |
/* Already done in parse_early_param? * (Needs exact match on param part). * Keep iterating, as we can have early * params and __setups of same names 8( */ |
1da177e4c
|
165 |
if (line[n] == '\0' || line[n] == '=') |
33df0d19e
|
166 |
had_early_param = 1; |
1da177e4c
|
167 168 169 170 171 172 173 174 175 176 |
} else if (!p->setup_func) { printk(KERN_WARNING "Parameter %s is obsolete," " ignored ", p->str); return 1; } else if (p->setup_func(line + n)) return 1; } p++; } while (p < __setup_end); |
33df0d19e
|
177 178 |
return had_early_param; |
1da177e4c
|
179 180 181 182 183 184 185 186 187 188 189 190 |
} /* * This should be approx 2 Bo*oMips to start (note initial shift), and will * still work even if initially too large, it will just take slightly longer */ unsigned long loops_per_jiffy = (1<<12); EXPORT_SYMBOL(loops_per_jiffy); static int __init debug_kernel(char *str) { |
1da177e4c
|
191 |
console_loglevel = 10; |
f6f21c814
|
192 |
return 0; |
1da177e4c
|
193 194 195 196 |
} static int __init quiet_kernel(char *str) { |
1da177e4c
|
197 |
console_loglevel = 4; |
f6f21c814
|
198 |
return 0; |
1da177e4c
|
199 |
} |
f6f21c814
|
200 201 |
early_param("debug", debug_kernel); early_param("quiet", quiet_kernel); |
1da177e4c
|
202 203 204 |
static int __init loglevel(char *str) { |
808bf29b9
|
205 206 207 208 209 210 211 212 213 214 215 216 217 |
int newlevel; /* * Only update loglevel value when a correct setting was passed, * to prevent blind crashes (when loglevel being set to 0) that * are quite hard to debug */ if (get_option(&str, &newlevel)) { console_loglevel = newlevel; return 0; } return -EINVAL; |
1da177e4c
|
218 |
} |
f6f21c814
|
219 |
early_param("loglevel", loglevel); |
1da177e4c
|
220 221 222 |
/*
 * Unknown boot options get handed to init, unless they look like
 * unused parameters (modprobe will find them in /proc/cmdline).
 */
static int __init unknown_bootoption(char *param, char *val)
{
	/* Change NUL term back to "=", to make "param" the whole string. */
	if (val) {
		/* param=val or param="val"? */
		if (val == param+strlen(param)+1)
			val[-1] = '=';
		else if (val == param+strlen(param)+2) {
			val[-2] = '=';
			memmove(val-1, val, strlen(val)+1);
			val--;
		} else
			BUG();
	}

	/* Handle obsolete-style parameters */
	if (obsolete_checksetup(param))
		return 0;

	/* Unused module parameter. */
	if (strchr(param, '.') && (!val || strchr(param, '.') < val))
		return 0;

	if (panic_later)
		return 0;

	if (val) {
		/* Environment option */
		unsigned int i;
		for (i = 0; envp_init[i]; i++) {
			if (i == MAX_INIT_ENVS) {
				panic_later = "Too many boot env vars at `%s'";
				panic_param = param;
			}
			if (!strncmp(param, envp_init[i], val - param))
				break;
		}
		envp_init[i] = param;
	} else {
		/* Command line option */
		unsigned int i;
		for (i = 0; argv_init[i]; i++) {
			if (i == MAX_INIT_ARGS) {
				panic_later = "Too many boot init vars at `%s'";
				panic_param = param;
			}
		}
		argv_init[i] = param;
	}
	return 0;
}

/* "init=" handler: record the init binary and drop earlier argv entries. */
static int __init init_setup(char *str)
{
	unsigned int i;

	execute_command = str;
	/*
	 * In case LILO is going to boot us with default command line,
	 * it prepends "auto" before the whole cmdline which makes
	 * the shell think it should execute a script with such name.
	 * So we ignore all arguments entered _before_ init=... [MJ]
	 */
	for (i = 1; i < MAX_INIT_ARGS; i++)
		argv_init[i] = NULL;
	return 1;
}
__setup("init=", init_setup);
ffdfc4097
|
292 293 294 295 296 297 298 299 300 301 302 |
/* "rdinit=" handler: record the initramfs init binary. */
static int __init rdinit_setup(char *str)
{
	unsigned int i;

	ramdisk_execute_command = str;
	/* See "auto" comment in init_setup */
	for (i = 1; i < MAX_INIT_ARGS; i++)
		argv_init[i] = NULL;
	return 1;
}
__setup("rdinit=", rdinit_setup);
1da177e4c
|
303 |
#ifndef CONFIG_SMP |
34db18a05
|
304 |
static const unsigned int setup_max_cpus = NR_CPUS; |
1da177e4c
|
305 306 307 308 309 310 311 312 |
#ifdef CONFIG_X86_LOCAL_APIC static void __init smp_init(void) { APIC_init_uniprocessor(); } #else #define smp_init() do { } while (0) #endif |
e0982e90c
|
313 |
static inline void setup_nr_cpu_ids(void) { } |
1da177e4c
|
314 |
static inline void smp_prepare_cpus(unsigned int maxcpus) { } |
1da177e4c
|
315 316 317 |
#endif /* |
30d7e0d46
|
318 319 320 321 322 323 324 325 326 327 328 329 330 331 |
* We need to store the untouched command line for future reference. * We also need to store the touched command line since the parameter * parsing is performed in place, and we should allow a component to * store reference of name/value for future reference. */ static void __init setup_command_line(char *command_line) { saved_command_line = alloc_bootmem(strlen (boot_command_line)+1); static_command_line = alloc_bootmem(strlen (command_line)+1); strcpy (saved_command_line, boot_command_line); strcpy (static_command_line, command_line); } /* |
1da177e4c
|
332 333 334 335 336 337 338 |
* We need to finalize in a non-__init function or else race conditions * between the root thread and the init thread may cause start_kernel to * be reaped by free_initmem before the root thread has proceeded to * cpu_idle. * * gcc-3.4 accidentally inlines this function, so use noinline. */ |
b433c3d45
|
339 |
static __initdata DECLARE_COMPLETION(kthreadd_done); |
f99ebf0a8
|
340 |
static noinline void __init_refok rest_init(void) |
1da177e4c
|
341 |
{ |
73c279927
|
342 |
int pid; |
7db905e63
|
343 |
rcu_scheduler_starting(); |
b433c3d45
|
344 |
/* |
971585692
|
345 |
* We need to spawn init first so that it obtains pid 1, however |
b433c3d45
|
346 347 348 |
* the init task will end up wanting to create kthreads, which, if * we schedule it before we create kthreadd, will OOPS. */ |
aae5f662a
|
349 |
kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); |
1da177e4c
|
350 |
numa_default_policy(); |
73c279927
|
351 |
pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); |
d11c563dd
|
352 |
rcu_read_lock(); |
5cd204550
|
353 |
kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); |
d11c563dd
|
354 |
rcu_read_unlock(); |
b433c3d45
|
355 |
complete(&kthreadd_done); |
f340c0d1a
|
356 357 358 |
/* * The boot idle thread must execute schedule() |
1df21055e
|
359 |
* at least once to get things moving: |
f340c0d1a
|
360 |
*/ |
1df21055e
|
361 |
init_idle_bootup_task(current); |
5bfb5d690
|
362 |
preempt_enable_no_resched(); |
f340c0d1a
|
363 |
schedule(); |
288d5abec
|
364 |
|
5bfb5d690
|
365 |
/* Call into cpu_idle with preempt disabled */ |
288d5abec
|
366 |
preempt_disable(); |
1da177e4c
|
367 |
cpu_idle(); |
1df21055e
|
368 |
} |
1da177e4c
|
369 370 371 372 |
/* Check for early params. */ static int __init do_early_param(char *param, char *val) { |
914dcaa84
|
373 |
const struct obs_kernel_param *p; |
1da177e4c
|
374 375 |
for (p = __setup_start; p < __setup_end; p++) { |
b1e4d20cb
|
376 |
if ((p->early && parameq(param, p->str)) || |
18a8bd949
|
377 378 379 |
(strcmp(param, "console") == 0 && strcmp(p->str, "earlycon") == 0) ) { |
1da177e4c
|
380 381 382 383 384 385 386 387 388 |
if (p->setup_func(val) != 0) printk(KERN_WARNING "Malformed early option '%s' ", param); } } /* We accept everything at this stage. */ return 0; } |
13977091a
|
389 390 391 392 |
void __init parse_early_options(char *cmdline) { parse_args("early options", cmdline, NULL, 0, do_early_param); } |
1da177e4c
|
393 394 395 396 397 398 399 400 401 402 |
/* Arch code calls this early on, or if not, just before other parsing. */ void __init parse_early_param(void) { static __initdata int done = 0; static __initdata char tmp_cmdline[COMMAND_LINE_SIZE]; if (done) return; /* All fall through to do_early_param. */ |
30d7e0d46
|
403 |
strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE); |
13977091a
|
404 |
parse_early_options(tmp_cmdline); |
1da177e4c
|
405 406 407 408 409 410 |
done = 1; } /* * Activate the first processor. */ |
44fd22992
|
411 412 413 414 |
static void __init boot_cpu_init(void) { int cpu = smp_processor_id(); /* Mark the boot cpu "present", "online" etc for SMP and UP case */ |
915441b60
|
415 |
set_cpu_online(cpu, true); |
933b0618d
|
416 |
set_cpu_active(cpu, true); |
915441b60
|
417 418 |
set_cpu_present(cpu, true); set_cpu_possible(cpu, true); |
44fd22992
|
419 |
} |
839ad62e7
|
420 |
void __init __weak smp_setup_processor_id(void) |
033ab7f8e
|
421 422 |
{ } |
8c9843e57
|
423 424 425 |
void __init __weak thread_info_cache_init(void) { } |
444f478f6
|
426 427 428 429 430 |
/* * Set up kernel memory allocators */ static void __init mm_init(void) { |
ca371c0d7
|
431 432 433 434 435 |
/* * page_cgroup requires countinous pages as memmap * and it's bigger than MAX_ORDER unless SPARSEMEM. */ page_cgroup_init_flatmem(); |
444f478f6
|
436 437 |
mem_init(); kmem_cache_init(); |
099a19d91
|
438 |
percpu_init_late(); |
c868d5501
|
439 |
pgtable_cache_init(); |
444f478f6
|
440 441 |
vmalloc_init(); } |
1da177e4c
|
442 443 444 |
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern const struct kernel_param __start___param[], __stop___param[];

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	smp_setup_processor_id();
	debug_objects_early_init();

	/*
	 * Set up the the initial canary ASAP:
	 */
	boot_init_stack_canary();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_disabled = true;

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	tick_init();
	boot_cpu_init();
	page_address_init();
	printk(KERN_NOTICE "%s", linux_banner);
	setup_arch(&command_line);
	mm_init_owner(&init_mm, &init_task);
	mm_init_cpumask(&init_mm);
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	build_all_zonelists(NULL);
	page_alloc_init();

	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	parse_args("Booting kernel", static_command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);

	jump_label_init();

	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	setup_log_buf(0);
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	if (!irqs_disabled()) {
		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
				"enabled *very* early, fixing it\n");
		local_irq_disable();
	}
	idr_init_cache();
	perf_event_init();
	rcu_init();
	radix_tree_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	prio_tree_init();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	profile_init();
	call_function_init();
	if (!irqs_disabled())
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
				 "enabled early\n");
	early_boot_irqs_disabled = false;
	local_irq_enable();

	/* Interrupts are enabled now so all GFP allocations are safe. */
	gfp_allowed_mask = __GFP_BITS_MASK;

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_cgroup_init();
	debug_objects_mem_init();
	kmemleak_init();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	thread_info_cache_init();
	cred_init();
	fork_init(totalram_pages);
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	dbg_late_init();
	vfs_caches_init(totalram_pages);
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	cgroup_init();
	cpuset_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */
	sfi_init_late();

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
b99b87f70
|
617 618 619 620 |
/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
	ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;

	for (; fn < (ctor_fn_t *) __ctors_end; fn++)
		(*fn)();
#endif
}
2329abfa3
|
627 |
bool initcall_debug; |
d0ea3d7d2
|
628 |
core_param(initcall_debug, initcall_debug, bool, 0644); |
1da177e4c
|
629 |
|
4a683bf94
|
630 |
static char msgbuf[64]; |
4a683bf94
|
631 |
|
e44612713
|
632 |
/* Run one initcall with timing/logging around it (initcall_debug path). */
static int __init_or_module do_one_initcall_debug(initcall_t fn)
{
	ktime_t calltime, delta, rettime;
	unsigned long long duration;
	int ret;

	printk(KERN_DEBUG "calling  %pF @ %i\n", fn, task_pid_nr(current));
	calltime = ktime_get();
	ret = fn();
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	/* ns >> 10 is a cheap approximation of microseconds. */
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n", fn,
		ret, duration);

	return ret;
}
e44612713
|
651 |
/*
 * Run a single initcall, reporting (and repairing) any preempt-count
 * imbalance or disabled interrupts it leaves behind.
 */
int __init_or_module do_one_initcall(initcall_t fn)
{
	int count = preempt_count();
	int ret;

	if (initcall_debug)
		ret = do_one_initcall_debug(fn);
	else
		ret = fn();

	msgbuf[0] = 0;

	if (ret && ret != -ENODEV && initcall_debug)
		sprintf(msgbuf, "error code %d ", ret);

	if (preempt_count() != count) {
		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
		preempt_count() = count;
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	if (msgbuf[0]) {
		printk("initcall %pF returned with %s\n", fn, msgbuf);
	}

	return ret;
}
c2147a509
|
681 |
extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[]; |
e0df154f4
|
682 683 684 |
static void __init do_initcalls(void) { |
196a15b4e
|
685 |
initcall_t *fn; |
e0df154f4
|
686 |
|
196a15b4e
|
687 688 |
for (fn = __early_initcall_end; fn < __initcall_end; fn++) do_one_initcall(*fn); |
1da177e4c
|
689 690 691 692 693 694 695 696 697 698 699 |
} /* * Ok, the machine is now initialized. None of the devices * have been touched yet, but the CPU subsystem is up and * running, and memory and process management works. * * Now we can finally start doing some real work.. */ static void __init do_basic_setup(void) { |
759ee0915
|
700 |
cpuset_init_smp(); |
1da177e4c
|
701 |
usermodehelper_init(); |
41ffe5d5c
|
702 |
shmem_init(); |
1da177e4c
|
703 |
driver_init(); |
b04c3afb2
|
704 |
init_irq_proc(); |
b99b87f70
|
705 |
do_ctors(); |
d5767c535
|
706 |
usermodehelper_enable(); |
b0f84374b
|
707 |
do_initcalls(); |
1da177e4c
|
708 |
} |
7babe8db9
|
709 |
/* Run the early initcalls — those that must precede SMP bring-up. */
static void __init do_pre_smp_initcalls(void)
{
	initcall_t *fn;

	for (fn = __initcall_start; fn < __early_initcall_end; fn++)
		do_one_initcall(*fn);
}
d7627467b
|
716 |
static void run_init_process(const char *init_filename) |
1da177e4c
|
717 718 |
{ argv_init[0] = init_filename; |
676085679
|
719 |
kernel_execve(init_filename, argv_init, envp_init); |
1da177e4c
|
720 |
} |
ee5bfa642
|
721 722 723 |
/* This is a non __init function. Force it to be noinline otherwise gcc * makes it inline to init() and it becomes part of init.text section */ |
f99ebf0a8
|
724 |
static noinline int init_post(void) |
ee5bfa642
|
725 |
{ |
22a9d6456
|
726 727 |
/* need to finish all async __init code before freeing the memory */ async_synchronize_full(); |
ee5bfa642
|
728 |
free_initmem(); |
ee5bfa642
|
729 730 731 |
mark_rodata_ro(); system_state = SYSTEM_RUNNING; numa_default_policy(); |
ee5bfa642
|
732 |
|
fae5fa44f
|
733 |
current->signal->flags |= SIGNAL_UNKILLABLE; |
ee5bfa642
|
734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 |
if (ramdisk_execute_command) { run_init_process(ramdisk_execute_command); printk(KERN_WARNING "Failed to execute %s ", ramdisk_execute_command); } /* * We try each of these until one succeeds. * * The Bourne shell can be used instead of init if we are * trying to recover a really broken machine. */ if (execute_command) { run_init_process(execute_command); printk(KERN_WARNING "Failed to execute %s. Attempting " "defaults... ", execute_command); } run_init_process("/sbin/init"); run_init_process("/etc/init"); run_init_process("/bin/init"); run_init_process("/bin/sh"); |
9a85b8d60
|
757 758 |
panic("No init found. Try passing init= option to kernel. " "See Linux Documentation/init.txt for guidance."); |
ee5bfa642
|
759 |
} |
aae5f662a
|
760 |
static int __init kernel_init(void * unused)
{
	/*
	 * Wait until kthreadd is all set-up.
	 */
	wait_for_completion(&kthreadd_done);
	/*
	 * init can allocate pages on any node
	 */
	set_mems_allowed(node_states[N_HIGH_MEMORY]);
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	cad_pid = task_pid(current);

	smp_prepare_cpus(setup_max_cpus);

	do_pre_smp_initcalls();
	lockup_detector_init();

	smp_init();
	sched_init_smp();

	do_basic_setup();

	/* Open the /dev/console on the rootfs, this should never fail */
	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		printk(KERN_WARNING "Warning: unable to open an initial console.\n");

	(void) sys_dup(0);
	(void) sys_dup(0);
	/*
	 * check if there is an early userspace init.  If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 */

	init_post();
	return 0;
}