Commit e66ac3f26aef131f5ca60350d25fba95f43acd0d

Authored by Simon Horman
Committed by Paul Mundt
1 parent d11584a044

sh: kexec: Add PHYSICAL_START

Add PHYSICAL_START kernel configuration parameter to set the address at
which the kernel should be loaded.

It has been observed on an sh7757lcr that simply modifying MEMORY_START
does not achieve this goal for 32bit sh. This is due to MEMORY_OFFSET in
arch/sh/kernel/vmlinux.lds.S not being based on MEMORY_START on such
systems.

Signed-off-by: Simon Horman <horms@verge.net.au>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Showing 5 changed files with 31 additions and 8 deletions Side-by-side Diff

arch/sh/Kconfig
... ... @@ -645,7 +645,7 @@
645 645 a specially reserved region and then later executed after
646 646 a crash by kdump/kexec. The crash dump kernel must be compiled
647 647 to a memory address not used by the main kernel using
648   - MEMORY_START.
  648 + PHYSICAL_START.
649 649  
650 650 For more details see Documentation/kdump/kdump.txt
651 651  
... ... @@ -655,6 +655,17 @@
655 655 help
656 656 Jump between original kernel and kexeced kernel and invoke
657 657 code via KEXEC
  658 +
  659 +config PHYSICAL_START
  660 + hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
  661 + default MEMORY_START
  662 + ---help---
  663 + This gives the physical address where the kernel is loaded
  664 + and is ordinarily the same as MEMORY_START.
  665 +
  666 + Different values are primarily used in the case of kexec on panic
  667 + where the fail safe kernel needs to run at a different address
  668 + than the panic-ed kernel.
658 669  
659 670 config SECCOMP
660 671 bool "Enable seccomp to safely compute untrusted bytecode"
arch/sh/boot/Makefile
... ... @@ -19,6 +19,7 @@
19 19 CONFIG_BOOT_LINK_OFFSET ?= 0x00800000
20 20 CONFIG_ZERO_PAGE_OFFSET ?= 0x00001000
21 21 CONFIG_ENTRY_OFFSET ?= 0x00001000
  22 +CONFIG_PHYSICAL_START ?= $(CONFIG_MEMORY_START)
22 23  
23 24 suffix-y := bin
24 25 suffix-$(CONFIG_KERNEL_GZIP) := gz
... ... @@ -48,7 +49,7 @@
48 49 $(Q)$(MAKE) $(build)=$(obj)/romimage $@
49 50  
50 51 KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
51   - $$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
  52 + $$[$(CONFIG_PHYSICAL_START) & 0x1fffffff]')
52 53  
53 54 KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
54 55 $$[$(CONFIG_PAGE_OFFSET) + \
... ... @@ -114,5 +115,6 @@
114 115 @echo ' Image $@ is ready'
115 116  
116 117 export CONFIG_PAGE_OFFSET CONFIG_MEMORY_START CONFIG_BOOT_LINK_OFFSET \
117   - CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET KERNEL_MEMORY suffix-y
  118 + CONFIG_PHYSICAL_START CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET \
  119 + KERNEL_MEMORY suffix-y
arch/sh/include/asm/page.h
... ... @@ -113,6 +113,16 @@
113 113 #define __MEMORY_SIZE CONFIG_MEMORY_SIZE
114 114  
115 115 /*
  116 + * PHYSICAL_OFFSET is the offset in physical memory where the base
  117 + * of the kernel is loaded.
  118 + */
  119 +#ifdef CONFIG_PHYSICAL_START
  120 +#define PHYSICAL_OFFSET (CONFIG_PHYSICAL_START - __MEMORY_START)
  121 +#else
  122 +#define PHYSICAL_OFFSET 0
  123 +#endif
  124 +
  125 +/*
116 126 * PAGE_OFFSET is the virtual address of the start of kernel address
117 127 * space.
118 128 */
arch/sh/kernel/vmlinux.lds.S
... ... @@ -23,7 +23,7 @@
23 23 ENTRY(_start)
24 24 SECTIONS
25 25 {
26   - . = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
  26 + . = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
27 27  
28 28 _text = .; /* Text and read-only data */
29 29  
arch/sh/kernel/setup.c
... ... @@ -287,6 +287,8 @@
287 287 static void __init early_reserve_mem(void)
288 288 {
289 289 unsigned long start_pfn;
  290 + u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
  291 + u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
290 292  
291 293 /*
292 294 * Partially used pages are not usable - thus
293 295  
... ... @@ -300,15 +302,13 @@
300 302 * this catches the (definitely buggy) case of us accidentally
301 303 * initializing the bootmem allocator with an invalid RAM area.
302 304 */
303   - memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
304   - (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
305   - (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
  305 + memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
306 306  
307 307 /*
308 308 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
309 309 */
310 310 if (CONFIG_ZERO_PAGE_OFFSET != 0)
311   - memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
  311 + memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
312 312  
313 313 /*
314 314 * Handle additional early reservations