Commit 0536bdf33faff4d940ac094c77998cfac368cfff

Authored by Nicolas Pitre
Committed by Nicolas Pitre
1 parent be9b7335e7

ARM: move iotable mappings within the vmalloc region

In order to remove the build time variation between different SOCs with
regards to VMALLOC_END, the iotable mappings are now allocated inside
the vmalloc region.  This allows for VMALLOC_END to be identical across
all machines.

The value for VMALLOC_END is now set to 0xff000000 which is right where
the consistent DMA area starts.

To accommodate all static mappings on machines with possible highmem usage,
the default vmalloc area size is changed to 240 MB so that VMALLOC_START
is no higher than 0xf0000000 by default.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Kevin Hilman <khilman@ti.com>
Tested-by: Jamie Iles <jamie@jamieiles.com>

Showing 3 changed files with 41 additions and 27 deletions (side-by-side diff)

Documentation/arm/memory.txt
... ... @@ -51,15 +51,14 @@
51 51 ff000000 ffbfffff Reserved for future expansion of DMA
52 52 mapping region.
53 53  
54   -VMALLOC_END feffffff Free for platform use, recommended.
55   - VMALLOC_END must be aligned to a 2MB
56   - boundary.
57   -
58 54 VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space.
59 55 Memory returned by vmalloc/ioremap will
60 56 be dynamically placed in this region.
61   - VMALLOC_START may be based upon the value
62   - of the high_memory variable.
  57 + Machine specific static mappings are also
  58 + located here through iotable_init().
  59 + VMALLOC_START is based upon the value
  60 + of the high_memory variable, and VMALLOC_END
  61 + is equal to 0xff000000.
63 62  
64 63 PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region.
65 64 This maps the platforms RAM, and typically
arch/arm/include/asm/pgtable.h
... ... @@ -21,7 +21,6 @@
21 21 #else
22 22  
23 23 #include <asm/memory.h>
24   -#include <mach/vmalloc.h>
25 24 #include <asm/pgtable-hwdef.h>
26 25  
27 26 #include <asm/pgtable-2level.h>
28 27  
29 28  
... ... @@ -33,15 +32,10 @@
33 32 * any out-of-bounds memory accesses will hopefully be caught.
34 33 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
35 34 * area for the same reason. ;)
36   - *
37   - * Note that platforms may override VMALLOC_START, but they must provide
38   - * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
39   - * which may not overlap IO space.
40 35 */
41   -#ifndef VMALLOC_START
42 36 #define VMALLOC_OFFSET (8*1024*1024)
43 37 #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
44   -#endif
  38 +#define VMALLOC_END 0xff000000UL
45 39  
46 40 #define LIBRARY_TEXT_START 0x0c000000
47 41  
arch/arm/mm/mmu.c
... ... @@ -15,6 +15,7 @@
15 15 #include <linux/nodemask.h>
16 16 #include <linux/memblock.h>
17 17 #include <linux/fs.h>
  18 +#include <linux/vmalloc.h>
18 19  
19 20 #include <asm/cputype.h>
20 21 #include <asm/sections.h>
21 22  
22 23  
... ... @@ -529,13 +530,18 @@
529 530  
530 531 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
531 532  
532   -static void __init *early_alloc(unsigned long sz)
  533 +static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
533 534 {
534   - void *ptr = __va(memblock_alloc(sz, sz));
  535 + void *ptr = __va(memblock_alloc(sz, align));
535 536 memset(ptr, 0, sz);
536 537 return ptr;
537 538 }
538 539  
  540 +static void __init *early_alloc(unsigned long sz)
  541 +{
  542 + return early_alloc_aligned(sz, sz);
  543 +}
  544 +
539 545 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
540 546 {
541 547 if (pmd_none(*pmd)) {
542 548  
... ... @@ -685,9 +691,10 @@
685 691 }
686 692  
687 693 if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
688   - md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
  694 + md->virtual >= PAGE_OFFSET &&
  695 + (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
689 696 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
690   - " at 0x%08lx overlaps vmalloc space\n",
  697 + " at 0x%08lx out of vmalloc space\n",
691 698 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
692 699 }
693 700  
694 701  
695 702  
696 703  
... ... @@ -729,18 +736,32 @@
729 736 */
730 737 void __init iotable_init(struct map_desc *io_desc, int nr)
731 738 {
732   - int i;
  739 + struct map_desc *md;
  740 + struct vm_struct *vm;
733 741  
734   - for (i = 0; i < nr; i++)
735   - create_mapping(io_desc + i);
  742 + if (!nr)
  743 + return;
  744 +
  745 + vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
  746 +
  747 + for (md = io_desc; nr; md++, nr--) {
  748 + create_mapping(md);
  749 + vm->addr = (void *)(md->virtual & PAGE_MASK);
  750 + vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
  751 + vm->phys_addr = __pfn_to_phys(md->pfn);
  752 + vm->flags = VM_IOREMAP;
  753 + vm->caller = iotable_init;
  754 + vm_area_add_early(vm++);
  755 + }
736 756 }
737 757  
738   -static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
  758 +static void * __initdata vmalloc_min =
  759 + (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
739 760  
740 761 /*
741 762 * vmalloc=size forces the vmalloc area to be exactly 'size'
742 763 * bytes. This can be used to increase (or decrease) the vmalloc
743   - * area - the default is 128m.
  764 + * area - the default is 240m.
744 765 */
745 766 static int __init early_vmalloc(char *arg)
746 767 {
747 768  
... ... @@ -891,10 +912,10 @@
891 912  
892 913 /*
893 914 * Clear out all the kernel space mappings, except for the first
894   - * memory bank, up to the end of the vmalloc region.
  915 + * memory bank, up to the vmalloc region.
895 916 */
896 917 for (addr = __phys_to_virt(end);
897   - addr < VMALLOC_END; addr += PMD_SIZE)
  918 + addr < VMALLOC_START; addr += PMD_SIZE)
898 919 pmd_clear(pmd_off_k(addr));
899 920 }
900 921  
... ... @@ -921,8 +942,8 @@
921 942 }
922 943  
923 944 /*
924   - * Set up device the mappings. Since we clear out the page tables for all
925   - * mappings above VMALLOC_END, we will remove any debug device mappings.
  945 + * Set up the device mappings. Since we clear out the page tables for all
  946 + * mappings above VMALLOC_START, we will remove any debug device mappings.
926 947 * This means you have to be careful how you debug this function, or any
927 948 * called function. This means you can't use any function or debugging
928 949 * method which may touch any device, otherwise the kernel _will_ crash.
... ... @@ -937,7 +958,7 @@
937 958 */
938 959 vectors_page = early_alloc(PAGE_SIZE);
939 960  
940   - for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
  961 + for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
941 962 pmd_clear(pmd_off_k(addr));
942 963  
943 964 /*