Commit 30eebb54b13ef198a3f1a143ee9dd68f295c60de

Authored by Linus Torvalds

Merge branch 'next' of git://git.monstr.eu/linux-2.6-microblaze

Pull arch/microblaze fixes from Michal Simek

* 'next' of git://git.monstr.eu/linux-2.6-microblaze:
  microblaze: Handle TLB skip size dynamically
  microblaze: Introduce TLB skip size
  microblaze: Improve TLB calculation for small systems
  microblaze: Extend space for compiled-in FDT to 32kB
  microblaze: Clear all MSR flags on the first kernel instruction
  microblaze: Use node name instead of compatible string
  microblaze: Fix mapin_ram function
  microblaze: Highmem support
  microblaze: Use active regions
  microblaze: Show more detailed information about memory
  microblaze: Introduce fixmap
  microblaze: mm: Fix lowmem max memory size limits
  microblaze: mm: Use ZONE_DMA instead of ZONE_NORMAL
  microblaze: trivial: Fix typo fault in timer.c
  microblaze: Use vsprintf extension %pf with builtin_return_address
  microblaze: Add PVR version string for MB 8.20.b and 8.30.a
  microblaze: Fix makefile to work with latest toolchain
  microblaze: Fix typo in early_printk.c

Showing 23 changed files

arch/microblaze/Kconfig
1 1 config MICROBLAZE
2 2 def_bool y
3 3 select HAVE_MEMBLOCK
  4 + select HAVE_MEMBLOCK_NODE_MAP
4 5 select HAVE_FUNCTION_TRACER
5 6 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
6 7 select HAVE_FUNCTION_GRAPH_TRACER
... ... @@ -28,6 +29,12 @@
28 29 config RWSEM_GENERIC_SPINLOCK
29 30 def_bool y
30 31  
  32 +config ZONE_DMA
  33 + def_bool y
  34 +
  35 +config ARCH_POPULATES_NODE_MAP
  36 + def_bool y
  37 +
31 38 config RWSEM_XCHGADD_ALGORITHM
32 39 bool
33 40  
... ... @@ -153,21 +160,19 @@
153 160 The feature requires the design to define the RAM memory controller
154 161 window to be twice as large as the actual physical memory.
155 162  
156   -config HIGHMEM_START_BOOL
157   - bool "Set high memory pool address"
158   - depends on ADVANCED_OPTIONS && HIGHMEM
  163 +config HIGHMEM
  164 + bool "High memory support"
  165 + depends on MMU
159 166 help
160   - This option allows you to set the base address of the kernel virtual
161   - area used to map high memory pages. This can be useful in
162   - optimizing the layout of kernel virtual memory.
  167 + The address space of Microblaze processors is only 4 Gigabytes large
  168 + and it has to accommodate user address space, kernel address
  169 + space as well as some memory mapped IO. That means that, if you
  170 + have a large amount of physical memory and/or IO, not all of the
  171 + memory can be "permanently mapped" by the kernel. The physical
  172 + memory that is not permanently mapped is called "high memory".
163 173  
164   - Say N here unless you know what you are doing.
  174 + If unsure, say n.
165 175  
166   -config HIGHMEM_START
167   - hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
168   - depends on MMU
169   - default "0xfe000000"
170   -
171 176 config LOWMEM_SIZE_BOOL
172 177 bool "Set maximum low memory"
173 178 depends on ADVANCED_OPTIONS && MMU
... ... @@ -254,6 +259,10 @@
254 259 bool "32k page size"
255 260  
256 261 endchoice
  262 +
  263 +config KERNEL_PAD
  264 + hex "Kernel PAD for unpacking" if ADVANCED_OPTIONS
  265 + default "0x80000" if MMU
257 266  
258 267 endmenu
259 268  
arch/microblaze/boot/Makefile
... ... @@ -8,7 +8,7 @@
8 8  
9 9 targets := linux.bin linux.bin.gz simpleImage.%
10 10  
11   -OBJCOPYFLAGS := -O binary
  11 +OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary
12 12  
13 13 # Ensure system.dtb exists
14 14 $(obj)/linked_dtb.o: $(obj)/system.dtb
arch/microblaze/include/asm/fixmap.h
  1 +/*
  2 + * fixmap.h: compile-time virtual memory allocation
  3 + *
  4 + * This file is subject to the terms and conditions of the GNU General Public
  5 + * License. See the file "COPYING" in the main directory of this archive
  6 + * for more details.
  7 + *
  8 + * Copyright (C) 1998 Ingo Molnar
  9 + *
  10 + * Copyright 2008 Freescale Semiconductor Inc.
  11 + * Port to powerpc added by Kumar Gala
  12 + *
  13 + * Copyright 2011 Michal Simek <monstr@monstr.eu>
  14 + * Copyright 2011 PetaLogix Qld Pty Ltd
  15 + * Port to Microblaze
  16 + */
  17 +
  18 +#ifndef _ASM_FIXMAP_H
  19 +#define _ASM_FIXMAP_H
  20 +
  21 +#ifndef __ASSEMBLY__
  22 +#include <linux/kernel.h>
  23 +#include <asm/page.h>
  24 +#ifdef CONFIG_HIGHMEM
  25 +#include <linux/threads.h>
  26 +#include <asm/kmap_types.h>
  27 +#endif
  28 +
  29 +#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
  30 +
  31 +/*
  32 + * Here we define all the compile-time 'special' virtual
  33 + * addresses. The point is to have a constant address at
  34 + * compile time, but to set the physical address only
  35 + * in the boot process. We allocate these special addresses
  36 + * from the end of virtual memory (0xfffff000) backwards.
  37 + * Also this lets us do fail-safe vmalloc(), we
  38 + * can guarantee that these special addresses and
  39 + * vmalloc()-ed addresses never overlap.
  40 + *
  41 + * these 'compile-time allocated' memory buffers are
  42 + * fixed-size 4k pages. (or larger if used with an increment
  43 + * higher than 1) use fixmap_set(idx,phys) to associate
  44 + * physical memory with fixmap indices.
  45 + *
  46 + * TLB entries of such buffers will not be flushed across
  47 + * task switches.
  48 + */
  49 +enum fixed_addresses {
  50 + FIX_HOLE,
  51 +#ifdef CONFIG_HIGHMEM
  52 + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
  53 + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1,
  54 +#endif
  55 + __end_of_fixed_addresses
  56 +};
  57 +
  58 +extern void __set_fixmap(enum fixed_addresses idx,
  59 + phys_addr_t phys, pgprot_t flags);
  60 +
  61 +#define set_fixmap(idx, phys) \
  62 + __set_fixmap(idx, phys, PAGE_KERNEL)
  63 +/*
  64 + * Some hardware wants to get fixmapped without caching.
  65 + */
  66 +#define set_fixmap_nocache(idx, phys) \
  67 + __set_fixmap(idx, phys, PAGE_KERNEL_CI)
  68 +
  69 +#define clear_fixmap(idx) \
  70 + __set_fixmap(idx, 0, __pgprot(0))
  71 +
  72 +#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
  73 +#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
  74 +
  75 +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
  76 +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
  77 +
  78 +extern void __this_fixmap_does_not_exist(void);
  79 +
  80 +/*
  81 + * 'index to address' translation. If anyone tries to use the idx
  82 + * directly without translation, we catch the bug with a NULL-dereference
  83 + * kernel oops. Illegal ranges of incoming indices are caught too.
  84 + */
  85 +static __always_inline unsigned long fix_to_virt(const unsigned int idx)
  86 +{
  87 + /*
  88 + * this branch gets completely eliminated after inlining,
  89 + * except when someone tries to use fixaddr indices in an
  90 + * illegal way. (such as mixing up address types or using
  91 + * out-of-range indices).
  92 + *
  93 + * If it doesn't get removed, the linker will complain
  94 + * loudly with a reasonably clear error message.
  95 + */
  96 + if (idx >= __end_of_fixed_addresses)
  97 + __this_fixmap_does_not_exist();
  98 +
  99 + return __fix_to_virt(idx);
  100 +}
  101 +
  102 +static inline unsigned long virt_to_fix(const unsigned long vaddr)
  103 +{
  104 + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
  105 + return __virt_to_fix(vaddr);
  106 +}
  107 +
  108 +#endif /* !__ASSEMBLY__ */
  109 +#endif
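
As a sketch (not part of this commit): assuming a hypothetical FIX_EARLY_UART entry were added to enum fixed_addresses, a driver could pin one uncached page at a compile-time-constant virtual address with the new API:

	/* hypothetical user of the fixmap API introduced above */
	static void __iomem *early_uart_map(phys_addr_t uart_phys)
	{
		/* bind the physical page to the fixmap slot, uncached */
		set_fixmap_nocache(FIX_EARLY_UART, uart_phys);
		/* fix_to_virt() folds to a constant at compile time */
		return (void __iomem *)fix_to_virt(FIX_EARLY_UART);
	}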
arch/microblaze/include/asm/highmem.h
  1 +/*
  2 + * highmem.h: virtual kernel memory mappings for high memory
  3 + *
  4 + * Used in CONFIG_HIGHMEM systems for memory pages which
  5 + * are not addressable by direct kernel virtual addresses.
  6 + *
  7 + * Copyright (C) 1999 Gerhard Wichert, Siemens AG
  8 + * Gerhard.Wichert@pdb.siemens.de
  9 + *
  10 + *
  11 + * Redesigned the x86 32-bit VM architecture to deal with
  12 + * up to 16 Terabyte physical memory. With current x86 CPUs
  13 + * we now support up to 64 Gigabytes physical RAM.
  14 + *
  15 + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
  16 + */
  17 +#ifndef _ASM_HIGHMEM_H
  18 +#define _ASM_HIGHMEM_H
  19 +
  20 +#ifdef __KERNEL__
  21 +
  22 +#include <linux/init.h>
  23 +#include <linux/interrupt.h>
  24 +#include <linux/uaccess.h>
  25 +#include <asm/fixmap.h>
  26 +
  27 +extern pte_t *kmap_pte;
  28 +extern pgprot_t kmap_prot;
  29 +extern pte_t *pkmap_page_table;
  30 +
  31 +/*
  32 + * Right now we initialize only a single pte table. It can be extended
  33 + * easily, subsequent pte tables have to be allocated in one physical
  34 + * chunk of RAM.
  35 + */
  36 +/*
  37 + * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
  38 + * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP
  39 + * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
  40 + * in case of 16K/64K/256K page sizes.
  41 + */
  42 +
  43 +#define PKMAP_ORDER PTE_SHIFT
  44 +#define LAST_PKMAP (1 << PKMAP_ORDER)
  45 +
  46 +#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
  47 + & PMD_MASK)
  48 +
  49 +#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
  50 +#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
  51 +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
  52 +
  53 +extern void *kmap_high(struct page *page);
  54 +extern void kunmap_high(struct page *page);
  55 +extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
  56 +extern void __kunmap_atomic(void *kvaddr);
  57 +
  58 +static inline void *kmap(struct page *page)
  59 +{
  60 + might_sleep();
  61 + if (!PageHighMem(page))
  62 + return page_address(page);
  63 + return kmap_high(page);
  64 +}
  65 +
  66 +static inline void kunmap(struct page *page)
  67 +{
  68 + BUG_ON(in_interrupt());
  69 + if (!PageHighMem(page))
  70 + return;
  71 + kunmap_high(page);
  72 +}
  73 +
  74 +static inline void *__kmap_atomic(struct page *page)
  75 +{
  76 + return kmap_atomic_prot(page, kmap_prot);
  77 +}
  78 +
  79 +static inline struct page *kmap_atomic_to_page(void *ptr)
  80 +{
  81 + unsigned long idx, vaddr = (unsigned long) ptr;
  82 + pte_t *pte;
  83 +
  84 + if (vaddr < FIXADDR_START)
  85 + return virt_to_page(ptr);
  86 +
  87 + idx = virt_to_fix(vaddr);
  88 + pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
  89 + return pte_page(*pte);
  90 +}
  91 +
  92 +#define flush_cache_kmaps() { flush_icache(); flush_dcache(); }
  93 +
  94 +#endif /* __KERNEL__ */
  95 +
  96 +#endif /* _ASM_HIGHMEM_H */
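
A minimal sketch of the kmap()/kunmap() pattern these helpers support (peek_page_byte is an illustrative name, not from this commit); kmap() may sleep, so this is for process context only:

	static u8 peek_page_byte(struct page *page, unsigned int offset)
	{
		u8 *vaddr, val;

		vaddr = kmap(page);	/* bounces to kmap_high() for highmem */
		val = vaddr[offset];
		kunmap(page);		/* no-op for lowmem pages */
		return val;
	}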
arch/microblaze/include/asm/mmu.h
... ... @@ -56,6 +56,12 @@
56 56  
57 57 extern void _tlbie(unsigned long va); /* invalidate a TLB entry */
58 58 extern void _tlbia(void); /* invalidate all TLB entries */
  59 +
  60 +/*
  61 + * tlb_skip stores the actual number of TLB entries skipped from TLB0 -
  62 + * every direct TLB mapping has to increase tlb_skip.
  63 + */
  64 +extern u32 tlb_skip;
59 65 # endif /* __ASSEMBLY__ */
60 66  
61 67 /*
... ... @@ -68,6 +74,12 @@
68 74 */
69 75  
70 76 # define MICROBLAZE_TLB_SIZE 64
  77 +
  78 +/* For cases when you want to skip some TLB entries */
  79 +# define MICROBLAZE_TLB_SKIP 0
  80 +
  81 +/* Use the last TLB for temporary access to LMB */
  82 +# define MICROBLAZE_LMB_TLB_ID 63
71 83  
72 84 /*
73 85 * TLB entries are defined by a "high" tag portion and a "low" data
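
Putting the new constants together, the 64-entry TLB is partitioned roughly as follows after this series (a sketch inferred from the diffs below, not text from the commit):

	/*
	 * [0 .. tlb_skip-1]  pinned mappings (kernel TLB0/TLB1, early
	 *                    console, early UART) - skipped by _tlbia
	 * [tlb_skip .. 63]   replaceable, rotated by finish_tlb_load
	 * [63]               MICROBLAZE_LMB_TLB_ID: also borrowed at boot
	 *                    for a temporary LMB mapping, cleared in
	 *                    kernel_load_context
	 */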
arch/microblaze/include/asm/page.h
... ... @@ -135,8 +135,8 @@
135 135 extern unsigned long max_pfn;
136 136  
137 137 extern unsigned long memory_start;
138   -extern unsigned long memory_end;
139 138 extern unsigned long memory_size;
  139 +extern unsigned long lowmem_size;
140 140  
141 141 extern int page_is_ram(unsigned long pfn);
142 142  
arch/microblaze/include/asm/pgtable.h
... ... @@ -94,8 +94,7 @@
94 94 /* Start and end of the vmalloc area. */
95 95 /* Make sure to map the vmalloc area above the pinned kernel memory area
96 96 of 32Mb. */
97   -#define VMALLOC_START (CONFIG_KERNEL_START + \
98   - max(32 * 1024 * 1024UL, memory_size))
  97 +#define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
99 98 #define VMALLOC_END ioremap_bot
100 99  
101 100 #endif /* __ASSEMBLY__ */
arch/microblaze/include/asm/setup.h
... ... @@ -39,7 +39,8 @@
39 39 void time_init(void);
40 40 void init_IRQ(void);
41 41 void machine_early_init(const char *cmdline, unsigned int ram,
42   - unsigned int fdt, unsigned int msr);
  42 + unsigned int fdt, unsigned int msr, unsigned int tlb0,
  43 + unsigned int tlb1);
43 44  
44 45 void machine_restart(char *cmd);
45 46 void machine_shutdown(void);
arch/microblaze/include/asm/system.h
... ... @@ -83,6 +83,7 @@
83 83 void free_init_pages(char *what, unsigned long begin, unsigned long end);
84 84 void free_initmem(void);
85 85 extern char *klimit;
  86 +extern unsigned long kernel_tlb;
86 87 extern void ret_from_fork(void);
87 88  
88 89 extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
arch/microblaze/include/asm/uaccess.h
... ... @@ -80,7 +80,7 @@
80 80 static inline int ___range_ok(unsigned long addr, unsigned long size)
81 81 {
82 82 return ((addr < memory_start) ||
83   - ((addr + size) > memory_end));
  83 + ((addr + size - 1) > (memory_start + memory_size - 1)));
84 84 }
85 85  
86 86 #define __range_ok(addr, size) \
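
A worked example of why the check now compares last-byte addresses (a sketch; the numbers are illustrative): with RAM ending exactly at the 4GB boundary, the old end-exclusive arithmetic wraps to zero while the new form stays well-defined:

	/*
	 * memory_start = 0xc0000000, memory_size = 0x40000000:
	 *
	 *   memory_start + memory_size     == 0x00000000  (wraps!)
	 *   memory_start + memory_size - 1 == 0xffffffff  (last valid byte)
	 *
	 * For addr = 0xfffff000, size = 0x1000:
	 *
	 *   addr + size - 1 == 0xffffffff  <=  0xffffffff  -> accepted
	 */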
arch/microblaze/kernel/cpu/cpuinfo.c
... ... @@ -35,6 +35,8 @@
35 35 {"8.00.b", 0x13},
36 36 {"8.10.a", 0x14},
37 37 {"8.20.a", 0x15},
  38 + {"8.20.b", 0x16},
  39 + {"8.30.a", 0x17},
38 40 {NULL, 0},
39 41 };
40 42  
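
For context, such a version table is typically consumed by a linear scan (a sketch; this assumes the usual cpu_ver_lookup[] array of {s, k} pairs and is not code from this commit):

	static const char *cpu_version_string(u32 pvr_code)
	{
		const struct cpu_ver_key *v;

		for (v = cpu_ver_lookup; v->s; v++)
			if (v->k == pvr_code)
				return v->s;	/* e.g. "8.30.a" for 0x17 */
		return "unknown";
	}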
arch/microblaze/kernel/early_printk.c
... ... @@ -171,10 +171,24 @@
171 171 {
172 172 if (!early_console_initialized || !early_console)
173 173 return;
174   - printk(KERN_INFO "early_printk_console remaping from 0x%x to ",
  174 + printk(KERN_INFO "early_printk_console remapping from 0x%x to ",
175 175 base_addr);
176 176 base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
177 177 printk(KERN_CONT "0x%x\n", base_addr);
  178 +
  179 + /*
  180 + * The early console mapping sits on top of the skipped TLB entries;
  181 + * decrease tlb_skip so that the hardcoded TLB entry will be reused
  182 + * by the generic algorithm
  183 + * FIXME check if early console mapping is on the top by rereading
  184 + * TLB entry and compare baseaddr
  185 + * mts rtlbx, (tlb_skip - 1)
  186 + * nop
  187 + * mfs rX, rtlblo
  188 + * nop
  189 + * cmp rX, orig_base_addr
  190 + */
  191 + tlb_skip -= 1;
178 192 }
179 193  
180 194 void __init disable_early_printk(void)
arch/microblaze/kernel/head.S
... ... @@ -63,9 +63,7 @@
63 63 real_start:
64 64 #endif
65 65  
66   - mfs r1, rmsr
67   - andi r1, r1, ~2
68   - mts rmsr, r1
  66 + mts rmsr, r0
69 67 /*
70 68 * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
71 69 * if the msrclr instruction is not enabled. We use this to detect
... ... @@ -73,6 +71,7 @@
73 71 * r8 == 0 - msr instructions are implemented
74 72 * r8 != 0 - msr instructions are not implemented
75 73 */
  74 + mfs r1, rmsr
76 75 msrclr r8, 0 /* clear nothing - just read msr for test */
77 76 cmpu r8, r8, r1 /* r1 must contain msr reg content */
78 77  
... ... @@ -96,7 +95,7 @@
96 95 _prepare_copy_fdt:
97 96 or r11, r0, r0 /* increment */
98 97 ori r4, r0, TOPHYS(_fdt_start)
99   - ori r3, r0, (0x4000 - 4)
  98 + ori r3, r0, (0x8000 - 4)
100 99 _copy_fdt:
101 100 lw r12, r7, r11 /* r12 = r7 + r11 */
102 101 sw r12, r4, r11 /* addr[r4 + r11] = r12 */
... ... @@ -150,6 +149,7 @@
150 149 _invalidate:
151 150 mts rtlbx, r3
152 151 mts rtlbhi, r0 /* flush: ensure V is clear */
  152 + mts rtlblo, r0
153 153 bgtid r3, _invalidate /* loop for all entries */
154 154 addik r3, r3, -1
155 155 /* sync */
... ... @@ -169,6 +169,53 @@
169 169 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
170 170 tophys(r4,r3) /* Load the kernel physical address */
171 171  
  172 + /* start to do TLB calculation */
  173 + addik r12, r0, _end
  174 + rsub r12, r3, r12
  175 + addik r12, r12, CONFIG_KERNEL_PAD /* that's the pad */
  176 +
  177 + or r9, r0, r0 /* TLB0 = 0 */
  178 + or r10, r0, r0 /* TLB1 = 0 */
  179 +
  180 + addik r11, r12, -0x1000000
  181 + bgei r11, GT16 /* size is greater than 16MB */
  182 + addik r11, r12, -0x0800000
  183 + bgei r11, GT8 /* size is greater than 8MB */
  184 + addik r11, r12, -0x0400000
  185 + bgei r11, GT4 /* size is greater than 4MB */
  186 + /* size is less than 4MB */
  187 + addik r11, r12, -0x0200000
  188 + bgei r11, GT2 /* size is greater than 2MB */
  189 + addik r9, r0, 0x0100000 /* TLB0 must be 1MB */
  190 + addik r11, r12, -0x0100000
  191 + bgei r11, GT1 /* size is greater than 1MB */
  192 + /* TLB1 is 0 which is setup above */
  193 + bri tlb_end
  194 +GT4: /* r11 contains the rest - TLB1 will be 1MB or 4MB */
  195 + ori r9, r0, 0x400000 /* TLB0 is 4MB */
  196 + bri TLB1
  197 +GT16: /* TLB0 is 16MB */
  198 + addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
  199 +TLB1:
  200 + /* r2 must be used here so r11 is preserved if the subtract fails */
  201 + addik r2, r11, -0x0400000
  202 + bgei r2, GT20 /* size is greater than 20MB */
  203 + /* size is >16MB and <20MB */
  204 + addik r11, r11, -0x0100000
  205 + bgei r11, GT17 /* size is greater than 17MB */
  206 + /* kernel is >16MB and < 17MB */
  207 +GT1:
  208 + addik r10, r0, 0x0100000 /* means TLB1 is 1MB */
  209 + bri tlb_end
  210 +GT2: /* TLB0 is 0 and TLB1 will be 4MB */
  211 +GT17: /* TLB1 is 4MB - kernel size <20MB */
  212 + addik r10, r0, 0x0400000 /* means TLB1 is 4MB */
  213 + bri tlb_end
  214 +GT8: /* TLB0 is still zero, that's why only TLB1 is used */
  215 +GT20: /* TLB1 is 16MB - kernel size >20MB */
  216 + addik r10, r0, 0x1000000 /* means TLB1 is 16MB */
  217 +tlb_end:
  218 +
172 219 /*
173 220 * Configure and load two entries into TLB slots 0 and 1.
174 221 * In case we are pinning TLBs, these are reserved in by the
... ... @@ -178,28 +225,81 @@
178 225 andi r4,r4,0xfffffc00 /* Mask off the real page number */
179 226 ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
180 227  
  228 + /*
  229 + * TLB0 is always used - check that it is not zero (r9 holds the TLB0 size);
  230 + * if it is zero, use the TLB1 value instead and clear TLB1 (r10 holds it)
  231 + */
  232 + bnei r9, tlb0_not_zero
  233 + add r9, r10, r0
  234 + add r10, r0, r0
  235 +tlb0_not_zero:
  236 +
  237 + /* encode TLB_PAGESZ bits: 0x280=1MB, 0x300=4MB, 0x380=16MB */
  238 + ori r30, r0, 0x200
  239 + andi r29, r9, 0x100000
  240 + bneid r29, 1f
  241 + addik r30, r30, 0x80
  242 + andi r29, r9, 0x400000
  243 + bneid r29, 1f
  244 + addik r30, r30, 0x80
  245 + andi r29, r9, 0x1000000
  246 + bneid r29, 1f
  247 + addik r30, r30, 0x80
  248 +1:
181 249 andi r3,r3,0xfffffc00 /* Mask off the effective page number */
182   - ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
  250 + ori r3,r3,(TLB_VALID)
  251 + or r3, r3, r30
183 252  
184   - mts rtlbx,r0 /* TLB slow 0 */
  253 + /* Load tlb_skip, the index of the first unused TLB entry */
  254 + lwi r11, r0, TOPHYS(tlb_skip)
  255 + mts rtlbx,r11 /* first free TLB slot */
185 256  
186 257 mts rtlblo,r4 /* Load the data portion of the entry */
187 258 mts rtlbhi,r3 /* Load the tag portion of the entry */
188 259  
189   - addik r4, r4, 0x01000000 /* Map next 16 M entries */
190   - addik r3, r3, 0x01000000
  260 + /* Increase tlb_skip size */
  261 + addik r11, r11, 1
  262 + swi r11, r0, TOPHYS(tlb_skip)
191 263  
192   - ori r6,r0,1 /* TLB slot 1 */
193   - mts rtlbx,r6
  264 + /* TLB1 can be zero - in that case don't set it up */
  265 + beqi r10, jump_over2
194 266  
  267 + /* encode TLB_PAGESZ bits: 0x280=1MB, 0x300=4MB, 0x380=16MB */
  268 + ori r30, r0, 0x200
  269 + andi r29, r10, 0x100000
  270 + bneid r29, 1f
  271 + addik r30, r30, 0x80
  272 + andi r29, r10, 0x400000
  273 + bneid r29, 1f
  274 + addik r30, r30, 0x80
  275 + andi r29, r10, 0x1000000
  276 + bneid r29, 1f
  277 + addik r30, r30, 0x80
  278 +1:
  279 + addk r4, r4, r9 /* previous addr + TLB0 size */
  280 + addk r3, r3, r9
  281 +
  282 + andi r3,r3,0xfffffc00 /* Mask off the effective page number */
  283 + ori r3,r3,(TLB_VALID)
  284 + or r3, r3, r30
  285 +
  286 + lwi r11, r0, TOPHYS(tlb_skip)
  287 + mts rtlbx, r11 /* r11 reloaded from tlb_skip above */
  288 +
195 289 mts rtlblo,r4 /* Load the data portion of the entry */
196 290 mts rtlbhi,r3 /* Load the tag portion of the entry */
197 291  
  292 + /* Increase tlb_skip size */
  293 + addik r11, r11, 1
  294 + swi r11, r0, TOPHYS(tlb_skip)
  295 +
  296 +jump_over2:
198 297 /*
199 298 * Load a TLB entry for LMB, since we need access to
200 299 * the exception vectors, using a 4k real==virtual mapping.
201 300 */
202   - ori r6,r0,3 /* TLB slot 3 */
  301 + /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
  302 + ori r6, r0, MICROBLAZE_LMB_TLB_ID
203 303 mts rtlbx,r6
204 304  
205 305 ori r4,r0,(TLB_WR | TLB_EX)
... ... @@ -238,8 +338,8 @@
238 338 * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for
239 339 * the function.
240 340 */
241   - addik r9, r0, machine_early_init
242   - brald r15, r9
  341 + addik r11, r0, machine_early_init
  342 + brald r15, r11
243 343 nop
244 344  
245 345 #ifndef CONFIG_MMU
... ... @@ -268,8 +368,7 @@
268 368  
269 369 /* Load up the kernel context */
270 370 kernel_load_context:
271   - # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
272   - ori r5,r0,3
  371 + ori r5, r0, MICROBLAZE_LMB_TLB_ID
273 372 mts rtlbx,r5
274 373 nop
275 374 mts rtlbhi,r0
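
The TLB calculation block above can be read as the following C model (a sketch derived from the assembly; pick_kernel_tlbs and MB() are illustrative names, and size is _end - CONFIG_KERNEL_START + CONFIG_KERNEL_PAD):

	#define MB(x)	((x) * 0x100000UL)

	static void pick_kernel_tlbs(unsigned long size,
				     unsigned long *tlb0, unsigned long *tlb1)
	{
		*tlb0 = *tlb1 = 0;
		if (size >= MB(16)) {
			*tlb0 = MB(16);
			size -= MB(16);		/* cover the rest with TLB1 */
			if (size >= MB(4))
				*tlb1 = MB(16);	/* total >= 20MB */
			else if (size >= MB(1))
				*tlb1 = MB(4);	/* total 17..20MB */
			else
				*tlb1 = MB(1);	/* total 16..17MB */
		} else if (size >= MB(8)) {
			*tlb1 = MB(16);		/* one 16MB page */
		} else if (size >= MB(4)) {
			*tlb0 = MB(4);
			*tlb1 = (size - MB(4) >= MB(1)) ? MB(4) : MB(1);
		} else if (size >= MB(2)) {
			*tlb1 = MB(4);		/* one 4MB page */
		} else {
			*tlb0 = MB(1);
			if (size >= MB(1))
				*tlb1 = MB(1);	/* two 1MB pages */
		}
	}

A zero TLB0 with a non-zero TLB1 is later swapped into TLB0 by the tlb0_not_zero fixup, so TLB0 is always the entry actually programmed first.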
arch/microblaze/kernel/hw_exception_handler.S
... ... @@ -820,19 +820,26 @@
820 820 * Upon exit, we reload everything and RFI.
821 821 * A common place to load the TLB.
822 822 */
  823 +.section .data
  824 +.align 4
  825 +.global tlb_skip
  826 + tlb_skip:
  827 + .long MICROBLAZE_TLB_SKIP
823 828 tlb_index:
824   - .long 1 /* MS: storing last used tlb index */
  829 + /* MS: storing last used tlb index */
  830 + .long MICROBLAZE_TLB_SIZE/2
  831 +.previous
825 832 finish_tlb_load:
826 833 /* MS: load the last used TLB index. */
827 834 lwi r5, r0, TOPHYS(tlb_index)
828 835 addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
829 836  
830 837 /* MS: FIXME this is a potential fault, because this is a mask, not a count */
831   - andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
  838 + andi r5, r5, MICROBLAZE_TLB_SIZE - 1
832 839 ori r6, r0, 1
833 840 cmp r31, r5, r6
834 841 blti r31, ex12
835   - addik r5, r6, 1
  842 + lwi r5, r0, TOPHYS(tlb_skip)
836 843 ex12:
837 844 /* MS: save back current TLB index */
838 845 swi r5, r0, TOPHYS(tlb_index)
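
The index update in finish_tlb_load reduces to this C model (a sketch; next_tlb_index is an illustrative name): indices wrap within the 64-entry TLB and restart at tlb_skip instead of the old fixed slot 2, so the dynamically pinned entries are never evicted:

	static unsigned int next_tlb_index(unsigned int idx)
	{
		idx = (idx + 1) & (MICROBLAZE_TLB_SIZE - 1);
		if (idx <= 1)		/* wrapped past entry 63 */
			idx = tlb_skip;	/* resume after the pinned entries */
		return idx;
	}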
arch/microblaze/kernel/intc.c
... ... @@ -151,8 +151,8 @@
151 151 #ifdef CONFIG_SELFMOD_INTC
152 152 selfmod_function((int *) arr_func, intc_baseaddr);
153 153 #endif
154   - printk(KERN_INFO "XPS intc #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
155   - intc_baseaddr, nr_irq, intr_mask);
  154 + printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
  155 + intc->name, intc_baseaddr, nr_irq, intr_mask);
156 156  
157 157 /*
158 158 * Disable all external interrupts until they are
arch/microblaze/kernel/misc.S
... ... @@ -29,16 +29,16 @@
29 29 .type _tlbia, @function
30 30 .align 4;
31 31 _tlbia:
32   - addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
  32 + lwi r12, r0, tlb_skip;
33 33 /* isync */
34 34 _tlbia_1:
35 35 mts rtlbx, r12
36 36 nop
37 37 mts rtlbhi, r0 /* flush: ensure V is clear */
38 38 nop
39   - addik r11, r12, -2
  39 + rsubi r11, r12, MICROBLAZE_TLB_SIZE - 1
40 40 bneid r11, _tlbia_1 /* loop for all entries */
41   - addik r12, r12, -1
  41 + addik r12, r12, 1
42 42 /* sync */
43 43 rtsd r15, 8
44 44 nop
... ... @@ -75,7 +75,7 @@
75 75 * Load a TLB entry for the UART, so that microblaze_progress() can use
76 76 * the UARTs nice and early. We use a 4k real==virtual mapping.
77 77 */
78   - ori r4, r0, MICROBLAZE_TLB_SIZE - 1
  78 + lwi r4, r0, tlb_skip
79 79 mts rtlbx, r4 /* TLB slot taken from tlb_skip */
80 80  
81 81 or r4,r5,r0
... ... @@ -89,6 +89,11 @@
89 89 nop
90 90 mts rtlbhi,r5 /* Load the tag portion of the entry */
91 91 nop
  92 +
  93 + lwi r5, r0, tlb_skip
  94 + addik r5, r5, 1
  95 + swi r5, r0, tlb_skip
  96 +
92 97 rtsd r15, 8
93 98 nop
94 99  
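
The reworked _tlbia walks upward from tlb_skip instead of downward from entry 63, leaving the pinned entries intact. In C it would look roughly like this (a sketch; write_tlbx/write_tlbhi stand in for the mts instructions and are not real accessors):

	static void tlbia_model(void)
	{
		unsigned int i;

		/* invalidate entries tlb_skip .. 63, keep 0 .. tlb_skip-1 */
		for (i = tlb_skip; i < MICROBLAZE_TLB_SIZE; i++) {
			write_tlbx(i);		/* select the entry */
			write_tlbhi(0);		/* clear V -> invalid */
		}
	}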
arch/microblaze/kernel/setup.c
... ... @@ -95,8 +95,11 @@
95 95 }
96 96 #endif /* CONFIG_MTD_UCLINUX_EBSS */
97 97  
  98 +unsigned long kernel_tlb;
  99 +
98 100 void __init machine_early_init(const char *cmdline, unsigned int ram,
99   - unsigned int fdt, unsigned int msr)
  101 + unsigned int fdt, unsigned int msr, unsigned int tlb0,
  102 + unsigned int tlb1)
100 103 {
101 104 unsigned long *src, *dst;
102 105 unsigned int offset = 0;
... ... @@ -143,6 +146,12 @@
143 146 setup_early_printk(NULL);
144 147 #endif
145 148  
  149 + /* Setup kernel_tlb after BSS clearing.
  150 + * Maybe worth moving to asm code */
  151 + kernel_tlb = tlb0 + tlb1;
  152 + /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0,
  153 + tlb1, kernel_tlb); */
  154 +
146 155 printk("Ramdisk addr 0x%08x, ", ram);
147 156 if (fdt)
148 157 printk("FDT at 0x%08x\n", fdt);
... ... @@ -197,6 +206,19 @@
197 206 return of_debugfs_root == NULL;
198 207 }
199 208 arch_initcall(microblaze_debugfs_init);
  209 +
  210 +static int __init debugfs_tlb(void)
  211 +{
  212 + struct dentry *d;
  213 +
  214 + if (!of_debugfs_root)
  215 + return -ENODEV;
  216 +
  217 + d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
  218 + if (!d)
  219 + return -ENOMEM;
  220 + return 0;
  221 +}
  222 +device_initcall(debugfs_tlb);
200 222 #endif
201 223  
202 224 static int dflt_bus_notify(struct notifier_block *nb,
arch/microblaze/kernel/timer.c
... ... @@ -79,7 +79,7 @@
79 79 * !PWMA - disable pwm
80 80 * TINT - clear interrupt status
81 81 * ENT- enable timer itself
82   - * EINT - enable interrupt
  82 + * ENIT - enable interrupt
83 83 * !LOAD - clear the bit to let go
84 84 * ARHT - auto reload
85 85 * !CAPT - no external trigger
... ... @@ -274,8 +274,8 @@
274 274 #ifdef CONFIG_SELFMOD_TIMER
275 275 selfmod_function((int *) arr_func, timer_baseaddr);
276 276 #endif
277   - printk(KERN_INFO "XPS timer #0 at 0x%08x, irq=%d\n",
278   - timer_baseaddr, irq);
  277 + printk(KERN_INFO "%s #0 at 0x%08x, irq=%d\n",
  278 + timer->name, timer_baseaddr, irq);
279 279  
280 280 /* If there is a clock-frequency property then use it */
281 281 prop = of_get_property(timer, "clock-frequency", NULL);
arch/microblaze/kernel/vmlinux.lds.S
... ... @@ -44,7 +44,7 @@
44 44 __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
45 45 _fdt_start = . ; /* place for fdt blob */
46 46 *(__fdt_blob) ; /* Any link-placed DTB */
47   - . = _fdt_start + 0x4000; /* Pad up to 16kbyte */
  47 + . = _fdt_start + 0x8000; /* Pad up to 32kbyte */
48 48 _fdt_end = . ;
49 49 }
50 50  
arch/microblaze/mm/Makefile
... ... @@ -5,4 +5,5 @@
5 5 obj-y := consistent.o init.o
6 6  
7 7 obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
  8 +obj-$(CONFIG_HIGHMEM) += highmem.o
arch/microblaze/mm/highmem.c
  1 +/*
  2 + * highmem.c: virtual kernel memory mappings for high memory
  3 + *
  4 + * PowerPC version, stolen from the i386 version.
  5 + *
  6 + * Used in CONFIG_HIGHMEM systems for memory pages which
  7 + * are not addressable by direct kernel virtual addresses.
  8 + *
  9 + * Copyright (C) 1999 Gerhard Wichert, Siemens AG
  10 + * Gerhard.Wichert@pdb.siemens.de
  11 + *
  12 + *
  13 + * Redesigned the x86 32-bit VM architecture to deal with
  14 + * up to 16 Terabyte physical memory. With current x86 CPUs
  15 + * we now support up to 64 Gigabytes physical RAM.
  16 + *
  17 + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
  18 + *
  19 + * Reworked for PowerPC by various contributors. Moved from
  20 + * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
  21 + */
  22 +
  23 +#include <linux/highmem.h>
  24 +#include <linux/module.h>
  25 +
  26 +/*
  27 + * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
  28 + * gives a more generic (and caching) interface. But kmap_atomic can
  29 + * be used in IRQ contexts, so in some (very limited) cases we need
  30 + * it.
  31 + */
  32 +#include <asm/tlbflush.h>
  33 +
  34 +void *kmap_atomic_prot(struct page *page, pgprot_t prot)
  35 +{
  36 +
  37 + unsigned long vaddr;
  38 + int idx, type;
  39 +
  40 + /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
  41 + pagefault_disable();
  42 + if (!PageHighMem(page))
  43 + return page_address(page);
  44 +
  45 +
  46 + type = kmap_atomic_idx_push();
  47 + idx = type + KM_TYPE_NR*smp_processor_id();
  48 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
  49 +#ifdef CONFIG_DEBUG_HIGHMEM
  50 + BUG_ON(!pte_none(*(kmap_pte-idx)));
  51 +#endif
  52 + set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
  53 + local_flush_tlb_page(NULL, vaddr);
  54 +
  55 + return (void *) vaddr;
  56 +}
  57 +EXPORT_SYMBOL(kmap_atomic_prot);
  58 +
  59 +void __kunmap_atomic(void *kvaddr)
  60 +{
  61 + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
  62 + int type;
  63 +
  64 + if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
  65 + pagefault_enable();
  66 + return;
  67 + }
  68 +
  69 + type = kmap_atomic_idx();
  70 +#ifdef CONFIG_DEBUG_HIGHMEM
  71 + {
  72 + unsigned int idx;
  73 +
  74 + idx = type + KM_TYPE_NR * smp_processor_id();
  75 + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
  76 +
  77 + /*
  78 + * force other mappings to Oops if they try to access
  79 + * this pte without first remapping it
  80 + */
  81 + pte_clear(&init_mm, vaddr, kmap_pte-idx);
  82 + local_flush_tlb_page(NULL, vaddr);
  83 + }
  84 +#endif
  85 + kmap_atomic_idx_pop();
  86 + pagefault_enable();
  87 +}
  88 +EXPORT_SYMBOL(__kunmap_atomic);
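
A minimal sketch of the atomic-context pattern kmap_atomic_prot()/__kunmap_atomic() serve (zero_highpage_atomic is an illustrative name): no sleeping is allowed between the two calls, since pagefaults are disabled:

	static void zero_highpage_atomic(struct page *page)
	{
		void *vaddr = __kmap_atomic(page);	/* pagefaults off */

		memset(vaddr, 0, PAGE_SIZE);
		__kunmap_atomic(vaddr);			/* pagefaults back on */
	}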
arch/microblaze/mm/init.c
... ... @@ -24,6 +24,7 @@
24 24 #include <asm/pgalloc.h>
25 25 #include <asm/sections.h>
26 26 #include <asm/tlb.h>
  27 +#include <asm/fixmap.h>
27 28  
28 29 /* Use for MMU and noMMU because of PCI generic code */
29 30 int mem_init_done;
... ... @@ -44,27 +45,85 @@
44 45 */
45 46 unsigned long memory_start;
46 47 EXPORT_SYMBOL(memory_start);
47   -unsigned long memory_end; /* due to mm/nommu.c */
48 48 unsigned long memory_size;
49 49 EXPORT_SYMBOL(memory_size);
  50 +unsigned long lowmem_size;
50 51  
  52 +#ifdef CONFIG_HIGHMEM
  53 +pte_t *kmap_pte;
  54 +EXPORT_SYMBOL(kmap_pte);
  55 +pgprot_t kmap_prot;
  56 +EXPORT_SYMBOL(kmap_prot);
  57 +
  58 +static inline pte_t *virt_to_kpte(unsigned long vaddr)
  59 +{
  60 + return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
  61 + vaddr), vaddr);
  62 +}
  63 +
  64 +static void __init highmem_init(void)
  65 +{
  66 + pr_debug("%x\n", (u32)PKMAP_BASE);
  67 + map_page(PKMAP_BASE, 0, 0); /* XXX gross */
  68 + pkmap_page_table = virt_to_kpte(PKMAP_BASE);
  69 +
  70 + kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
  71 + kmap_prot = PAGE_KERNEL;
  72 +}
  73 +
  74 +static unsigned long highmem_setup(void)
  75 +{
  76 + unsigned long pfn;
  77 + unsigned long reservedpages = 0;
  78 +
  79 + for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
  80 + struct page *page = pfn_to_page(pfn);
  81 +
  82 + /* FIXME not sure about */
  83 + if (memblock_is_reserved(pfn << PAGE_SHIFT))
  84 + continue;
  85 + ClearPageReserved(page);
  86 + init_page_count(page);
  87 + __free_page(page);
  88 + totalhigh_pages++;
  89 + reservedpages++;
  90 + }
  91 + totalram_pages += totalhigh_pages;
  92 + printk(KERN_INFO "High memory: %luk\n",
  93 + totalhigh_pages << (PAGE_SHIFT-10));
  94 +
  95 + return reservedpages;
  96 +}
  97 +#endif /* CONFIG_HIGHMEM */
  98 +
51 99 /*
52 100 * paging_init() sets up the page tables - in fact we've already done this.
53 101 */
54 102 static void __init paging_init(void)
55 103 {
56 104 unsigned long zones_size[MAX_NR_ZONES];
  105 +#ifdef CONFIG_MMU
  106 + int idx;
57 107  
  108 + /* Setup fixmaps */
  109 + for (idx = 0; idx < __end_of_fixed_addresses; idx++)
  110 + clear_fixmap(idx);
  111 +#endif
  112 +
58 113 /* Clean every zone */
59 114 memset(zones_size, 0, sizeof(zones_size));
60 115  
61   - /*
62   - * old: we can DMA to/from any address.put all page into ZONE_DMA
63   - * We use only ZONE_NORMAL
64   - */
65   - zones_size[ZONE_NORMAL] = max_mapnr;
  116 +#ifdef CONFIG_HIGHMEM
  117 + highmem_init();
66 118  
67   - free_area_init(zones_size);
  119 + zones_size[ZONE_DMA] = max_low_pfn;
  120 + zones_size[ZONE_HIGHMEM] = max_pfn;
  121 +#else
  122 + zones_size[ZONE_DMA] = max_pfn;
  123 +#endif
  124 +
  125 + /* We don't have holes in memory map */
  126 + free_area_init_nodes(zones_size);
68 127 }
69 128  
70 129 void __init setup_memory(void)
... ... @@ -78,32 +137,31 @@
78 137 /* Find main memory where is the kernel */
79 138 for_each_memblock(memory, reg) {
80 139 memory_start = (u32)reg->base;
81   - memory_end = (u32) reg->base + reg->size;
  140 + lowmem_size = reg->size;
82 141 if ((memory_start <= (u32)_text) &&
83   - ((u32)_text <= memory_end)) {
84   - memory_size = memory_end - memory_start;
  142 + ((u32)_text <= (memory_start + lowmem_size - 1))) {
  143 + memory_size = lowmem_size;
85 144 PAGE_OFFSET = memory_start;
86   - printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, "
  145 + printk(KERN_INFO "%s: Main mem: 0x%x, "
87 146 "size 0x%08x\n", __func__, (u32) memory_start,
88   - (u32) memory_end, (u32) memory_size);
  147 + (u32) memory_size);
89 148 break;
90 149 }
91 150 }
92 151  
93   - if (!memory_start || !memory_end) {
94   - panic("%s: Missing memory setting 0x%08x-0x%08x\n",
95   - __func__, (u32) memory_start, (u32) memory_end);
  152 + if (!memory_start || !memory_size) {
  153 + panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
  154 + __func__, (u32) memory_start, (u32) memory_size);
96 155 }
97 156  
98 157 /* reservation of region where is the kernel */
99 158 kernel_align_start = PAGE_DOWN((u32)_text);
100 159 /* ALIGN can be removed because _end in vmlinux.lds.S is aligned */
101 160 kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
102   - memblock_reserve(kernel_align_start, kernel_align_size);
103   - printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
  161 + printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
104 162 __func__, kernel_align_start, kernel_align_start
105 163 + kernel_align_size, kernel_align_size);
106   -
  164 + memblock_reserve(kernel_align_start, kernel_align_size);
107 165 #endif
108 166 /*
109 167 * Kernel:
110 168  
... ... @@ -120,11 +178,13 @@
120 178 min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
121 179 /* RAM is assumed contiguous */
122 180 num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
123   - max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT;
  181 + max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
  182 + max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
124 183  
125 184 printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
126 185 printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
127 186 printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
  187 + printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);
128 188  
129 189 /*
130 190 * Find an area to use for the bootmem bitmap.
... ... @@ -137,15 +197,39 @@
137 197 PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
138 198 memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
139 199  
  200 + /* Add active regions with valid PFNs */
  201 + for_each_memblock(memory, reg) {
  202 + unsigned long start_pfn, end_pfn;
  203 +
  204 + start_pfn = memblock_region_memory_base_pfn(reg);
  205 + end_pfn = memblock_region_memory_end_pfn(reg);
  206 + memblock_set_node(start_pfn << PAGE_SHIFT,
  207 + (end_pfn - start_pfn) << PAGE_SHIFT, 0);
  208 + }
  209 +
140 210 /* free bootmem is whole main memory */
141   - free_bootmem(memory_start, memory_size);
  211 + free_bootmem_with_active_regions(0, max_low_pfn);
142 212  
143 213 /* reserve allocate blocks */
144 214 for_each_memblock(reserved, reg) {
145   - pr_debug("reserved - 0x%08x-0x%08x\n",
146   - (u32) reg->base, (u32) reg->size);
147   - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
  215 + unsigned long top = reg->base + reg->size - 1;
  216 +
  217 + pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
  218 + (u32) reg->base, (u32) reg->size, top,
  219 + memory_start + lowmem_size - 1);
  220 +
  221 + if (top <= (memory_start + lowmem_size - 1)) {
  222 + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
  223 + } else if (reg->base < (memory_start + lowmem_size - 1)) {
  224 + unsigned long trunc_size = memory_start + lowmem_size -
  225 + reg->base;
  226 + reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
  227 + }
148 228 }
  229 +
  230 + /* XXX need to clip this if using highmem? */
  231 + sparse_memory_present_with_active_regions(0);
  232 +
149 233 #ifdef CONFIG_MMU
150 234 init_bootmem_done = 1;
151 235 #endif
152 236  
... ... @@ -190,13 +274,58 @@
190 274  
191 275 void __init mem_init(void)
192 276 {
193   - high_memory = (void *)__va(memory_end);
  277 + pg_data_t *pgdat;
  278 + unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
  279 +
  280 + high_memory = (void *)__va(memory_start + lowmem_size - 1);
  281 +
194 282 /* this will put all memory onto the freelists */
195 283 totalram_pages += free_all_bootmem();
196 284  
197   - printk(KERN_INFO "Memory: %luk/%luk available\n",
198   - nr_free_pages() << (PAGE_SHIFT-10),
199   - num_physpages << (PAGE_SHIFT-10));
  285 + for_each_online_pgdat(pgdat) {
  286 + unsigned long i;
  287 + struct page *page;
  288 +
  289 + for (i = 0; i < pgdat->node_spanned_pages; i++) {
  290 + if (!pfn_valid(pgdat->node_start_pfn + i))
  291 + continue;
  292 + page = pgdat_page_nr(pgdat, i);
  293 + if (PageReserved(page))
  294 + reservedpages++;
  295 + }
  296 + }
  297 +
  298 +#ifdef CONFIG_HIGHMEM
  299 + reservedpages -= highmem_setup();
  300 +#endif
  301 +
  302 + codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
  303 + datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
  304 + initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
  305 + bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
  306 +
  307 + pr_info("Memory: %luk/%luk available (%luk kernel code, "
  308 + "%luk reserved, %luk data, %luk bss, %luk init)\n",
  309 + nr_free_pages() << (PAGE_SHIFT-10),
  310 + num_physpages << (PAGE_SHIFT-10),
  311 + codesize >> 10,
  312 + reservedpages << (PAGE_SHIFT-10),
  313 + datasize >> 10,
  314 + bsssize >> 10,
  315 + initsize >> 10);
  316 +
  317 +#ifdef CONFIG_MMU
  318 + pr_info("Kernel virtual memory layout:\n");
  319 + pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
  320 +#ifdef CONFIG_HIGHMEM
  321 + pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
  322 + PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
  323 +#endif /* CONFIG_HIGHMEM */
  324 + pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
  325 + ioremap_bot, ioremap_base);
  326 + pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
  327 + (unsigned long)VMALLOC_START, VMALLOC_END);
  328 +#endif
200 329 mem_init_done = 1;
201 330 }
202 331  
... ... @@ -226,7 +355,6 @@
226 355 maxmem = memparse(p, &p);
227 356 if (maxmem && memory_size > maxmem) {
228 357 memory_size = maxmem;
229   - memory_end = memory_start + memory_size;
230 358 memblock.memory.regions[0].size = memory_size;
231 359 }
232 360 }
... ... @@ -270,16 +398,27 @@
270 398 machine_restart(NULL);
271 399 }
272 400  
273   - if ((u32) memblock.memory.regions[0].size < 0x1000000) {
274   - printk(KERN_EMERG "Memory must be greater than 16MB\n");
  401 + if ((u32) memblock.memory.regions[0].size < 0x400000) {
  402 + printk(KERN_EMERG "Memory must be greater than 4MB\n");
275 403 machine_restart(NULL);
276 404 }
  405 +
  406 + if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
  407 + printk(KERN_EMERG "Kernel size is greater than memory node\n");
  408 + machine_restart(NULL);
  409 + }
  410 +
277 411 /* Find main memory where the kernel is */
278 412 memory_start = (u32) memblock.memory.regions[0].base;
279   - memory_end = (u32) memblock.memory.regions[0].base +
280   - (u32) memblock.memory.regions[0].size;
281   - memory_size = memory_end - memory_start;
  413 + lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;
282 414  
  415 + if (lowmem_size > CONFIG_LOWMEM_SIZE) {
  416 + lowmem_size = CONFIG_LOWMEM_SIZE;
  417 +#ifndef CONFIG_HIGHMEM
  418 + memory_size = lowmem_size;
  419 +#endif
  420 + }
  421 +
283 422 mm_cmdline_setup(); /* FIXME parse args from command line - not used */
284 423  
285 424 /*
... ... @@ -305,15 +444,20 @@
305 444 /* Map in all of RAM starting at CONFIG_KERNEL_START */
306 445 mapin_ram();
307 446  
308   -#ifdef CONFIG_HIGHMEM_START_BOOL
309   - ioremap_base = CONFIG_HIGHMEM_START;
  447 + /* Extend vmalloc and ioremap area as big as possible */
  448 +#ifdef CONFIG_HIGHMEM
  449 + ioremap_base = ioremap_bot = PKMAP_BASE;
310 450 #else
311   - ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
312   -#endif /* CONFIG_HIGHMEM_START_BOOL */
313   - ioremap_bot = ioremap_base;
  451 + ioremap_base = ioremap_bot = FIXADDR_START;
  452 +#endif
314 453  
315 454 /* Initialize the context management stuff */
316 455 mmu_context_init();
  456 +
  457 + /* Shortly after that, the entire linear mapping will be available */
  458 + /* This will also cause that unflatten device tree will be allocated
  459 + * inside 768MB limit */
  460 + memblock_set_current_limit(memory_start + lowmem_size - 1);
317 461 }
318 462  
319 463 /* This is only called until mem_init is done. */
320 464  
... ... @@ -324,11 +468,11 @@
324 468 p = alloc_bootmem_pages(PAGE_SIZE);
325 469 } else {
326 470 /*
327   - * Mem start + 32MB -> here is limit
  471 + * Mem start + kernel_tlb -> here is limit
328 472 * because of mem mapping from head.S
329 473 */
330 474 p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
331   - memory_start + 0x2000000));
  475 + memory_start + kernel_tlb));
332 476 }
333 477 return p;
334 478 }
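
A worked example of the new pfn bookkeeping (illustrative numbers, assuming 4kB pages and the default 768MB CONFIG_LOWMEM_SIZE): with memory_start = 0x80000000 and memory_size = 0x40000000 (1GB), lowmem_size is clamped to 0x30000000, so

	max_low_pfn = (0x80000000 + 0x30000000) >> 12 = 0xb0000
	max_pfn     = (0x80000000 + 0x40000000) >> 12 = 0xc0000

and the top 256MB - pfns 0xb0000 to 0xbffff - lands in ZONE_HIGHMEM, reachable only through kmap()/kmap_atomic().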
arch/microblaze/mm/pgtable.c
... ... @@ -37,6 +37,7 @@
37 37 #include <linux/io.h>
38 38 #include <asm/mmu.h>
39 39 #include <asm/sections.h>
  40 +#include <asm/fixmap.h>
40 41  
41 42 #define flush_HPTE(X, va, pg) _tlbie(va)
42 43  
... ... @@ -44,11 +45,6 @@
44 45 unsigned long ioremap_bot;
45 46 EXPORT_SYMBOL(ioremap_bot);
46 47  
47   -/* The maximum lowmem defaults to 768Mb, but this can be configured to
48   - * another value.
49   - */
50   -#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
51   -
52 48 #ifndef CONFIG_SMP
53 49 struct pgtable_cache_struct quicklists;
54 50 #endif
... ... @@ -80,7 +76,7 @@
80 76 !(p >= virt_to_phys((unsigned long)&__bss_stop) &&
81 77 p < virt_to_phys((unsigned long)__bss_stop))) {
82 78 printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
83   - " is RAM lr %p\n", (unsigned long)p,
  79 + " is RAM lr %pf\n", (unsigned long)p,
84 80 __builtin_return_address(0));
85 81 return NULL;
86 82 }
... ... @@ -171,7 +167,7 @@
171 167  
172 168 v = CONFIG_KERNEL_START;
173 169 p = memory_start;
174   - for (s = 0; s < memory_size; s += PAGE_SIZE) {
  170 + for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
175 171 f = _PAGE_PRESENT | _PAGE_ACCESSED |
176 172 _PAGE_SHARED | _PAGE_HWEXEC;
177 173 if ((char *) v < _stext || (char *) v >= _etext)
... ... @@ -253,5 +249,15 @@
253 249 clear_page(pte);
254 250 }
255 251 return pte;
  252 +}
  253 +
  254 +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
  255 +{
  256 + unsigned long address = __fix_to_virt(idx);
  257 +
  258 + if (idx >= __end_of_fixed_addresses)
  259 + BUG();
  260 +
  261 + map_page(address, phys, pgprot_val(flags));
256 262 }