Commit 12d810c1b8c2b913d48e629e2b5c01d105029839
Committed by
Linus Torvalds
1 parent
00c541eae7
m68k: discontinuous memory support
Fix support for discontinuous memory

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 16 changed files with 247 additions and 248 deletions Side-by-side Diff
- arch/m68k/Kconfig
- arch/m68k/kernel/module.c
- arch/m68k/kernel/setup.c
- arch/m68k/mm/init.c
- arch/m68k/mm/memory.c
- arch/m68k/mm/motorola.c
- arch/m68k/sun3/config.c
- include/asm-m68k/mmzone.h
- include/asm-m68k/module.h
- include/asm-m68k/motorola_pgtable.h
- include/asm-m68k/page.h
- include/asm-m68k/pgalloc.h
- include/asm-m68k/pgtable.h
- include/asm-m68k/sun3_pgtable.h
- include/asm-m68k/virtconvert.h
- mm/page_alloc.c
arch/m68k/Kconfig
... | ... | @@ -355,8 +355,9 @@ |
355 | 355 | adventurous. |
356 | 356 | |
357 | 357 | config SINGLE_MEMORY_CHUNK |
358 | - bool "Use one physical chunk of memory only" | |
359 | - depends on ADVANCED && !SUN3 | |
358 | + bool "Use one physical chunk of memory only" if ADVANCED && !SUN3 | |
359 | + default y if SUN3 | |
360 | + select NEED_MULTIPLE_NODES | |
360 | 361 | help |
361 | 362 | Ignore all but the first contiguous chunk of physical memory for VM |
362 | 363 | purposes. This will save a few bytes kernel size and may speed up |
... | ... | @@ -376,6 +377,14 @@ |
376 | 377 | drivers on 68060 based systems where the 68060 bus snooping signal |
377 | 378 | is hardwired on. The 53c710 SCSI driver is known to suffer from |
378 | 379 | this problem. |
380 | + | |
381 | +config ARCH_DISCONTIGMEM_ENABLE | |
382 | + def_bool !SINGLE_MEMORY_CHUNK | |
383 | + | |
384 | +config NODES_SHIFT | |
385 | + int | |
386 | + default "3" | |
387 | + depends on !SINGLE_MEMORY_CHUNK | |
379 | 388 | |
380 | 389 | source "mm/Kconfig" |
381 | 390 |
arch/m68k/kernel/module.c
arch/m68k/kernel/setup.c
... | ... | @@ -60,14 +60,12 @@ |
60 | 60 | int m68k_num_memory; |
61 | 61 | int m68k_realnum_memory; |
62 | 62 | EXPORT_SYMBOL(m68k_realnum_memory); |
63 | -#ifdef CONFIG_SINGLE_MEMORY_CHUNK | |
64 | 63 | unsigned long m68k_memoffset; |
65 | 64 | EXPORT_SYMBOL(m68k_memoffset); |
66 | -#endif | |
67 | 65 | struct mem_info m68k_memory[NUM_MEMINFO]; |
68 | 66 | EXPORT_SYMBOL(m68k_memory); |
69 | 67 | |
70 | -static struct mem_info m68k_ramdisk; | |
68 | +struct mem_info m68k_ramdisk; | |
71 | 69 | |
72 | 70 | static char m68k_command_line[CL_SIZE]; |
73 | 71 | |
... | ... | @@ -208,9 +206,6 @@ |
208 | 206 | void __init setup_arch(char **cmdline_p) |
209 | 207 | { |
210 | 208 | extern int _etext, _edata, _end; |
211 | -#ifndef CONFIG_SUN3 | |
212 | - unsigned long endmem, startmem; | |
213 | -#endif | |
214 | 209 | int i; |
215 | 210 | |
216 | 211 | /* The bootinfo is located right after the kernel bss */ |
217 | 212 | |
218 | 213 | |
... | ... | @@ -320,30 +315,16 @@ |
320 | 315 | panic("No configuration setup"); |
321 | 316 | } |
322 | 317 | |
323 | -#ifndef CONFIG_SUN3 | |
324 | - startmem= m68k_memory[0].addr; | |
325 | - endmem = startmem + m68k_memory[0].size; | |
326 | - high_memory = (void *)PAGE_OFFSET; | |
327 | - for (i = 0; i < m68k_num_memory; i++) { | |
328 | - m68k_memory[i].size &= MASK_256K; | |
329 | - if (m68k_memory[i].addr < startmem) | |
330 | - startmem = m68k_memory[i].addr; | |
331 | - if (m68k_memory[i].addr+m68k_memory[i].size > endmem) | |
332 | - endmem = m68k_memory[i].addr+m68k_memory[i].size; | |
333 | - high_memory += m68k_memory[i].size; | |
334 | - } | |
318 | + paging_init(); | |
335 | 319 | |
336 | - availmem += init_bootmem_node(NODE_DATA(0), availmem >> PAGE_SHIFT, | |
337 | - startmem >> PAGE_SHIFT, endmem >> PAGE_SHIFT); | |
338 | - | |
339 | - for (i = 0; i < m68k_num_memory; i++) | |
340 | - free_bootmem(m68k_memory[i].addr, m68k_memory[i].size); | |
341 | - | |
342 | - reserve_bootmem(m68k_memory[0].addr, availmem - m68k_memory[0].addr); | |
343 | - | |
320 | +#ifndef CONFIG_SUN3 | |
321 | + for (i = 1; i < m68k_num_memory; i++) | |
322 | + free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr, | |
323 | + m68k_memory[i].size); | |
344 | 324 | #ifdef CONFIG_BLK_DEV_INITRD |
345 | 325 | if (m68k_ramdisk.size) { |
346 | - reserve_bootmem(m68k_ramdisk.addr, m68k_ramdisk.size); | |
326 | + reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)), | |
327 | + m68k_ramdisk.addr, m68k_ramdisk.size); | |
347 | 328 | initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); |
348 | 329 | initrd_end = initrd_start + m68k_ramdisk.size; |
349 | 330 | printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end); |
... | ... | @@ -361,8 +342,6 @@ |
361 | 342 | #endif |
362 | 343 | |
363 | 344 | #endif /* !CONFIG_SUN3 */ |
364 | - | |
365 | - paging_init(); | |
366 | 345 | |
367 | 346 | /* set ISA defs early as possible */ |
368 | 347 | #if defined(CONFIG_ISA) && defined(MULTI_ISA) |
arch/m68k/mm/init.c
... | ... | @@ -7,6 +7,7 @@ |
7 | 7 | * to motorola.c and sun3mmu.c |
8 | 8 | */ |
9 | 9 | |
10 | +#include <linux/module.h> | |
10 | 11 | #include <linux/signal.h> |
11 | 12 | #include <linux/sched.h> |
12 | 13 | #include <linux/mm.h> |
... | ... | @@ -31,6 +32,37 @@ |
31 | 32 | |
32 | 33 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
33 | 34 | |
35 | +static bootmem_data_t __initdata bootmem_data[MAX_NUMNODES]; | |
36 | + | |
37 | +pg_data_t pg_data_map[MAX_NUMNODES]; | |
38 | +EXPORT_SYMBOL(pg_data_map); | |
39 | + | |
40 | +int m68k_virt_to_node_shift; | |
41 | + | |
42 | +#ifndef CONFIG_SINGLE_MEMORY_CHUNK | |
43 | +pg_data_t *pg_data_table[65]; | |
44 | +EXPORT_SYMBOL(pg_data_table); | |
45 | +#endif | |
46 | + | |
47 | +void m68k_setup_node(int node) | |
48 | +{ | |
49 | +#ifndef CONFIG_SINGLE_MEMORY_CHUNK | |
50 | + struct mem_info *info = m68k_memory + node; | |
51 | + int i, end; | |
52 | + | |
53 | + i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift(); | |
54 | + end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift(); | |
55 | + for (; i <= end; i++) { | |
56 | + if (pg_data_table[i]) | |
57 | + printk("overlap at %u for chunk %u\n", i, node); | |
58 | + pg_data_table[i] = pg_data_map + node; | |
59 | + } | |
60 | +#endif | |
61 | + pg_data_map[node].bdata = bootmem_data + node; | |
62 | + node_set_online(node); | |
63 | +} | |
64 | + | |
65 | + | |
34 | 66 | /* |
35 | 67 | * ZERO_PAGE is a special page that is used for zero-initialized |
36 | 68 | * data and COW. |
37 | 69 | |
38 | 70 | |
39 | 71 | |
40 | 72 | |
41 | 73 | |
42 | 74 | |
43 | 75 | |
44 | 76 | |
45 | 77 | |
46 | 78 | |
... | ... | @@ -40,72 +72,77 @@ |
40 | 72 | |
41 | 73 | void show_mem(void) |
42 | 74 | { |
43 | - unsigned long i; | |
44 | - int free = 0, total = 0, reserved = 0, shared = 0; | |
45 | - int cached = 0; | |
75 | + pg_data_t *pgdat; | |
76 | + int free = 0, total = 0, reserved = 0, shared = 0; | |
77 | + int cached = 0; | |
78 | + int i; | |
46 | 79 | |
47 | - printk("\nMem-info:\n"); | |
48 | - show_free_areas(); | |
49 | - printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | |
50 | - i = max_mapnr; | |
51 | - while (i-- > 0) { | |
52 | - total++; | |
53 | - if (PageReserved(mem_map+i)) | |
54 | - reserved++; | |
55 | - else if (PageSwapCache(mem_map+i)) | |
56 | - cached++; | |
57 | - else if (!page_count(mem_map+i)) | |
58 | - free++; | |
59 | - else | |
60 | - shared += page_count(mem_map+i) - 1; | |
61 | - } | |
62 | - printk("%d pages of RAM\n",total); | |
63 | - printk("%d free pages\n",free); | |
64 | - printk("%d reserved pages\n",reserved); | |
65 | - printk("%d pages shared\n",shared); | |
66 | - printk("%d pages swap cached\n",cached); | |
80 | + printk("\nMem-info:\n"); | |
81 | + show_free_areas(); | |
82 | + printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | |
83 | + for_each_online_pgdat(pgdat) { | |
84 | + for (i = 0; i < pgdat->node_spanned_pages; i++) { | |
85 | + struct page *page = pgdat->node_mem_map + i; | |
86 | + total++; | |
87 | + if (PageReserved(page)) | |
88 | + reserved++; | |
89 | + else if (PageSwapCache(page)) | |
90 | + cached++; | |
91 | + else if (!page_count(page)) | |
92 | + free++; | |
93 | + else | |
94 | + shared += page_count(page) - 1; | |
95 | + } | |
96 | + } | |
97 | + printk("%d pages of RAM\n",total); | |
98 | + printk("%d free pages\n",free); | |
99 | + printk("%d reserved pages\n",reserved); | |
100 | + printk("%d pages shared\n",shared); | |
101 | + printk("%d pages swap cached\n",cached); | |
67 | 102 | } |
68 | 103 | |
69 | 104 | extern void init_pointer_table(unsigned long ptable); |
70 | 105 | |
71 | 106 | /* References to section boundaries */ |
72 | 107 | |
73 | -extern char _text, _etext, _edata, __bss_start, _end; | |
74 | -extern char __init_begin, __init_end; | |
108 | +extern char _text[], _etext[]; | |
109 | +extern char __init_begin[], __init_end[]; | |
75 | 110 | |
76 | 111 | extern pmd_t *zero_pgtable; |
77 | 112 | |
78 | 113 | void __init mem_init(void) |
79 | 114 | { |
115 | + pg_data_t *pgdat; | |
80 | 116 | int codepages = 0; |
81 | 117 | int datapages = 0; |
82 | 118 | int initpages = 0; |
83 | - unsigned long tmp; | |
84 | -#ifndef CONFIG_SUN3 | |
85 | 119 | int i; |
86 | -#endif | |
87 | 120 | |
88 | - max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT); | |
89 | - | |
90 | 121 | #ifdef CONFIG_ATARI |
91 | 122 | if (MACH_IS_ATARI) |
92 | 123 | atari_stram_mem_init_hook(); |
93 | 124 | #endif |
94 | 125 | |
95 | 126 | /* this will put all memory onto the freelists */ |
96 | - totalram_pages = free_all_bootmem(); | |
127 | + totalram_pages = num_physpages = 0; | |
128 | + for_each_online_pgdat(pgdat) { | |
129 | + num_physpages += pgdat->node_present_pages; | |
97 | 130 | |
98 | - for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) { | |
99 | - if (PageReserved(virt_to_page(tmp))) { | |
100 | - if (tmp >= (unsigned long)&_text | |
101 | - && tmp < (unsigned long)&_etext) | |
131 | + totalram_pages += free_all_bootmem_node(pgdat); | |
132 | + for (i = 0; i < pgdat->node_spanned_pages; i++) { | |
133 | + struct page *page = pgdat->node_mem_map + i; | |
134 | + char *addr = page_to_virt(page); | |
135 | + | |
136 | + if (!PageReserved(page)) | |
137 | + continue; | |
138 | + if (addr >= _text && | |
139 | + addr < _etext) | |
102 | 140 | codepages++; |
103 | - else if (tmp >= (unsigned long) &__init_begin | |
104 | - && tmp < (unsigned long) &__init_end) | |
141 | + else if (addr >= __init_begin && | |
142 | + addr < __init_end) | |
105 | 143 | initpages++; |
106 | 144 | else |
107 | 145 | datapages++; |
108 | - continue; | |
109 | 146 | } |
110 | 147 | } |
111 | 148 | |
... | ... | @@ -124,7 +161,7 @@ |
124 | 161 | |
125 | 162 | printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n", |
126 | 163 | (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), |
127 | - max_mapnr << (PAGE_SHIFT-10), | |
164 | + totalram_pages << (PAGE_SHIFT-10), | |
128 | 165 | codepages << (PAGE_SHIFT-10), |
129 | 166 | datapages << (PAGE_SHIFT-10), |
130 | 167 | initpages << (PAGE_SHIFT-10)); |
arch/m68k/mm/memory.c
... | ... | @@ -127,67 +127,6 @@ |
127 | 127 | return 0; |
128 | 128 | } |
129 | 129 | |
130 | -#ifdef DEBUG_INVALID_PTOV | |
131 | -int mm_inv_cnt = 5; | |
132 | -#endif | |
133 | - | |
134 | -#ifndef CONFIG_SINGLE_MEMORY_CHUNK | |
135 | -/* | |
136 | - * The following two routines map from a physical address to a kernel | |
137 | - * virtual address and vice versa. | |
138 | - */ | |
139 | -unsigned long mm_vtop(unsigned long vaddr) | |
140 | -{ | |
141 | - int i=0; | |
142 | - unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET; | |
143 | - | |
144 | - do { | |
145 | - if (voff < m68k_memory[i].size) { | |
146 | -#ifdef DEBUGPV | |
147 | - printk ("VTOP(%p)=%lx\n", vaddr, | |
148 | - m68k_memory[i].addr + voff); | |
149 | -#endif | |
150 | - return m68k_memory[i].addr + voff; | |
151 | - } | |
152 | - voff -= m68k_memory[i].size; | |
153 | - } while (++i < m68k_num_memory); | |
154 | - | |
155 | - /* As a special case allow `__pa(high_memory)'. */ | |
156 | - if (voff == 0) | |
157 | - return m68k_memory[i-1].addr + m68k_memory[i-1].size; | |
158 | - | |
159 | - return -1; | |
160 | -} | |
161 | -EXPORT_SYMBOL(mm_vtop); | |
162 | - | |
163 | -unsigned long mm_ptov (unsigned long paddr) | |
164 | -{ | |
165 | - int i = 0; | |
166 | - unsigned long poff, voff = PAGE_OFFSET; | |
167 | - | |
168 | - do { | |
169 | - poff = paddr - m68k_memory[i].addr; | |
170 | - if (poff < m68k_memory[i].size) { | |
171 | -#ifdef DEBUGPV | |
172 | - printk ("PTOV(%lx)=%lx\n", paddr, poff + voff); | |
173 | -#endif | |
174 | - return poff + voff; | |
175 | - } | |
176 | - voff += m68k_memory[i].size; | |
177 | - } while (++i < m68k_num_memory); | |
178 | - | |
179 | -#ifdef DEBUG_INVALID_PTOV | |
180 | - if (mm_inv_cnt > 0) { | |
181 | - mm_inv_cnt--; | |
182 | - printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n", | |
183 | - paddr, __builtin_return_address(0)); | |
184 | - } | |
185 | -#endif | |
186 | - return -1; | |
187 | -} | |
188 | -EXPORT_SYMBOL(mm_ptov); | |
189 | -#endif | |
190 | - | |
191 | 130 | /* invalidate page in both caches */ |
192 | 131 | static inline void clear040(unsigned long paddr) |
193 | 132 | { |
... | ... | @@ -353,17 +292,4 @@ |
353 | 292 | #endif |
354 | 293 | } |
355 | 294 | EXPORT_SYMBOL(cache_push); |
356 | - | |
357 | -#ifndef CONFIG_SINGLE_MEMORY_CHUNK | |
358 | -int mm_end_of_chunk (unsigned long addr, int len) | |
359 | -{ | |
360 | - int i; | |
361 | - | |
362 | - for (i = 0; i < m68k_num_memory; i++) | |
363 | - if (m68k_memory[i].addr + m68k_memory[i].size == addr + len) | |
364 | - return 1; | |
365 | - return 0; | |
366 | -} | |
367 | -EXPORT_SYMBOL(mm_end_of_chunk); | |
368 | -#endif |
arch/m68k/mm/motorola.c
... | ... | @@ -43,6 +43,11 @@ |
43 | 43 | EXPORT_SYMBOL(mm_cachebits); |
44 | 44 | #endif |
45 | 45 | |
46 | +/* size of memory already mapped in head.S */ | |
47 | +#define INIT_MAPPED_SIZE (4UL<<20) | |
48 | + | |
49 | +extern unsigned long availmem; | |
50 | + | |
46 | 51 | static pte_t * __init kernel_page_table(void) |
47 | 52 | { |
48 | 53 | pte_t *ptablep; |
49 | 54 | |
50 | 55 | |
... | ... | @@ -98,19 +103,20 @@ |
98 | 103 | return last_pgtable; |
99 | 104 | } |
100 | 105 | |
101 | -static unsigned long __init | |
102 | -map_chunk (unsigned long addr, long size) | |
106 | +static void __init map_node(int node) | |
103 | 107 | { |
104 | 108 | #define PTRTREESIZE (256*1024) |
105 | 109 | #define ROOTTREESIZE (32*1024*1024) |
106 | - static unsigned long virtaddr = PAGE_OFFSET; | |
107 | - unsigned long physaddr; | |
110 | + unsigned long physaddr, virtaddr, size; | |
108 | 111 | pgd_t *pgd_dir; |
109 | 112 | pmd_t *pmd_dir; |
110 | 113 | pte_t *pte_dir; |
111 | 114 | |
112 | - physaddr = (addr | m68k_supervisor_cachemode | | |
113 | - _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY); | |
115 | + size = m68k_memory[node].size; | |
116 | + physaddr = m68k_memory[node].addr; | |
117 | + virtaddr = (unsigned long)phys_to_virt(physaddr); | |
118 | + physaddr |= m68k_supervisor_cachemode | | |
119 | + _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY; | |
114 | 120 | if (CPU_IS_040_OR_060) |
115 | 121 | physaddr |= _PAGE_GLOBAL040; |
116 | 122 | |
... | ... | @@ -190,8 +196,6 @@ |
190 | 196 | #ifdef DEBUG |
191 | 197 | printk("\n"); |
192 | 198 | #endif |
193 | - | |
194 | - return virtaddr; | |
195 | 199 | } |
196 | 200 | |
197 | 201 | /* |
198 | 202 | |
199 | 203 | |
... | ... | @@ -200,15 +204,16 @@ |
200 | 204 | */ |
201 | 205 | void __init paging_init(void) |
202 | 206 | { |
203 | - int chunk; | |
204 | - unsigned long mem_avail = 0; | |
205 | 207 | unsigned long zones_size[MAX_NR_ZONES] = { 0, }; |
208 | + unsigned long min_addr, max_addr; | |
209 | + unsigned long addr, size, end; | |
210 | + int i; | |
206 | 211 | |
207 | 212 | #ifdef DEBUG |
208 | 213 | { |
209 | 214 | extern unsigned long availmem; |
210 | - printk ("start of paging_init (%p, %lx, %lx, %lx)\n", | |
211 | - kernel_pg_dir, availmem, start_mem, end_mem); | |
215 | + printk ("start of paging_init (%p, %lx)\n", | |
216 | + kernel_pg_dir, availmem); | |
212 | 217 | } |
213 | 218 | #endif |
214 | 219 | |
215 | 220 | |
216 | 221 | |
217 | 222 | |
218 | 223 | |
219 | 224 | |
220 | 225 | |
... | ... | @@ -222,27 +227,62 @@ |
222 | 227 | pgprot_val(protection_map[i]) |= _PAGE_CACHE040; |
223 | 228 | } |
224 | 229 | |
230 | + min_addr = m68k_memory[0].addr; | |
231 | + max_addr = min_addr + m68k_memory[0].size; | |
232 | + for (i = 1; i < m68k_num_memory;) { | |
233 | + if (m68k_memory[i].addr < min_addr) { | |
234 | + printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n", | |
235 | + m68k_memory[i].addr, m68k_memory[i].size); | |
236 | + printk("Fix your bootloader or use a memfile to make use of this area!\n"); | |
237 | + m68k_num_memory--; | |
238 | + memmove(m68k_memory + i, m68k_memory + i + 1, | |
239 | + (m68k_num_memory - i) * sizeof(struct mem_info)); | |
240 | + continue; | |
241 | + } | |
242 | + addr = m68k_memory[i].addr + m68k_memory[i].size; | |
243 | + if (addr > max_addr) | |
244 | + max_addr = addr; | |
245 | + i++; | |
246 | + } | |
247 | + m68k_memoffset = min_addr - PAGE_OFFSET; | |
248 | + m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6; | |
249 | + | |
225 | 250 | module_fixup(NULL, __start_fixup, __stop_fixup); |
226 | 251 | flush_icache(); |
227 | 252 | |
253 | + high_memory = phys_to_virt(max_addr); | |
254 | + | |
255 | + min_low_pfn = availmem >> PAGE_SHIFT; | |
256 | + max_low_pfn = max_addr >> PAGE_SHIFT; | |
257 | + | |
258 | + for (i = 0; i < m68k_num_memory; i++) { | |
259 | + addr = m68k_memory[i].addr; | |
260 | + end = addr + m68k_memory[i].size; | |
261 | + m68k_setup_node(i); | |
262 | + availmem = PAGE_ALIGN(availmem); | |
263 | + availmem += init_bootmem_node(NODE_DATA(i), | |
264 | + availmem >> PAGE_SHIFT, | |
265 | + addr >> PAGE_SHIFT, | |
266 | + end >> PAGE_SHIFT); | |
267 | + } | |
268 | + | |
228 | 269 | /* |
229 | 270 | * Map the physical memory available into the kernel virtual |
230 | - * address space. It may allocate some memory for page | |
231 | - * tables and thus modify availmem. | |
271 | + * address space. First initialize the bootmem allocator with | |
272 | + * the memory we already mapped, so map_node() has something | |
273 | + * to allocate. | |
232 | 274 | */ |
275 | + addr = m68k_memory[0].addr; | |
276 | + size = m68k_memory[0].size; | |
277 | + free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr)); | |
278 | + map_node(0); | |
279 | + if (size > INIT_MAPPED_SIZE) | |
280 | + free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE); | |
233 | 281 | |
234 | - for (chunk = 0; chunk < m68k_num_memory; chunk++) { | |
235 | - mem_avail = map_chunk (m68k_memory[chunk].addr, | |
236 | - m68k_memory[chunk].size); | |
282 | + for (i = 1; i < m68k_num_memory; i++) | |
283 | + map_node(i); | |
237 | 284 | |
238 | - } | |
239 | - | |
240 | 285 | flush_tlb_all(); |
241 | -#ifdef DEBUG | |
242 | - printk ("memory available is %ldKB\n", mem_avail >> 10); | |
243 | - printk ("start_mem is %#lx\nvirtual_end is %#lx\n", | |
244 | - start_mem, end_mem); | |
245 | -#endif | |
246 | 286 | |
247 | 287 | /* |
248 | 288 | * initialize the bad page table and bad page to point |
... | ... | @@ -259,14 +299,11 @@ |
259 | 299 | #ifdef DEBUG |
260 | 300 | printk ("before free_area_init\n"); |
261 | 301 | #endif |
262 | - zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ? | |
263 | - (mach_max_dma_address+1) : (unsigned long)high_memory); | |
264 | - zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[0]; | |
265 | - | |
266 | - zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT; | |
267 | - zones_size[ZONE_NORMAL] >>= PAGE_SHIFT; | |
268 | - | |
269 | - free_area_init(zones_size); | |
302 | + for (i = 0; i < m68k_num_memory; i++) { | |
303 | + zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; | |
304 | + free_area_init_node(i, pg_data_map + i, zones_size, | |
305 | + m68k_memory[i].addr >> PAGE_SHIFT, NULL); | |
306 | + } | |
270 | 307 | } |
271 | 308 | |
272 | 309 | extern char __init_begin, __init_end; |
arch/m68k/sun3/config.c
... | ... | @@ -21,6 +21,7 @@ |
21 | 21 | #include <asm/contregs.h> |
22 | 22 | #include <asm/movs.h> |
23 | 23 | #include <asm/pgtable.h> |
24 | +#include <asm/pgalloc.h> | |
24 | 25 | #include <asm/sun3-head.h> |
25 | 26 | #include <asm/sun3mmu.h> |
26 | 27 | #include <asm/rtc.h> |
... | ... | @@ -127,6 +128,7 @@ |
127 | 128 | high_memory = (void *)memory_end; |
128 | 129 | availmem = memory_start; |
129 | 130 | |
131 | + m68k_setup_node(0); | |
130 | 132 | availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); |
131 | 133 | availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; |
132 | 134 |
include/asm-m68k/mmzone.h
include/asm-m68k/module.h
include/asm-m68k/motorola_pgtable.h
... | ... | @@ -130,7 +130,7 @@ |
130 | 130 | #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
131 | 131 | #define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; }) |
132 | 132 | |
133 | -#define pte_page(pte) (mem_map + ((unsigned long)(__va(pte_val(pte)) - PAGE_OFFSET) >> PAGE_SHIFT)) | |
133 | +#define pte_page(pte) virt_to_page(__va(pte_val(pte))) | |
134 | 134 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) |
135 | 135 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) |
136 | 136 | |
... | ... | @@ -143,7 +143,7 @@ |
143 | 143 | while (--__i >= 0) \ |
144 | 144 | *__ptr++ = 0; \ |
145 | 145 | }) |
146 | -#define pmd_page(pmd) (mem_map + ((unsigned long)(__va(pmd_val(pmd)) - PAGE_OFFSET) >> PAGE_SHIFT)) | |
146 | +#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) | |
147 | 147 | |
148 | 148 | |
149 | 149 | #define pgd_none(pgd) (!pgd_val(pgd)) |
150 | 150 | |
... | ... | @@ -223,10 +223,10 @@ |
223 | 223 | return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); |
224 | 224 | } |
225 | 225 | |
226 | -#define pte_offset_map(pmdp,address) ((pte_t *)kmap(pmd_page(*pmdp)) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) | |
226 | +#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) | |
227 | 227 | #define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address) |
228 | -#define pte_unmap(pte) kunmap(pte) | |
229 | -#define pte_unmap_nested(pte) kunmap(pte) | |
228 | +#define pte_unmap(pte) ((void)0) | |
229 | +#define pte_unmap_nested(pte) ((void)0) | |
230 | 230 | |
231 | 231 | /* |
232 | 232 | * Allocate and free page tables. The xxx_kernel() versions are |
include/asm-m68k/page.h
... | ... | @@ -121,7 +121,6 @@ |
121 | 121 | #ifndef CONFIG_SUN3 |
122 | 122 | |
123 | 123 | #define WANT_PAGE_VIRTUAL |
124 | -#ifdef CONFIG_SINGLE_MEMORY_CHUNK | |
125 | 124 | |
126 | 125 | static inline unsigned long ___pa(void *vaddr) |
127 | 126 | { |
... | ... | @@ -133,7 +132,7 @@ |
133 | 132 | : "0" (vaddr), "i" (m68k_fixup_memoffset)); |
134 | 133 | return paddr; |
135 | 134 | } |
136 | -#define __pa(vaddr) ___pa((void *)(vaddr)) | |
135 | +#define __pa(vaddr) ___pa((void *)(vaddr)) | |
137 | 136 | static inline void *__va(unsigned long paddr) |
138 | 137 | { |
139 | 138 | void *vaddr; |
... | ... | @@ -145,11 +144,6 @@ |
145 | 144 | return vaddr; |
146 | 145 | } |
147 | 146 | |
148 | -#else | |
149 | -#define __pa(vaddr) virt_to_phys((void *)(vaddr)) | |
150 | -#define __va(paddr) phys_to_virt((unsigned long)(paddr)) | |
151 | -#endif | |
152 | - | |
153 | 147 | #else /* !CONFIG_SUN3 */ |
154 | 148 | /* This #define is a horrible hack to suppress lots of warnings. --m */ |
155 | 149 | #define __pa(x) ___pa((unsigned long)(x)) |
156 | 150 | |
... | ... | @@ -184,11 +178,47 @@ |
184 | 178 | #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) |
185 | 179 | #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) |
186 | 180 | |
187 | -#define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT)) | |
188 | -#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) | |
181 | +extern int m68k_virt_to_node_shift; | |
189 | 182 | |
190 | -#define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn)) | |
191 | -#define page_to_pfn(page) virt_to_pfn(page_to_virt(page)) | |
183 | +#ifdef CONFIG_SINGLE_MEMORY_CHUNK | |
184 | +#define __virt_to_node(addr) (&pg_data_map[0]) | |
185 | +#else | |
186 | +extern struct pglist_data *pg_data_table[]; | |
187 | + | |
188 | +static inline __attribute_const__ int __virt_to_node_shift(void) | |
189 | +{ | |
190 | + int shift; | |
191 | + | |
192 | + asm ( | |
193 | + "1: moveq #0,%0\n" | |
194 | + m68k_fixup(%c1, 1b) | |
195 | + : "=d" (shift) | |
196 | + : "i" (m68k_fixup_vnode_shift)); | |
197 | + return shift; | |
198 | +} | |
199 | + | |
200 | +#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()]) | |
201 | +#endif | |
202 | + | |
203 | +#define virt_to_page(addr) ({ \ | |
204 | + pfn_to_page(virt_to_pfn(addr)); \ | |
205 | +}) | |
206 | +#define page_to_virt(page) ({ \ | |
207 | + pfn_to_virt(page_to_pfn(page)); \ | |
208 | +}) | |
209 | + | |
210 | +#define pfn_to_page(pfn) ({ \ | |
211 | + unsigned long __pfn = (pfn); \ | |
212 | + struct pglist_data *pgdat; \ | |
213 | + pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \ | |
214 | + pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ | |
215 | +}) | |
216 | +#define page_to_pfn(_page) ({ \ | |
217 | + struct page *__p = (_page); \ | |
218 | + struct pglist_data *pgdat; \ | |
219 | + pgdat = &pg_data_map[page_to_nid(__p)]; \ | |
220 | + ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ | |
221 | +}) | |
192 | 222 | |
193 | 223 | #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory) |
194 | 224 | #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn)) |
include/asm-m68k/pgalloc.h
include/asm-m68k/pgtable.h
... | ... | @@ -107,22 +107,7 @@ |
107 | 107 | /* 64-bit machines, beware! SRB. */ |
108 | 108 | #define SIZEOF_PTR_LOG2 2 |
109 | 109 | |
110 | -/* | |
111 | - * Check if the addr/len goes up to the end of a physical | |
112 | - * memory chunk. Used for DMA functions. | |
113 | - */ | |
114 | -#ifdef CONFIG_SINGLE_MEMORY_CHUNK | |
115 | -/* | |
116 | - * It makes no sense to consider whether we cross a memory boundary if | |
117 | - * we support just one physical chunk of memory. | |
118 | - */ | |
119 | -static inline int mm_end_of_chunk(unsigned long addr, int len) | |
120 | -{ | |
121 | - return 0; | |
122 | -} | |
123 | -#else | |
124 | -int mm_end_of_chunk (unsigned long addr, int len); | |
125 | -#endif | |
110 | +#define mm_end_of_chunk(addr, len) 0 | |
126 | 111 | |
127 | 112 | extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode); |
128 | 113 |
include/asm-m68k/sun3_pgtable.h
... | ... | @@ -132,8 +132,8 @@ |
132 | 132 | #define pfn_pte(pfn, pgprot) \ |
133 | 133 | ({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; }) |
134 | 134 | |
135 | -#define pte_page(pte) (mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)) | |
136 | -#define pmd_page(pmd) (mem_map+((__pmd_page(pmd) - PAGE_OFFSET) >> PAGE_SHIFT)) | |
135 | +#define pte_page(pte) virt_to_page(__pte_page(pte)) | |
136 | +#define pmd_page(pmd) virt_to_page(__pmd_page(pmd)) | |
137 | 137 | |
138 | 138 | |
139 | 139 | static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); } |
include/asm-m68k/virtconvert.h
... | ... | @@ -8,56 +8,35 @@ |
8 | 8 | #ifdef __KERNEL__ |
9 | 9 | |
10 | 10 | #include <linux/compiler.h> |
11 | +#include <linux/mmzone.h> | |
11 | 12 | #include <asm/setup.h> |
12 | 13 | #include <asm/page.h> |
13 | 14 | |
14 | -#ifdef CONFIG_AMIGA | |
15 | -#include <asm/amigahw.h> | |
16 | -#endif | |
17 | - | |
18 | 15 | /* |
19 | 16 | * Change virtual addresses to physical addresses and vv. |
20 | 17 | */ |
21 | -#ifndef CONFIG_SUN3 | |
22 | -extern unsigned long mm_vtop(unsigned long addr) __attribute_const__; | |
23 | -extern unsigned long mm_ptov(unsigned long addr) __attribute_const__; | |
24 | -#else | |
25 | -static inline unsigned long mm_vtop(unsigned long vaddr) | |
26 | -{ | |
27 | - return __pa(vaddr); | |
28 | -} | |
29 | - | |
30 | -static inline unsigned long mm_ptov(unsigned long paddr) | |
31 | -{ | |
32 | - return (unsigned long)__va(paddr); | |
33 | -} | |
34 | -#endif | |
35 | - | |
36 | -#ifdef CONFIG_SINGLE_MEMORY_CHUNK | |
37 | -static inline unsigned long virt_to_phys(void *vaddr) | |
38 | -{ | |
39 | - return (unsigned long)vaddr - PAGE_OFFSET + m68k_memory[0].addr; | |
40 | -} | |
41 | - | |
42 | -static inline void * phys_to_virt(unsigned long paddr) | |
43 | -{ | |
44 | - return (void *)(paddr - m68k_memory[0].addr + PAGE_OFFSET); | |
45 | -} | |
46 | -#else | |
47 | 18 | static inline unsigned long virt_to_phys(void *address) |
48 | 19 | { |
49 | - return mm_vtop((unsigned long)address); | |
20 | + return __pa(address); | |
50 | 21 | } |
51 | 22 | |
52 | 23 | static inline void *phys_to_virt(unsigned long address) |
53 | 24 | { |
54 | - return (void *) mm_ptov(address); | |
25 | + return __va(address); | |
55 | 26 | } |
56 | -#endif | |
57 | 27 | |
58 | 28 | /* Permanent address of a page. */ |
59 | -#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) | |
60 | -#define page_to_phys(page) virt_to_phys((void *)__page_address(page)) | |
29 | +#ifdef CONFIG_SINGLE_MEMORY_CHUNK | |
30 | +#define page_to_phys(page) \ | |
31 | + __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT)) | |
32 | +#else | |
33 | +#define page_to_phys(_page) ({ \ | |
34 | + struct page *__page = _page; \ | |
35 | + struct pglist_data *pgdat; \ | |
36 | + pgdat = pg_data_table[page_to_nid(__page)]; \ | |
37 | + page_to_pfn(__page) << PAGE_SHIFT; \ | |
38 | +}) | |
39 | +#endif | |
61 | 40 | |
62 | 41 | /* |
63 | 42 | * IO bus memory addresses are 1:1 with the physical address, |
mm/page_alloc.c
... | ... | @@ -2689,7 +2689,7 @@ |
2689 | 2689 | map = alloc_bootmem_node(pgdat, size); |
2690 | 2690 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); |
2691 | 2691 | } |
2692 | -#ifdef CONFIG_FLATMEM | |
2692 | +#ifndef CONFIG_NEED_MULTIPLE_NODES | |
2693 | 2693 | /* |
2694 | 2694 | * With no DISCONTIG, the global mem_map is just set as node 0's |
2695 | 2695 | */ |