Commit b6e3590f8145c77b8fcef3247e2412335221412f
Committed by
Andi Kleen
1 parent
de90c5ce83
[PATCH] x86: Allow percpu variables to be page-aligned
Let's allow page-alignment in general for per-cpu data (wanted by Xen, and Ingo suggested KVM as well). Because larger alignments can use more room, we increase the max per-cpu memory to 64k rather than 32k: it's getting a little tight. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com> Signed-off-by: Andi Kleen <ak@suse.de> Acked-by: Ingo Molnar <mingo@elte.hu> Cc: Andi Kleen <ak@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Showing 21 changed files with 29 additions and 35 deletions. (Side-by-side Diff)
- arch/alpha/kernel/vmlinux.lds.S
- arch/arm/kernel/vmlinux.lds.S
- arch/cris/arch-v32/vmlinux.lds.S
- arch/frv/kernel/vmlinux.lds.S
- arch/i386/kernel/vmlinux.lds.S
- arch/m32r/kernel/vmlinux.lds.S
- arch/mips/kernel/vmlinux.lds.S
- arch/parisc/kernel/vmlinux.lds.S
- arch/powerpc/kernel/setup_64.c
- arch/powerpc/kernel/vmlinux.lds.S
- arch/ppc/kernel/vmlinux.lds.S
- arch/s390/kernel/vmlinux.lds.S
- arch/sh/kernel/vmlinux.lds.S
- arch/sh64/kernel/vmlinux.lds.S
- arch/sparc/kernel/vmlinux.lds.S
- arch/sparc64/kernel/smp.c
- arch/x86_64/kernel/setup64.c
- arch/x86_64/kernel/vmlinux.lds.S
- arch/xtensa/kernel/vmlinux.lds.S
- init/main.c
- kernel/module.c
arch/alpha/kernel/vmlinux.lds.S
arch/arm/kernel/vmlinux.lds.S
arch/cris/arch-v32/vmlinux.lds.S
arch/frv/kernel/vmlinux.lds.S
arch/i386/kernel/vmlinux.lds.S
arch/m32r/kernel/vmlinux.lds.S
arch/mips/kernel/vmlinux.lds.S
arch/parisc/kernel/vmlinux.lds.S
arch/powerpc/kernel/setup_64.c
... | ... | @@ -582,14 +582,14 @@ |
582 | 582 | char *ptr; |
583 | 583 | |
584 | 584 | /* Copy section for each CPU (we discard the original) */ |
585 | - size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); | |
585 | + size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE); | |
586 | 586 | #ifdef CONFIG_MODULES |
587 | 587 | if (size < PERCPU_ENOUGH_ROOM) |
588 | 588 | size = PERCPU_ENOUGH_ROOM; |
589 | 589 | #endif |
590 | 590 | |
591 | 591 | for_each_possible_cpu(i) { |
592 | - ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size); | |
592 | + ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size); | |
593 | 593 | if (!ptr) |
594 | 594 | panic("Cannot allocate cpu data for CPU %d\n", i); |
595 | 595 |
arch/powerpc/kernel/vmlinux.lds.S
arch/ppc/kernel/vmlinux.lds.S
arch/s390/kernel/vmlinux.lds.S
arch/sh/kernel/vmlinux.lds.S
arch/sh64/kernel/vmlinux.lds.S
... | ... | @@ -85,7 +85,7 @@ |
85 | 85 | . = ALIGN(PAGE_SIZE); |
86 | 86 | .data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) } |
87 | 87 | |
88 | - . = ALIGN(L1_CACHE_BYTES); | |
88 | + . = ALIGN(PAGE_SIZE); | |
89 | 89 | __per_cpu_start = .; |
90 | 90 | .data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) } |
91 | 91 | __per_cpu_end = . ; |
arch/sparc/kernel/vmlinux.lds.S
arch/sparc64/kernel/smp.c
... | ... | @@ -1343,11 +1343,11 @@ |
1343 | 1343 | /* Copy section for each CPU (we discard the original) */ |
1344 | 1344 | goal = PERCPU_ENOUGH_ROOM; |
1345 | 1345 | |
1346 | - __per_cpu_shift = 0; | |
1347 | - for (size = 1UL; size < goal; size <<= 1UL) | |
1346 | + __per_cpu_shift = PAGE_SHIFT; | |
1347 | + for (size = PAGE_SIZE; size < goal; size <<= 1UL) | |
1348 | 1348 | __per_cpu_shift++; |
1349 | 1349 | |
1350 | - ptr = alloc_bootmem(size * NR_CPUS); | |
1350 | + ptr = alloc_bootmem_pages(size * NR_CPUS); | |
1351 | 1351 | |
1352 | 1352 | __per_cpu_base = ptr - __per_cpu_start; |
1353 | 1353 |
arch/x86_64/kernel/setup64.c
... | ... | @@ -103,9 +103,9 @@ |
103 | 103 | if (!NODE_DATA(cpu_to_node(i))) { |
104 | 104 | printk("cpu with no node %d, num_online_nodes %d\n", |
105 | 105 | i, num_online_nodes()); |
106 | - ptr = alloc_bootmem(size); | |
106 | + ptr = alloc_bootmem_pages(size); | |
107 | 107 | } else { |
108 | - ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size); | |
108 | + ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size); | |
109 | 109 | } |
110 | 110 | if (!ptr) |
111 | 111 | panic("Cannot allocate cpu data for CPU %d\n", i); |
arch/x86_64/kernel/vmlinux.lds.S
arch/xtensa/kernel/vmlinux.lds.S
init/main.c
... | ... | @@ -369,12 +369,8 @@ |
369 | 369 | unsigned long nr_possible_cpus = num_possible_cpus(); |
370 | 370 | |
371 | 371 | /* Copy section for each CPU (we discard the original) */ |
372 | - size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); | |
373 | -#ifdef CONFIG_MODULES | |
374 | - if (size < PERCPU_ENOUGH_ROOM) | |
375 | - size = PERCPU_ENOUGH_ROOM; | |
376 | -#endif | |
377 | - ptr = alloc_bootmem(size * nr_possible_cpus); | |
372 | + size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE); | |
373 | + ptr = alloc_bootmem_pages(size * nr_possible_cpus); | |
378 | 374 | |
379 | 375 | for_each_possible_cpu(i) { |
380 | 376 | __per_cpu_offset[i] = ptr - __per_cpu_start; |
kernel/module.c
... | ... | @@ -346,10 +346,10 @@ |
346 | 346 | unsigned int i; |
347 | 347 | void *ptr; |
348 | 348 | |
349 | - if (align > SMP_CACHE_BYTES) { | |
350 | - printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n", | |
351 | - name, align, SMP_CACHE_BYTES); | |
352 | - align = SMP_CACHE_BYTES; | |
349 | + if (align > PAGE_SIZE) { | |
350 | + printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", | |
351 | + name, align, PAGE_SIZE); | |
352 | + align = PAGE_SIZE; | |
353 | 353 | } |
354 | 354 | |
355 | 355 | ptr = __per_cpu_start; |