Commit b6e3590f8145c77b8fcef3247e2412335221412f

Authored by Jeremy Fitzhardinge
Committed by Andi Kleen
1 parent de90c5ce83

[PATCH] x86: Allow percpu variables to be page-aligned

Let's allow page-alignment in general for per-cpu data (wanted by Xen, and
Ingo suggested KVM as well).

Because larger alignments can use more room, we increase the max per-cpu
memory to 64k rather than 32k: it's getting a little tight.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Showing 21 changed files with 29 additions and 35 deletions Side-by-side Diff

arch/alpha/kernel/vmlinux.lds.S
... ... @@ -69,7 +69,7 @@
69 69 . = ALIGN(8);
70 70 SECURITY_INIT
71 71  
72   - . = ALIGN(64);
  72 + . = ALIGN(8192);
73 73 __per_cpu_start = .;
74 74 .data.percpu : { *(.data.percpu) }
75 75 __per_cpu_end = .;
arch/arm/kernel/vmlinux.lds.S
... ... @@ -59,7 +59,7 @@
59 59 usr/built-in.o(.init.ramfs)
60 60 __initramfs_end = .;
61 61 #endif
62   - . = ALIGN(64);
  62 + . = ALIGN(4096);
63 63 __per_cpu_start = .;
64 64 *(.data.percpu)
65 65 __per_cpu_end = .;
arch/cris/arch-v32/vmlinux.lds.S
... ... @@ -91,6 +91,7 @@
91 91 }
92 92 SECURITY_INIT
93 93  
  94 + . = ALIGN (8192);
94 95 __per_cpu_start = .;
95 96 .data.percpu : { *(.data.percpu) }
96 97 __per_cpu_end = .;
arch/frv/kernel/vmlinux.lds.S
... ... @@ -57,6 +57,7 @@
57 57 __alt_instructions_end = .;
58 58 .altinstr_replacement : { *(.altinstr_replacement) }
59 59  
  60 + . = ALIGN(4096);
60 61 __per_cpu_start = .;
61 62 .data.percpu : { *(.data.percpu) }
62 63 __per_cpu_end = .;
arch/i386/kernel/vmlinux.lds.S
... ... @@ -194,7 +194,7 @@
194 194 __initramfs_end = .;
195 195 }
196 196 #endif
197   - . = ALIGN(L1_CACHE_BYTES);
  197 + . = ALIGN(4096);
198 198 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
199 199 __per_cpu_start = .;
200 200 *(.data.percpu)
arch/m32r/kernel/vmlinux.lds.S
... ... @@ -110,7 +110,7 @@
110 110 __initramfs_end = .;
111 111 #endif
112 112  
113   - . = ALIGN(32);
  113 + . = ALIGN(4096);
114 114 __per_cpu_start = .;
115 115 .data.percpu : { *(.data.percpu) }
116 116 __per_cpu_end = .;
arch/mips/kernel/vmlinux.lds.S
... ... @@ -119,7 +119,7 @@
119 119 .init.ramfs : { *(.init.ramfs) }
120 120 __initramfs_end = .;
121 121 #endif
122   - . = ALIGN(32);
  122 + . = ALIGN(_PAGE_SIZE);
123 123 __per_cpu_start = .;
124 124 .data.percpu : { *(.data.percpu) }
125 125 __per_cpu_end = .;
arch/parisc/kernel/vmlinux.lds.S
... ... @@ -181,7 +181,7 @@
181 181 .init.ramfs : { *(.init.ramfs) }
182 182 __initramfs_end = .;
183 183 #endif
184   - . = ALIGN(32);
  184 + . = ALIGN(ASM_PAGE_SIZE);
185 185 __per_cpu_start = .;
186 186 .data.percpu : { *(.data.percpu) }
187 187 __per_cpu_end = .;
arch/powerpc/kernel/setup_64.c
... ... @@ -582,14 +582,14 @@
582 582 char *ptr;
583 583  
584 584 /* Copy section for each CPU (we discard the original) */
585   - size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
  585 + size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
586 586 #ifdef CONFIG_MODULES
587 587 if (size < PERCPU_ENOUGH_ROOM)
588 588 size = PERCPU_ENOUGH_ROOM;
589 589 #endif
590 590  
591 591 for_each_possible_cpu(i) {
592   - ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
  592 + ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
593 593 if (!ptr)
594 594 panic("Cannot allocate cpu data for CPU %d\n", i);
595 595  
arch/powerpc/kernel/vmlinux.lds.S
... ... @@ -139,11 +139,7 @@
139 139 __initramfs_end = .;
140 140 }
141 141 #endif
142   -#ifdef CONFIG_PPC32
143   - . = ALIGN(32);
144   -#else
145   - . = ALIGN(128);
146   -#endif
  142 + . = ALIGN(PAGE_SIZE);
147 143 .data.percpu : {
148 144 __per_cpu_start = .;
149 145 *(.data.percpu)
arch/ppc/kernel/vmlinux.lds.S
... ... @@ -130,7 +130,7 @@
130 130 __ftr_fixup : { *(__ftr_fixup) }
131 131 __stop___ftr_fixup = .;
132 132  
133   - . = ALIGN(32);
  133 + . = ALIGN(4096);
134 134 __per_cpu_start = .;
135 135 .data.percpu : { *(.data.percpu) }
136 136 __per_cpu_end = .;
arch/s390/kernel/vmlinux.lds.S
... ... @@ -107,7 +107,7 @@
107 107 . = ALIGN(2);
108 108 __initramfs_end = .;
109 109 #endif
110   - . = ALIGN(256);
  110 + . = ALIGN(4096);
111 111 __per_cpu_start = .;
112 112 .data.percpu : { *(.data.percpu) }
113 113 __per_cpu_end = .;
arch/sh/kernel/vmlinux.lds.S
... ... @@ -54,7 +54,7 @@
54 54 . = ALIGN(PAGE_SIZE);
55 55 .data.page_aligned : { *(.data.page_aligned) }
56 56  
57   - . = ALIGN(L1_CACHE_BYTES);
  57 + . = ALIGN(PAGE_SIZE);
58 58 __per_cpu_start = .;
59 59 .data.percpu : { *(.data.percpu) }
60 60 __per_cpu_end = .;
arch/sh64/kernel/vmlinux.lds.S
... ... @@ -85,7 +85,7 @@
85 85 . = ALIGN(PAGE_SIZE);
86 86 .data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) }
87 87  
88   - . = ALIGN(L1_CACHE_BYTES);
  88 + . = ALIGN(PAGE_SIZE);
89 89 __per_cpu_start = .;
90 90 .data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) }
91 91 __per_cpu_end = . ;
arch/sparc/kernel/vmlinux.lds.S
... ... @@ -65,7 +65,7 @@
65 65 __initramfs_end = .;
66 66 #endif
67 67  
68   - . = ALIGN(32);
  68 + . = ALIGN(4096);
69 69 __per_cpu_start = .;
70 70 .data.percpu : { *(.data.percpu) }
71 71 __per_cpu_end = .;
arch/sparc64/kernel/smp.c
... ... @@ -1343,11 +1343,11 @@
1343 1343 /* Copy section for each CPU (we discard the original) */
1344 1344 goal = PERCPU_ENOUGH_ROOM;
1345 1345  
1346   - __per_cpu_shift = 0;
1347   - for (size = 1UL; size < goal; size <<= 1UL)
  1346 + __per_cpu_shift = PAGE_SHIFT;
  1347 + for (size = PAGE_SIZE; size < goal; size <<= 1UL)
1348 1348 __per_cpu_shift++;
1349 1349  
1350   - ptr = alloc_bootmem(size * NR_CPUS);
  1350 + ptr = alloc_bootmem_pages(size * NR_CPUS);
1351 1351  
1352 1352 __per_cpu_base = ptr - __per_cpu_start;
1353 1353  
arch/x86_64/kernel/setup64.c
... ... @@ -103,9 +103,9 @@
103 103 if (!NODE_DATA(cpu_to_node(i))) {
104 104 printk("cpu with no node %d, num_online_nodes %d\n",
105 105 i, num_online_nodes());
106   - ptr = alloc_bootmem(size);
  106 + ptr = alloc_bootmem_pages(size);
107 107 } else {
108   - ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
  108 + ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
109 109 }
110 110 if (!ptr)
111 111 panic("Cannot allocate cpu data for CPU %d\n", i);
arch/x86_64/kernel/vmlinux.lds.S
... ... @@ -195,7 +195,7 @@
195 195 __initramfs_end = .;
196 196 #endif
197 197  
198   - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  198 + . = ALIGN(4096);
199 199 __per_cpu_start = .;
200 200 .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
201 201 __per_cpu_end = .;
arch/xtensa/kernel/vmlinux.lds.S
... ... @@ -198,7 +198,7 @@
198 198 __ftr_fixup : { *(__ftr_fixup) }
199 199 __stop___ftr_fixup = .;
200 200  
201   - . = ALIGN(32);
  201 + . = ALIGN(4096);
202 202 __per_cpu_start = .;
203 203 .data.percpu : { *(.data.percpu) }
204 204 __per_cpu_end = .;
init/main.c
... ... @@ -369,12 +369,8 @@
369 369 unsigned long nr_possible_cpus = num_possible_cpus();
370 370  
371 371 /* Copy section for each CPU (we discard the original) */
372   - size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
373   -#ifdef CONFIG_MODULES
374   - if (size < PERCPU_ENOUGH_ROOM)
375   - size = PERCPU_ENOUGH_ROOM;
376   -#endif
377   - ptr = alloc_bootmem(size * nr_possible_cpus);
  372 + size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
  373 + ptr = alloc_bootmem_pages(size * nr_possible_cpus);
378 374  
379 375 for_each_possible_cpu(i) {
380 376 __per_cpu_offset[i] = ptr - __per_cpu_start;
kernel/module.c
... ... @@ -346,10 +346,10 @@
346 346 unsigned int i;
347 347 void *ptr;
348 348  
349   - if (align > SMP_CACHE_BYTES) {
350   - printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
351   - name, align, SMP_CACHE_BYTES);
352   - align = SMP_CACHE_BYTES;
  349 + if (align > PAGE_SIZE) {
  350 + printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
  351 + name, align, PAGE_SIZE);
  352 + align = PAGE_SIZE;
353 353 }
354 354  
355 355 ptr = __per_cpu_start;