Commit d34696c2208b2dc1b27ec8f0a017a91e4e6eb85d

Authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:
 "Two patches to save some memory if CONFIG_NR_CPUS is large, a changed
  default for the use of compare-and-delay, and a couple of bug fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/spinlock: disabled compare-and-delay by default
  s390/mm: align 64-bit PIE binaries to 4GB
  s390/cacheinfo: coding style changes
  s390/cacheinfo: fix shared cpu masks
  s390/smp: reduce size of struct pcpu
  s390/topology: convert cpu_topology array to per cpu variable
  s390/topology: delay initialization of topology cpu masks
  s390/vdso: fix clock_gettime for CLOCK_THREAD_CPUTIME_ID, -2 and -3
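According to the pull message, the topology and smp patches mainly save memory when CONFIG_NR_CPUS is large. A rough, hedged illustration of the first part (the struct layout and the NR_CPUS / possible-CPU counts below are assumptions, not values from the commit): the old cpu_topology[NR_CPUS] array pays for every configurable CPU, while the per-cpu variable only occupies space for CPUs the booted machine can actually have.

    #include <stdio.h>

    #define NR_CPUS         512                     /* assumed CONFIG_NR_CPUS */
    #define CPUMASK_BYTES   (NR_CPUS / 8)           /* one bit per configurable CPU */

    /* Simplified stand-in for struct cpu_topology_s390: four ids plus three
     * cpumask_t-style bitmaps, each sized by NR_CPUS. */
    struct cpu_topology_s390 {
            unsigned short thread_id;
            unsigned short core_id;
            unsigned short socket_id;
            unsigned short book_id;
            unsigned char thread_mask[CPUMASK_BYTES];
            unsigned char core_mask[CPUMASK_BYTES];
            unsigned char book_mask[CPUMASK_BYTES];
    };

    int main(void)
    {
            unsigned long possible = 4;     /* CPUs present at boot (assumed) */
            unsigned long array = (unsigned long)sizeof(struct cpu_topology_s390) * NR_CPUS;
            unsigned long percpu = (unsigned long)sizeof(struct cpu_topology_s390) * possible;

            printf("cpu_topology[NR_CPUS]: %lu bytes\n", array);
            printf("per-cpu, %lu CPUs:      %lu bytes\n", possible, percpu);
            return 0;
    }

With these assumed numbers the static array costs about 100 KiB no matter how few CPUs exist, while the per-cpu copies stay under 1 KiB.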

Showing 8 changed files

arch/s390/include/asm/topology.h
... ... @@ -18,15 +18,15 @@
18 18 cpumask_t book_mask;
19 19 };
20 20  
21   -extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
  21 +DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
22 22  
23   -#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
24   -#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
25   -#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
26   -#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
27   -#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
28   -#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
29   -#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
  23 +#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
  24 +#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
  25 +#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
  26 +#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
  27 +#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
  28 +#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
  29 +#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
30 30  
31 31 #define mc_capable() 1
32 32  
... ... @@ -50,14 +50,6 @@
50 50 #define POLARIZATION_VL (1)
51 51 #define POLARIZATION_VM (2)
52 52 #define POLARIZATION_VH (3)
53   -
54   -#ifdef CONFIG_SCHED_BOOK
55   -void s390_init_cpu_topology(void);
56   -#else
57   -static inline void s390_init_cpu_topology(void)
58   -{
59   -};
60   -#endif
61 53  
62 54 #include <asm-generic/topology.h>
63 55  
arch/s390/kernel/cache.c
... ... @@ -91,12 +91,9 @@
91 91 {
92 92 if (level >= CACHE_MAX_LEVEL)
93 93 return CACHE_TYPE_NOCACHE;
94   -
95 94 ci += level;
96   -
97 95 if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
98 96 return CACHE_TYPE_NOCACHE;
99   -
100 97 return cache_type_map[ci->type];
101 98 }
102 99  
... ... @@ -111,23 +108,19 @@
111 108 }
112 109  
113 110 static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
114   - enum cache_type type, unsigned int level)
  111 + enum cache_type type, unsigned int level, int cpu)
115 112 {
116 113 int ti, num_sets;
117   - int cpu = smp_processor_id();
118 114  
119 115 if (type == CACHE_TYPE_INST)
120 116 ti = CACHE_TI_INSTRUCTION;
121 117 else
122 118 ti = CACHE_TI_UNIFIED;
123   -
124 119 this_leaf->level = level + 1;
125 120 this_leaf->type = type;
126 121 this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
127   - this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
128   - level, ti);
  122 + this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
129 123 this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
130   -
131 124 num_sets = this_leaf->size / this_leaf->coherency_line_size;
132 125 num_sets /= this_leaf->ways_of_associativity;
133 126 this_leaf->number_of_sets = num_sets;
... ... @@ -145,7 +138,6 @@
145 138  
146 139 if (!this_cpu_ci)
147 140 return -EINVAL;
148   -
149 141 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
150 142 do {
151 143 ctype = get_cache_type(&ct.ci[0], level);
... ... @@ -154,34 +146,31 @@
154 146 /* Separate instruction and data caches */
155 147 leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
156 148 } while (++level < CACHE_MAX_LEVEL);
157   -
158 149 this_cpu_ci->num_levels = level;
159 150 this_cpu_ci->num_leaves = leaves;
160   -
161 151 return 0;
162 152 }
163 153  
164 154 int populate_cache_leaves(unsigned int cpu)
165 155 {
  156 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
  157 + struct cacheinfo *this_leaf = this_cpu_ci->info_list;
166 158 unsigned int level, idx, pvt;
167 159 union cache_topology ct;
168 160 enum cache_type ctype;
169   - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
170   - struct cacheinfo *this_leaf = this_cpu_ci->info_list;
171 161  
172 162 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
173 163 for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
174 164 idx < this_cpu_ci->num_leaves; idx++, level++) {
175 165 if (!this_leaf)
176 166 return -EINVAL;
177   -
178 167 pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
179 168 ctype = get_cache_type(&ct.ci[0], level);
180 169 if (ctype == CACHE_TYPE_SEPARATE) {
181   - ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
182   - ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
  170 + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
  171 + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
183 172 } else {
184   - ci_leaf_init(this_leaf++, pvt, ctype, level);
  173 + ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
185 174 }
186 175 }
187 176 return 0;
arch/s390/kernel/early.c
... ... @@ -393,17 +393,19 @@
393 393 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
394 394 if (test_facility(129))
395 395 S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
396   - if (test_facility(128))
397   - S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
398 396 #endif
399 397 }
400 398  
401   -static int __init nocad_setup(char *str)
  399 +static int __init cad_setup(char *str)
402 400 {
403   - S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
  401 + int val;
  402 +
  403 + get_option(&str, &val);
  404 + if (val && test_facility(128))
  405 + S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
404 406 return 0;
405 407 }
406   -early_param("nocad", nocad_setup);
  408 +early_param("cad", cad_setup);
407 409  
408 410 static int __init cad_init(void)
409 411 {
arch/s390/kernel/setup.c
... ... @@ -909,7 +909,6 @@
909 909 setup_lowcore();
910 910 smp_fill_possible_mask();
911 911 cpu_init();
912   - s390_init_cpu_topology();
913 912  
914 913 /*
915 914 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
arch/s390/kernel/smp.c
... ... @@ -59,14 +59,13 @@
59 59 CPU_STATE_CONFIGURED,
60 60 };
61 61  
  62 +static DEFINE_PER_CPU(struct cpu *, cpu_device);
  63 +
62 64 struct pcpu {
63   - struct cpu *cpu;
64 65 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
65   - unsigned long async_stack; /* async stack for the cpu */
66   - unsigned long panic_stack; /* panic stack for the cpu */
67 66 unsigned long ec_mask; /* bit mask for ec_xxx functions */
68   - int state; /* physical cpu state */
69   - int polarization; /* physical polarization */
  67 + signed char state; /* physical cpu state */
  68 + signed char polarization; /* physical polarization */
70 69 u16 address; /* physical cpu address */
71 70 };
72 71  
... ... @@ -173,25 +172,30 @@
173 172 pcpu_sigp_retry(pcpu, order, 0);
174 173 }
175 174  
  175 +#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
  176 +#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
  177 +
176 178 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
177 179 {
  180 + unsigned long async_stack, panic_stack;
178 181 struct _lowcore *lc;
179 182  
180 183 if (pcpu != &pcpu_devices[0]) {
181 184 pcpu->lowcore = (struct _lowcore *)
182 185 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
183   - pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
184   - pcpu->panic_stack = __get_free_page(GFP_KERNEL);
185   - if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
  186 + async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
  187 + panic_stack = __get_free_page(GFP_KERNEL);
  188 + if (!pcpu->lowcore || !panic_stack || !async_stack)
186 189 goto out;
  190 + } else {
  191 + async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
  192 + panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
187 193 }
188 194 lc = pcpu->lowcore;
189 195 memcpy(lc, &S390_lowcore, 512);
190 196 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
191   - lc->async_stack = pcpu->async_stack + ASYNC_SIZE
192   - - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
193   - lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
194   - - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
  197 + lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
  198 + lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
195 199 lc->cpu_nr = cpu;
196 200 lc->spinlock_lockval = arch_spin_lockval(cpu);
197 201 #ifndef CONFIG_64BIT
... ... @@ -212,8 +216,8 @@
212 216 return 0;
213 217 out:
214 218 if (pcpu != &pcpu_devices[0]) {
215   - free_page(pcpu->panic_stack);
216   - free_pages(pcpu->async_stack, ASYNC_ORDER);
  219 + free_page(panic_stack);
  220 + free_pages(async_stack, ASYNC_ORDER);
217 221 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
218 222 }
219 223 return -ENOMEM;
... ... @@ -235,11 +239,11 @@
235 239 #else
236 240 vdso_free_per_cpu(pcpu->lowcore);
237 241 #endif
238   - if (pcpu != &pcpu_devices[0]) {
239   - free_page(pcpu->panic_stack);
240   - free_pages(pcpu->async_stack, ASYNC_ORDER);
241   - free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
242   - }
  242 + if (pcpu == &pcpu_devices[0])
  243 + return;
  244 + free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
  245 + free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
  246 + free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
243 247 }
244 248  
245 249 #endif /* CONFIG_HOTPLUG_CPU */
... ... @@ -366,7 +370,8 @@
366 370 void smp_call_ipl_cpu(void (*func)(void *), void *data)
367 371 {
368 372 pcpu_delegate(&pcpu_devices[0], func, data,
369   - pcpu_devices->panic_stack + PAGE_SIZE);
  373 + pcpu_devices->lowcore->panic_stack -
  374 + PANIC_FRAME_OFFSET + PAGE_SIZE);
370 375 }
371 376  
372 377 int smp_find_processor_id(u16 address)
... ... @@ -935,10 +940,6 @@
935 940 pcpu->state = CPU_STATE_CONFIGURED;
936 941 pcpu->address = stap();
937 942 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
938   - pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
939   - + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
940   - pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
941   - + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
942 943 S390_lowcore.percpu_offset = __per_cpu_offset[0];
943 944 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
944 945 set_cpu_present(0, true);
... ... @@ -1078,8 +1079,7 @@
1078 1079 void *hcpu)
1079 1080 {
1080 1081 unsigned int cpu = (unsigned int)(long)hcpu;
1081   - struct cpu *c = pcpu_devices[cpu].cpu;
1082   - struct device *s = &c->dev;
  1082 + struct device *s = &per_cpu(cpu_device, cpu)->dev;
1083 1083 int err = 0;
1084 1084  
1085 1085 switch (action & ~CPU_TASKS_FROZEN) {
... ... @@ -1102,7 +1102,7 @@
1102 1102 c = kzalloc(sizeof(*c), GFP_KERNEL);
1103 1103 if (!c)
1104 1104 return -ENOMEM;
1105   - pcpu_devices[cpu].cpu = c;
  1105 + per_cpu(cpu_device, cpu) = c;
1106 1106 s = &c->dev;
1107 1107 c->hotpluggable = 1;
1108 1108 rc = register_cpu(c, cpu);
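The struct pcpu change above drops the per-cpu struct cpu pointer and the two cached stack addresses (they are now derived from the lowcore via the new *_FRAME_OFFSET constants) and narrows state/polarization from int to signed char. A hedged estimate of the per-entry saving on 64-bit, with padding left to the compiler and an assumed CONFIG_NR_CPUS:

    #include <stdio.h>

    struct _lowcore;                        /* opaque stand-ins, only used as pointers */
    struct cpu;

    struct pcpu_old {                       /* fields before the patch */
            struct cpu *cpu;
            struct _lowcore *lowcore;
            unsigned long async_stack;
            unsigned long panic_stack;
            unsigned long ec_mask;
            int state;
            int polarization;
            unsigned short address;
    };

    struct pcpu_new {                       /* fields after the patch */
            struct _lowcore *lowcore;
            unsigned long ec_mask;
            signed char state;
            signed char polarization;
            unsigned short address;
    };

    int main(void)
    {
            unsigned long nr_cpus = 512;    /* assumed CONFIG_NR_CPUS */

            printf("per entry: %zu -> %zu bytes\n",
                   sizeof(struct pcpu_old), sizeof(struct pcpu_new));
            printf("pcpu_devices[] shrinks by about %lu bytes\n",
                   (unsigned long)(nr_cpus *
                   (sizeof(struct pcpu_old) - sizeof(struct pcpu_new))));
            return 0;
    }

On a typical LP64 ABI that is roughly 56 versus 24 bytes per entry, i.e. around 16 KiB for the whole statically sized pcpu_devices[] array with NR_CPUS=512.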
arch/s390/kernel/topology.c
... ... @@ -7,14 +7,14 @@
7 7 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
8 8  
9 9 #include <linux/workqueue.h>
10   -#include <linux/bootmem.h>
11 10 #include <linux/cpuset.h>
12 11 #include <linux/device.h>
13 12 #include <linux/export.h>
14 13 #include <linux/kernel.h>
15 14 #include <linux/sched.h>
16   -#include <linux/init.h>
17 15 #include <linux/delay.h>
  16 +#include <linux/init.h>
  17 +#include <linux/slab.h>
18 18 #include <linux/cpu.h>
19 19 #include <linux/smp.h>
20 20 #include <linux/mm.h>
... ... @@ -42,8 +42,8 @@
42 42 static struct mask_info socket_info;
43 43 static struct mask_info book_info;
44 44  
45   -struct cpu_topology_s390 cpu_topology[NR_CPUS];
46   -EXPORT_SYMBOL_GPL(cpu_topology);
  45 +DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
  46 +EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
47 47  
48 48 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
49 49 {
... ... @@ -90,15 +90,15 @@
90 90 if (lcpu < 0)
91 91 continue;
92 92 for (i = 0; i <= smp_cpu_mtid; i++) {
93   - cpu_topology[lcpu + i].book_id = book->id;
94   - cpu_topology[lcpu + i].core_id = rcore;
95   - cpu_topology[lcpu + i].thread_id = lcpu + i;
  93 + per_cpu(cpu_topology, lcpu + i).book_id = book->id;
  94 + per_cpu(cpu_topology, lcpu + i).core_id = rcore;
  95 + per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
96 96 cpumask_set_cpu(lcpu + i, &book->mask);
97 97 cpumask_set_cpu(lcpu + i, &socket->mask);
98 98 if (one_socket_per_cpu)
99   - cpu_topology[lcpu + i].socket_id = rcore;
  99 + per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
100 100 else
101   - cpu_topology[lcpu + i].socket_id = socket->id;
  101 + per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
102 102 smp_cpu_set_polarization(lcpu + i, tl_core->pp);
103 103 }
104 104 if (one_socket_per_cpu)
... ... @@ -249,14 +249,14 @@
249 249  
250 250 spin_lock_irqsave(&topology_lock, flags);
251 251 for_each_possible_cpu(cpu) {
252   - cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
253   - cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
254   - cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
  252 + per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
  253 + per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
  254 + per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
255 255 if (!MACHINE_HAS_TOPOLOGY) {
256   - cpu_topology[cpu].thread_id = cpu;
257   - cpu_topology[cpu].core_id = cpu;
258   - cpu_topology[cpu].socket_id = cpu;
259   - cpu_topology[cpu].book_id = cpu;
  256 + per_cpu(cpu_topology, cpu).thread_id = cpu;
  257 + per_cpu(cpu_topology, cpu).core_id = cpu;
  258 + per_cpu(cpu_topology, cpu).socket_id = cpu;
  259 + per_cpu(cpu_topology, cpu).book_id = cpu;
260 260 }
261 261 }
262 262 spin_unlock_irqrestore(&topology_lock, flags);
... ... @@ -334,50 +334,6 @@
334 334 set_topology_timer();
335 335 }
336 336  
337   -static int __init early_parse_topology(char *p)
338   -{
339   - if (strncmp(p, "off", 3))
340   - return 0;
341   - topology_enabled = 0;
342   - return 0;
343   -}
344   -early_param("topology", early_parse_topology);
345   -
346   -static void __init alloc_masks(struct sysinfo_15_1_x *info,
347   - struct mask_info *mask, int offset)
348   -{
349   - int i, nr_masks;
350   -
351   - nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
352   - for (i = 0; i < info->mnest - offset; i++)
353   - nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
354   - nr_masks = max(nr_masks, 1);
355   - for (i = 0; i < nr_masks; i++) {
356   - mask->next = alloc_bootmem_align(
357   - roundup_pow_of_two(sizeof(struct mask_info)),
358   - roundup_pow_of_two(sizeof(struct mask_info)));
359   - mask = mask->next;
360   - }
361   -}
362   -
363   -void __init s390_init_cpu_topology(void)
364   -{
365   - struct sysinfo_15_1_x *info;
366   - int i;
367   -
368   - if (!MACHINE_HAS_TOPOLOGY)
369   - return;
370   - tl_info = alloc_bootmem_pages(PAGE_SIZE);
371   - info = tl_info;
372   - store_topology(info);
373   - pr_info("The CPU configuration topology of the machine is:");
374   - for (i = 0; i < TOPOLOGY_NR_MAG; i++)
375   - printk(KERN_CONT " %d", info->mag[i]);
376   - printk(KERN_CONT " / %d\n", info->mnest);
377   - alloc_masks(info, &socket_info, 1);
378   - alloc_masks(info, &book_info, 2);
379   -}
380   -
381 337 static int cpu_management;
382 338  
383 339 static ssize_t dispatching_show(struct device *dev,
... ... @@ -467,20 +423,29 @@
467 423  
468 424 const struct cpumask *cpu_thread_mask(int cpu)
469 425 {
470   - return &cpu_topology[cpu].thread_mask;
  426 + return &per_cpu(cpu_topology, cpu).thread_mask;
471 427 }
472 428  
473 429  
474 430 const struct cpumask *cpu_coregroup_mask(int cpu)
475 431 {
476   - return &cpu_topology[cpu].core_mask;
  432 + return &per_cpu(cpu_topology, cpu).core_mask;
477 433 }
478 434  
479 435 static const struct cpumask *cpu_book_mask(int cpu)
480 436 {
481   - return &cpu_topology[cpu].book_mask;
  437 + return &per_cpu(cpu_topology, cpu).book_mask;
482 438 }
483 439  
  440 +static int __init early_parse_topology(char *p)
  441 +{
  442 + if (strncmp(p, "off", 3))
  443 + return 0;
  444 + topology_enabled = 0;
  445 + return 0;
  446 +}
  447 +early_param("topology", early_parse_topology);
  448 +
484 449 static struct sched_domain_topology_level s390_topology[] = {
485 450 { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
486 451 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
... ... @@ -489,6 +454,42 @@
489 454 { NULL, },
490 455 };
491 456  
  457 +static void __init alloc_masks(struct sysinfo_15_1_x *info,
  458 + struct mask_info *mask, int offset)
  459 +{
  460 + int i, nr_masks;
  461 +
  462 + nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
  463 + for (i = 0; i < info->mnest - offset; i++)
  464 + nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
  465 + nr_masks = max(nr_masks, 1);
  466 + for (i = 0; i < nr_masks; i++) {
  467 + mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
  468 + mask = mask->next;
  469 + }
  470 +}
  471 +
  472 +static int __init s390_topology_init(void)
  473 +{
  474 + struct sysinfo_15_1_x *info;
  475 + int i;
  476 +
  477 + if (!MACHINE_HAS_TOPOLOGY)
  478 + return 0;
  479 + tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
  480 + info = tl_info;
  481 + store_topology(info);
  482 + pr_info("The CPU configuration topology of the machine is:");
  483 + for (i = 0; i < TOPOLOGY_NR_MAG; i++)
  484 + printk(KERN_CONT " %d", info->mag[i]);
  485 + printk(KERN_CONT " / %d\n", info->mnest);
  486 + alloc_masks(info, &socket_info, 1);
  487 + alloc_masks(info, &book_info, 2);
  488 + set_sched_topology(s390_topology);
  489 + return 0;
  490 +}
  491 +early_initcall(s390_topology_init);
  492 +
492 493 static int __init topology_init(void)
493 494 {
494 495 if (MACHINE_HAS_TOPOLOGY)
... ... @@ -498,11 +499,4 @@
498 499 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
499 500 }
500 501 device_initcall(topology_init);
501   -
502   -static int __init early_topology_init(void)
503   -{
504   - set_sched_topology(s390_topology);
505   - return 0;
506   -}
507   -early_initcall(early_topology_init);
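The topology setup now runs from an early_initcall and uses kzalloc() instead of bootmem; the number of mask_info structures allocated per level is the product of the SYSIB 15.1.x magnitudes above that level, as computed in alloc_masks(). A small sketch of that arithmetic with made-up magnitudes (TOPOLOGY_NR_MAG = 6 is my reading of the s390 headers and the mag values are invented, not taken from this diff):

    #include <stdio.h>

    #define TOPOLOGY_NR_MAG 6       /* size of the SYSIB 15.1.x mag[] array (assumed) */

    /* Mirrors the nr_masks computation in alloc_masks(): multiply the magnitude
     * of the requested level by all magnitudes nested above it. */
    static int nr_masks(const int *mag, int mnest, int offset)
    {
            int i, n;

            n = mag[TOPOLOGY_NR_MAG - offset];
            for (i = 0; i < mnest - offset; i++)
                    n *= mag[TOPOLOGY_NR_MAG - offset - 1 - i];
            return n > 1 ? n : 1;
    }

    int main(void)
    {
            int mag[TOPOLOGY_NR_MAG] = { 0, 0, 0, 0, 3, 4 };        /* made-up magnitudes */
            int mnest = 2;                                          /* made-up nesting depth */

            printf("socket-level masks: %d\n", nr_masks(mag, mnest, 1));    /* 4 * 3 = 12 */
            printf("book-level masks:   %d\n", nr_masks(mag, mnest, 2));    /* 3 */
            return 0;
    }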
arch/s390/kernel/vdso64/clock_gettime.S
... ... @@ -25,10 +25,8 @@
25 25 je 4f
26 26 cghi %r2,__CLOCK_REALTIME
27 27 je 5f
28   - cghi %r2,__CLOCK_THREAD_CPUTIME_ID
  28 + cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
29 29 je 9f
30   - cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
31   - je 9f
32 30 cghi %r2,__CLOCK_MONOTONIC_COARSE
33 31 je 3f
34 32 cghi %r2,__CLOCK_MONOTONIC
... ... @@ -106,7 +104,7 @@
106 104 aghi %r15,16
107 105 br %r14
108 106  
109   - /* CLOCK_THREAD_CPUTIME_ID for this thread */
  107 + /* CPUCLOCK_VIRT for this thread */
110 108 9: icm %r0,15,__VDSO_ECTG_OK(%r5)
111 109 jz 12f
112 110 ear %r2,%a4
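The vdso hunk above drops the __CLOCK_THREAD_CPUTIME_ID and -2 fast paths and keeps only clockid -3. As a hedged aside, the sketch below reproduces the generic per-thread CPU-clock clockid encoding as I read it (the macro names and bit layout are assumptions, not part of this diff): -3 selects the calling thread's user-time clock, which is the only clock the ECTG-based vdso path can answer.

    #include <stdio.h>

    /* Per-thread CPU-clock encoding: negative clockids select a CPU clock
     * of a task; tid 0 means "the calling thread". */
    #define CPUCLOCK_PROF           0
    #define CPUCLOCK_VIRT           1       /* user time only, what ECTG can answer */
    #define CPUCLOCK_SCHED          2       /* scheduler run time */
    #define CPUCLOCK_PERTHREAD      4

    static int make_thread_cpuclock(int tid, int clock)
    {
            /* shift on unsigned to keep the C semantics defined */
            return (int)(~(unsigned int)tid << 3) | CPUCLOCK_PERTHREAD | clock;
    }

    int main(void)
    {
            printf("per-thread PROF : %d\n", make_thread_cpuclock(0, CPUCLOCK_PROF));   /* -4 */
            printf("per-thread VIRT : %d\n", make_thread_cpuclock(0, CPUCLOCK_VIRT));   /* -3 */
            printf("per-thread SCHED: %d\n", make_thread_cpuclock(0, CPUCLOCK_SCHED));  /* -2 */
            return 0;
    }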
arch/s390/mm/mmap.c
... ... @@ -183,7 +183,10 @@
183 183 {
184 184 unsigned long base;
185 185  
186   - base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
  186 + base = STACK_TOP / 3 * 2;
  187 + if (!is_32bit_task())
  188 + /* Align to 4GB */
  189 + base &= ~((1UL << 32) - 1);
187 190 return base + mmap_rnd();
188 191 }
189 192