Commit 4307d1e5ada595c87f9a4d16db16ba5edb70dcb1
Committed by: Thomas Gleixner
Parent: 434b3d3209
Exists in master and 4 other branches
x86: ignore the sys_getcpu() tcache parameter
Don't use the vgetcpu tcache - it's causing problems for tasks that migrate: they'll see the old cached value for up to a jiffy after the migration, further increasing the cost of the migration.

In the worst case they see completely bogus information from the tcache: a sys_getcpu() call "invalidates" the cache info by incrementing both the jiffies _and_ the cpuid info in the cache, and the following vdso_getcpu() call happens after vdso_jiffies has been incremented.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Ulrich Drepper <drepper@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
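For context, here is a minimal userspace sketch (not part of the commit) of how the syscall is invoked after this change; since the tcache argument is now ignored on both the syscall and vDSO paths, callers can simply pass NULL:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        unsigned int cpu, node;

        /* The third (tcache) argument is ignored after this commit,
         * so NULL is always a safe value to pass. */
        if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
                printf("running on CPU %u, node %u\n", cpu, node);
        return 0;
}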
Showing 2 changed files with 3 additions and 36 deletions.
arch/x86/vdso/vgetcpu.c
@@ -13,31 +13,16 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
         unsigned int dummy, p;
-        unsigned long j = 0;
 
-        /* Fast cache - only recompute value once per jiffies and avoid
-           relatively costly rdtscp/cpuid otherwise.
-           This works because the scheduler usually keeps the process
-           on the same CPU and this syscall doesn't guarantee its
-           results anyways.
-           We do this here because otherwise user space would do it on
-           its own in a likely inferior way (no access to jiffies).
-           If you don't like it pass NULL. */
-        if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
-                p = tcache->blob[1];
-        } else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+        if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
                 /* Load per CPU data from RDTSCP */
                 rdtscp(dummy, dummy, p);
         } else {
                 /* Load per CPU data from GDT */
                 asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
-        }
-        if (tcache) {
-                tcache->blob[0] = j;
-                tcache->blob[1] = p;
         }
         if (cpu)
                 *cpu = p & 0xfff;
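The value p loaded via RDTSCP or the LSL segment-limit trick packs both the CPU and the node into a single word; the hunk above only shows the CPU extraction (p & 0xfff). A hedged sketch of the decoding, assuming the usual vgetcpu encoding (the node-in-upper-bits split is not visible in this hunk):

/* Sketch of the cpu/node decoding. The "p >> 12" node extraction is
 * an assumption based on the standard vgetcpu encoding; only the
 * "p & 0xfff" line appears in the hunk above. */
static inline void vgetcpu_decode(unsigned int p,
                                  unsigned int *cpu, unsigned int *node)
{
        if (cpu)
                *cpu = p & 0xfff;       /* low 12 bits: CPU number */
        if (node)
                *node = p >> 12;        /* upper bits: NUMA node */
}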
kernel/sys.c
@@ -1750,7 +1750,7 @@
 }
 
 asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
-                           struct getcpu_cache __user *cache)
+                           struct getcpu_cache __user *unused)
 {
         int err = 0;
         int cpu = raw_smp_processor_id();
@@ -1758,24 +1758,6 @@
         err |= put_user(cpu, cpup);
         if (nodep)
                 err |= put_user(cpu_to_node(cpu), nodep);
-        if (cache) {
-                /*
-                 * The cache is not needed for this implementation,
-                 * but make sure user programs pass something
-                 * valid. vsyscall implementations can instead make
-                 * good use of the cache. Only use t0 and t1 because
-                 * these are available in both 32bit and 64bit ABI (no
-                 * need for a compat_getcpu). 32bit has enough
-                 * padding
-                 */
-                unsigned long t0, t1;
-                get_user(t0, &cache->blob[0]);
-                get_user(t1, &cache->blob[1]);
-                t0++;
-                t1++;
-                put_user(t0, &cache->blob[0]);
-                put_user(t1, &cache->blob[1]);
-        }
         return err ? -EFAULT : 0;
 }
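With the cache fiddling gone, sys_getcpu() reduces to two put_user() calls. Reassembled from the context lines above for readability (the "if (cpup)" guard is not visible in the hunk and is assumed to mirror the nodep check):

asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
                           struct getcpu_cache __user *unused)
{
        int err = 0;
        int cpu = raw_smp_processor_id();

        if (cpup)
                err |= put_user(cpu, cpup);
        if (nodep)
                err |= put_user(cpu_to_node(cpu), nodep);
        return err ? -EFAULT : 0;
}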