Commit 9f96cb1e8bca179a92afa40dfc3c49990f1cfc71
Committed by: Linus Torvalds
Parent: 8792f961ba
robust futex thread exit race
Calling handle_futex_death in exit_robust_list for the different robust
mutexes of a thread basically frees the mutex.  Another thread might grab
the lock immediately, which updates the next pointer of the mutex.
fetch_robust_entry over the next pointer might therefore branch into the
robust mutex list of a different thread.  This can cause two problems:
1) some mutexes held by the dead thread are not getting freed and 2) some
mutexes held by a different thread are freed.

The next pointer needs to be read before calling handle_futex_death.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
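[Editor's note] The fix boils down to a classic ordering rule for lists whose
nodes change owner as a side effect of being processed: snapshot the next
pointer while the current entry still belongs to you, and only then hand the
entry over.  Below is a minimal userspace sketch of that pattern, not kernel
code; struct node, handle() and main() are hypothetical stand-ins for the
robust list entry, handle_futex_death() and exit_robust_list().

/*
 * Minimal userspace sketch (illustrative only): once a node has been
 * "handled" it may be reused by another owner, so its fields must not
 * be read again.  Fetch next before handling, as the patch does.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

/*
 * Stand-in for handle_futex_death(): after this returns, the node may
 * be owned (and rewritten) by someone else.
 */
static void handle(struct node *n)
{
	printf("handled %d\n", n->id);
	free(n);
}

int main(void)
{
	struct node *head = NULL;

	/* Build a short list: 2 -> 1 -> 0. */
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/*
	 * Buggy order:  handle(cur); next = cur->next;  (use after free)
	 * Fixed order:  snapshot next while cur is still valid, exactly
	 * what the patch does with next_entry/next_pi.
	 */
	for (struct node *cur = head, *next; cur; cur = next) {
		next = cur->next;	/* fetch before handle() */
		handle(cur);		/* cur is dead after this */
	}
	return 0;
}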
2 files changed, 34 insertions(+), 20 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1943,9 +1943,10 @@
 void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
 	unsigned long futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -1965,12 +1966,14 @@
 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
 		return;
 
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset,
-				   curr, pip);
-
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != &head->list) {
 		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
+		/*
 		 * A pending lock might already be on the list, so
 		 * don't process it twice:
 		 */
@@ -1978,11 +1981,10 @@
 		if (handle_futex_death((void __user *)entry + futex_offset,
 					curr, pi))
 			return;
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&entry, &entry->next, &pi))
+		if (rc)
 			return;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -1991,6 +1993,10 @@
 
 		cond_resched();
 	}
+
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@
 void compat_exit_robust_list(struct task_struct *curr)
 {
 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-	compat_uptr_t uentry, upending;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	compat_uptr_t uentry, next_uentry, upending;
 	compat_long_t futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -61,11 +62,16 @@
 	if (fetch_robust_entry(&upending, &pending,
 			       &head->list_op_pending, &pip))
 		return;
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != (struct robust_list __user *) &head->list) {
 		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_uentry, &next_entry,
+			(compat_uptr_t __user *)&entry->next, &next_pi);
+		/*
 		 * A pending lock might already be on the list, so
 		 * dont process it twice:
 		 */
@@ -74,12 +80,11 @@
 					curr, pi))
 			return;
 
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&uentry, &entry,
-			(compat_uptr_t __user *)&entry->next, &pi))
+		if (rc)
 			return;
+		uentry = next_uentry;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -88,6 +93,9 @@
 
 		cond_resched();
 	}
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 asmlinkage long