Commit d038a63ace6cf2ce3aeafa741b73d542ffb65163
Exists in ti-lsk-linux-4.1.y and in 10 other branches.
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs deadlock fix from Chris Mason:

 "This has a fix for a long standing deadlock that we've been trying to
  nail down for a while. It ended up being a bad interaction with the
  fair reader/writer locks and the order btrfs reacquires locks in the
  btree"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: fix lockups from btrfs_clear_path_blocking
Showing 3 changed files Side-by-side Diff
fs/btrfs/ctree.c
... | ... | @@ -80,13 +80,6 @@ |
80 | 80 | { |
81 | 81 | int i; |
82 | 82 | |
83 | -#ifdef CONFIG_DEBUG_LOCK_ALLOC | |
84 | - /* lockdep really cares that we take all of these spinlocks | |
85 | - * in the right order. If any of the locks in the path are not | |
86 | - * currently blocking, it is going to complain. So, make really | |
87 | - * really sure by forcing the path to blocking before we clear | |
88 | - * the path blocking. | |
89 | - */ | |
90 | 83 | if (held) { |
91 | 84 | btrfs_set_lock_blocking_rw(held, held_rw); |
92 | 85 | if (held_rw == BTRFS_WRITE_LOCK) |
... | ... | @@ -95,7 +88,6 @@ |
95 | 88 | held_rw = BTRFS_READ_LOCK_BLOCKING; |
96 | 89 | } |
97 | 90 | btrfs_set_path_blocking(p); |
98 | -#endif | |
99 | 91 | |
100 | 92 | for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { |
101 | 93 | if (p->nodes[i] && p->locks[i]) { |
102 | 94 | |
... | ... | @@ -107,10 +99,8 @@ |
107 | 99 | } |
108 | 100 | } |
109 | 101 | |
110 | -#ifdef CONFIG_DEBUG_LOCK_ALLOC | |
111 | 102 | if (held) |
112 | 103 | btrfs_clear_lock_blocking_rw(held, held_rw); |
113 | -#endif | |
114 | 104 | } |
115 | 105 | |
116 | 106 | /* this also releases the path */ |
... | ... | @@ -2893,7 +2883,7 @@ |
2893 | 2883 | } |
2894 | 2884 | p->locks[level] = BTRFS_WRITE_LOCK; |
2895 | 2885 | } else { |
2896 | - err = btrfs_try_tree_read_lock(b); | |
2886 | + err = btrfs_tree_read_lock_atomic(b); | |
2897 | 2887 | if (!err) { |
2898 | 2888 | btrfs_set_path_blocking(p); |
2899 | 2889 | btrfs_tree_read_lock(b); |
... | ... | @@ -3025,7 +3015,7 @@ |
3025 | 3015 | } |
3026 | 3016 | |
3027 | 3017 | level = btrfs_header_level(b); |
3028 | - err = btrfs_try_tree_read_lock(b); | |
3018 | + err = btrfs_tree_read_lock_atomic(b); | |
3029 | 3019 | if (!err) { |
3030 | 3020 | btrfs_set_path_blocking(p); |
3031 | 3021 | btrfs_tree_read_lock(b); |
fs/btrfs/locking.c
... | ... | @@ -128,9 +128,29 @@ |
128 | 128 | } |
129 | 129 | |
130 | 130 | /* |
131 | + * take a spinning read lock. | |
131 | 132 | * returns 1 if we get the read lock and 0 if we don't |
132 | 133 | * this won't wait for blocking writers |
133 | 134 | */ |
135 | +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) | |
136 | +{ | |
137 | + if (atomic_read(&eb->blocking_writers)) | |
138 | + return 0; | |
139 | + | |
140 | + read_lock(&eb->lock); | |
141 | + if (atomic_read(&eb->blocking_writers)) { | |
142 | + read_unlock(&eb->lock); | |
143 | + return 0; | |
144 | + } | |
145 | + atomic_inc(&eb->read_locks); | |
146 | + atomic_inc(&eb->spinning_readers); | |
147 | + return 1; | |
148 | +} | |
149 | + | |
150 | +/* | |
151 | + * returns 1 if we get the read lock and 0 if we don't | |
152 | + * this won't wait for blocking writers | |
153 | + */ | |
134 | 154 | int btrfs_try_tree_read_lock(struct extent_buffer *eb) |
135 | 155 | { |
136 | 156 | if (atomic_read(&eb->blocking_writers)) |
... | ... | @@ -158,9 +178,7 @@ |
158 | 178 | atomic_read(&eb->blocking_readers)) |
159 | 179 | return 0; |
160 | 180 | |
161 | - if (!write_trylock(&eb->lock)) | |
162 | - return 0; | |
163 | - | |
181 | + write_lock(&eb->lock); | |
164 | 182 | if (atomic_read(&eb->blocking_writers) || |
165 | 183 | atomic_read(&eb->blocking_readers)) { |
166 | 184 | write_unlock(&eb->lock); |
fs/btrfs/locking.h
... | ... | @@ -35,6 +35,8 @@ |
35 | 35 | void btrfs_assert_tree_locked(struct extent_buffer *eb); |
36 | 36 | int btrfs_try_tree_read_lock(struct extent_buffer *eb); |
37 | 37 | int btrfs_try_tree_write_lock(struct extent_buffer *eb); |
38 | +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); | |
39 | + | |
38 | 40 | |
39 | 41 | static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) |
40 | 42 | { |